{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 1420,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07042253521126761,
      "grad_norm": 298.37652587890625,
      "learning_rate": 0.00019943661971830986,
      "loss": 32.3915,
      "step": 5
    },
    {
      "epoch": 0.14084507042253522,
      "grad_norm": 93.51129150390625,
      "learning_rate": 0.0001987323943661972,
      "loss": 28.743,
      "step": 10
    },
    {
      "epoch": 0.2112676056338028,
      "grad_norm": 47.780601501464844,
      "learning_rate": 0.00019802816901408452,
      "loss": 25.2619,
      "step": 15
    },
    {
      "epoch": 0.28169014084507044,
      "grad_norm": 50.333736419677734,
      "learning_rate": 0.00019732394366197184,
      "loss": 22.2152,
      "step": 20
    },
    {
      "epoch": 0.352112676056338,
      "grad_norm": 16.70964241027832,
      "learning_rate": 0.00019661971830985917,
      "loss": 18.6695,
      "step": 25
    },
    {
      "epoch": 0.4225352112676056,
      "grad_norm": 15.14587688446045,
      "learning_rate": 0.0001959154929577465,
      "loss": 14.7068,
      "step": 30
    },
    {
      "epoch": 0.49295774647887325,
      "grad_norm": 9.351914405822754,
      "learning_rate": 0.00019521126760563382,
      "loss": 8.1565,
      "step": 35
    },
    {
      "epoch": 0.5633802816901409,
      "grad_norm": 8.563234329223633,
      "learning_rate": 0.00019450704225352114,
      "loss": 5.4416,
      "step": 40
    },
    {
      "epoch": 0.6338028169014085,
      "grad_norm": 6.7714691162109375,
      "learning_rate": 0.00019380281690140847,
      "loss": 5.0701,
      "step": 45
    },
    {
      "epoch": 0.704225352112676,
      "grad_norm": 4.738491535186768,
      "learning_rate": 0.0001930985915492958,
      "loss": 4.6657,
      "step": 50
    },
    {
      "epoch": 0.7746478873239436,
      "grad_norm": 5.511097431182861,
      "learning_rate": 0.00019239436619718312,
      "loss": 4.2706,
      "step": 55
    },
    {
      "epoch": 0.8450704225352113,
      "grad_norm": 5.683650970458984,
      "learning_rate": 0.00019169014084507045,
      "loss": 3.8374,
      "step": 60
    },
    {
      "epoch": 0.9154929577464789,
      "grad_norm": 4.763166427612305,
      "learning_rate": 0.00019098591549295774,
      "loss": 3.3471,
      "step": 65
    },
    {
      "epoch": 0.9859154929577465,
      "grad_norm": 2.353513240814209,
      "learning_rate": 0.00019028169014084507,
      "loss": 2.9019,
      "step": 70
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.327265977859497,
      "eval_runtime": 1.3559,
      "eval_samples_per_second": 279.528,
      "eval_steps_per_second": 2.213,
      "step": 71
    },
    {
      "epoch": 1.056338028169014,
      "grad_norm": 2.5209953784942627,
      "learning_rate": 0.0001895774647887324,
      "loss": 2.5521,
      "step": 75
    },
    {
      "epoch": 1.1267605633802817,
      "grad_norm": 0.8227373957633972,
      "learning_rate": 0.00018887323943661972,
      "loss": 2.3236,
      "step": 80
    },
    {
      "epoch": 1.1971830985915493,
      "grad_norm": 0.8206098675727844,
      "learning_rate": 0.00018816901408450705,
      "loss": 2.1289,
      "step": 85
    },
    {
      "epoch": 1.267605633802817,
      "grad_norm": 0.748960018157959,
      "learning_rate": 0.00018746478873239437,
      "loss": 1.6767,
      "step": 90
    },
    {
      "epoch": 1.3380281690140845,
      "grad_norm": 0.4808853566646576,
      "learning_rate": 0.0001867605633802817,
      "loss": 1.419,
      "step": 95
    },
    {
      "epoch": 1.408450704225352,
      "grad_norm": 1.964240312576294,
      "learning_rate": 0.00018605633802816902,
      "loss": 1.3036,
      "step": 100
    },
    {
      "epoch": 1.4788732394366197,
      "grad_norm": 0.2643977403640747,
      "learning_rate": 0.00018535211267605635,
      "loss": 1.2568,
      "step": 105
    },
    {
      "epoch": 1.5492957746478875,
      "grad_norm": 0.38880959153175354,
      "learning_rate": 0.00018464788732394367,
      "loss": 1.2002,
      "step": 110
    },
    {
      "epoch": 1.619718309859155,
      "grad_norm": 0.22605347633361816,
      "learning_rate": 0.000183943661971831,
      "loss": 1.1694,
      "step": 115
    },
    {
      "epoch": 1.6901408450704225,
      "grad_norm": 0.23560728132724762,
      "learning_rate": 0.00018323943661971832,
      "loss": 1.1492,
      "step": 120
    },
    {
      "epoch": 1.76056338028169,
      "grad_norm": 0.15073446929454803,
      "learning_rate": 0.00018253521126760565,
      "loss": 1.1263,
      "step": 125
    },
    {
      "epoch": 1.8309859154929577,
      "grad_norm": 0.12264095991849899,
      "learning_rate": 0.00018183098591549298,
      "loss": 1.1051,
      "step": 130
    },
    {
      "epoch": 1.9014084507042255,
      "grad_norm": 0.1781918853521347,
      "learning_rate": 0.0001811267605633803,
      "loss": 1.0964,
      "step": 135
    },
    {
      "epoch": 1.971830985915493,
      "grad_norm": 0.15171071887016296,
      "learning_rate": 0.00018042253521126763,
      "loss": 1.0779,
      "step": 140
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.0098963975906372,
      "eval_runtime": 1.3563,
      "eval_samples_per_second": 279.438,
      "eval_steps_per_second": 2.212,
      "step": 142
    },
    {
      "epoch": 2.0422535211267605,
      "grad_norm": 0.11982790380716324,
      "learning_rate": 0.00017971830985915495,
      "loss": 1.0746,
      "step": 145
    },
    {
      "epoch": 2.112676056338028,
      "grad_norm": 0.11876315623521805,
      "learning_rate": 0.00017901408450704228,
      "loss": 1.0658,
      "step": 150
    },
    {
      "epoch": 2.183098591549296,
      "grad_norm": 0.09626977145671844,
      "learning_rate": 0.0001783098591549296,
      "loss": 1.0535,
      "step": 155
    },
    {
      "epoch": 2.2535211267605635,
      "grad_norm": 0.11824846267700195,
      "learning_rate": 0.00017760563380281693,
      "loss": 1.0492,
      "step": 160
    },
    {
      "epoch": 2.323943661971831,
      "grad_norm": 0.1524844616651535,
      "learning_rate": 0.00017690140845070425,
      "loss": 1.04,
      "step": 165
    },
    {
      "epoch": 2.3943661971830985,
      "grad_norm": 0.10610729455947876,
      "learning_rate": 0.00017619718309859158,
      "loss": 1.0355,
      "step": 170
    },
    {
      "epoch": 2.464788732394366,
      "grad_norm": 0.09342360496520996,
      "learning_rate": 0.0001754929577464789,
      "loss": 1.0333,
      "step": 175
    },
    {
      "epoch": 2.535211267605634,
      "grad_norm": 0.10479816794395447,
      "learning_rate": 0.0001747887323943662,
      "loss": 1.0318,
      "step": 180
    },
    {
      "epoch": 2.6056338028169015,
      "grad_norm": 0.0711260586977005,
      "learning_rate": 0.00017408450704225353,
      "loss": 1.0241,
      "step": 185
    },
    {
      "epoch": 2.676056338028169,
      "grad_norm": 0.11890017986297607,
      "learning_rate": 0.00017338028169014086,
      "loss": 1.0241,
      "step": 190
    },
    {
      "epoch": 2.7464788732394365,
      "grad_norm": 0.10851440578699112,
      "learning_rate": 0.00017267605633802818,
      "loss": 1.018,
      "step": 195
    },
    {
      "epoch": 2.816901408450704,
      "grad_norm": 0.1457432061433792,
      "learning_rate": 0.0001719718309859155,
      "loss": 1.0149,
      "step": 200
    },
    {
      "epoch": 2.887323943661972,
      "grad_norm": 0.0658695176243782,
      "learning_rate": 0.00017126760563380283,
      "loss": 1.0093,
      "step": 205
    },
    {
      "epoch": 2.9577464788732395,
      "grad_norm": 0.09453574568033218,
      "learning_rate": 0.00017056338028169016,
      "loss": 1.0081,
      "step": 210
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.9575695395469666,
      "eval_runtime": 1.3565,
      "eval_samples_per_second": 279.401,
      "eval_steps_per_second": 2.212,
      "step": 213
    },
    {
      "epoch": 3.028169014084507,
      "grad_norm": 0.13185155391693115,
      "learning_rate": 0.00016985915492957746,
      "loss": 1.0059,
      "step": 215
    },
    {
      "epoch": 3.0985915492957745,
      "grad_norm": 0.10181483626365662,
      "learning_rate": 0.00016915492957746478,
      "loss": 0.9993,
      "step": 220
    },
    {
      "epoch": 3.169014084507042,
      "grad_norm": 0.07031014561653137,
      "learning_rate": 0.0001684507042253521,
      "loss": 1.0002,
      "step": 225
    },
    {
      "epoch": 3.23943661971831,
      "grad_norm": 0.08129975944757462,
      "learning_rate": 0.00016774647887323943,
      "loss": 0.9966,
      "step": 230
    },
    {
      "epoch": 3.3098591549295775,
      "grad_norm": 0.061685919761657715,
      "learning_rate": 0.00016704225352112676,
      "loss": 0.9986,
      "step": 235
    },
    {
      "epoch": 3.380281690140845,
      "grad_norm": 0.10376816242933273,
      "learning_rate": 0.00016633802816901408,
      "loss": 0.9914,
      "step": 240
    },
    {
      "epoch": 3.4507042253521125,
      "grad_norm": 0.16595374047756195,
      "learning_rate": 0.0001656338028169014,
      "loss": 0.9944,
      "step": 245
    },
    {
      "epoch": 3.52112676056338,
      "grad_norm": 0.0959262102842331,
      "learning_rate": 0.00016492957746478873,
      "loss": 0.99,
      "step": 250
    },
    {
      "epoch": 3.591549295774648,
      "grad_norm": 0.04954326152801514,
      "learning_rate": 0.00016422535211267606,
      "loss": 0.9887,
      "step": 255
    },
    {
      "epoch": 3.6619718309859155,
      "grad_norm": 0.09042917937040329,
      "learning_rate": 0.00016352112676056339,
      "loss": 0.989,
      "step": 260
    },
    {
      "epoch": 3.732394366197183,
      "grad_norm": 0.1556464284658432,
      "learning_rate": 0.0001628169014084507,
      "loss": 0.9914,
      "step": 265
    },
    {
      "epoch": 3.802816901408451,
      "grad_norm": 0.0660383403301239,
      "learning_rate": 0.00016211267605633804,
      "loss": 0.9874,
      "step": 270
    },
    {
      "epoch": 3.873239436619718,
      "grad_norm": 0.08755503594875336,
      "learning_rate": 0.00016140845070422536,
      "loss": 0.9823,
      "step": 275
    },
    {
      "epoch": 3.943661971830986,
      "grad_norm": 0.07488470524549484,
      "learning_rate": 0.0001607042253521127,
      "loss": 0.9846,
      "step": 280
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.9477686285972595,
      "eval_runtime": 1.356,
      "eval_samples_per_second": 279.49,
      "eval_steps_per_second": 2.212,
      "step": 284
    },
    {
      "epoch": 4.014084507042254,
      "grad_norm": 0.0663798451423645,
      "learning_rate": 0.00016,
      "loss": 0.9813,
      "step": 285
    },
    {
      "epoch": 4.084507042253521,
      "grad_norm": 0.07960853725671768,
      "learning_rate": 0.00015929577464788734,
      "loss": 0.9834,
      "step": 290
    },
    {
      "epoch": 4.154929577464789,
      "grad_norm": 0.0459803082048893,
      "learning_rate": 0.00015859154929577466,
      "loss": 0.9813,
      "step": 295
    },
    {
      "epoch": 4.225352112676056,
      "grad_norm": 1.369140386581421,
      "learning_rate": 0.00015788732394366196,
      "loss": 0.9847,
      "step": 300
    },
    {
      "epoch": 4.295774647887324,
      "grad_norm": 5.1048102378845215,
      "learning_rate": 0.0001571830985915493,
      "loss": 0.9825,
      "step": 305
    },
    {
      "epoch": 4.366197183098592,
      "grad_norm": 0.722586452960968,
      "learning_rate": 0.00015647887323943661,
      "loss": 0.9782,
      "step": 310
    },
    {
      "epoch": 4.436619718309859,
      "grad_norm": 0.11551281064748764,
      "learning_rate": 0.00015577464788732394,
      "loss": 0.9775,
      "step": 315
    },
    {
      "epoch": 4.507042253521127,
      "grad_norm": 0.07706153392791748,
      "learning_rate": 0.00015507042253521126,
      "loss": 0.9765,
      "step": 320
    },
    {
      "epoch": 4.577464788732394,
      "grad_norm": 0.13360825181007385,
      "learning_rate": 0.0001543661971830986,
      "loss": 0.9825,
      "step": 325
    },
    {
      "epoch": 4.647887323943662,
      "grad_norm": 0.0996808111667633,
      "learning_rate": 0.00015366197183098592,
      "loss": 0.9755,
      "step": 330
    },
    {
      "epoch": 4.71830985915493,
      "grad_norm": 0.064101941883564,
      "learning_rate": 0.00015295774647887324,
      "loss": 0.9747,
      "step": 335
    },
    {
      "epoch": 4.788732394366197,
      "grad_norm": 0.0587599016726017,
      "learning_rate": 0.00015225352112676057,
      "loss": 0.9762,
      "step": 340
    },
    {
      "epoch": 4.859154929577465,
      "grad_norm": 0.08781857788562775,
      "learning_rate": 0.0001515492957746479,
      "loss": 0.9725,
      "step": 345
    },
    {
      "epoch": 4.929577464788732,
      "grad_norm": 0.09201648831367493,
      "learning_rate": 0.00015084507042253522,
      "loss": 0.9751,
      "step": 350
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.07896555215120316,
      "learning_rate": 0.00015014084507042254,
      "loss": 0.974,
      "step": 355
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.9434144496917725,
      "eval_runtime": 1.3282,
      "eval_samples_per_second": 285.356,
      "eval_steps_per_second": 2.259,
      "step": 355
    },
    {
      "epoch": 5.070422535211268,
      "grad_norm": 0.06868085265159607,
      "learning_rate": 0.00014943661971830987,
      "loss": 0.9773,
      "step": 360
    },
    {
      "epoch": 5.140845070422535,
      "grad_norm": 0.06904036551713943,
      "learning_rate": 0.0001487323943661972,
      "loss": 0.9724,
      "step": 365
    },
    {
      "epoch": 5.211267605633803,
      "grad_norm": 0.05451333895325661,
      "learning_rate": 0.00014802816901408452,
      "loss": 0.9716,
      "step": 370
    },
    {
      "epoch": 5.28169014084507,
      "grad_norm": 0.08863852173089981,
      "learning_rate": 0.00014732394366197185,
      "loss": 0.9692,
      "step": 375
    },
    {
      "epoch": 5.352112676056338,
      "grad_norm": 0.06055117025971413,
      "learning_rate": 0.00014661971830985917,
      "loss": 0.9713,
      "step": 380
    },
    {
      "epoch": 5.422535211267606,
      "grad_norm": 0.10994315892457962,
      "learning_rate": 0.0001459154929577465,
      "loss": 0.9707,
      "step": 385
    },
    {
      "epoch": 5.492957746478873,
      "grad_norm": 0.09731490910053253,
      "learning_rate": 0.00014521126760563382,
      "loss": 0.9694,
      "step": 390
    },
    {
      "epoch": 5.563380281690141,
      "grad_norm": 0.05324326828122139,
      "learning_rate": 0.00014450704225352115,
      "loss": 0.9666,
      "step": 395
    },
    {
      "epoch": 5.633802816901408,
      "grad_norm": 0.05008189380168915,
      "learning_rate": 0.00014380281690140847,
      "loss": 0.9672,
      "step": 400
    },
    {
      "epoch": 5.704225352112676,
      "grad_norm": 0.0279605221003294,
      "learning_rate": 0.0001430985915492958,
      "loss": 0.9676,
      "step": 405
    },
    {
      "epoch": 5.774647887323944,
      "grad_norm": 0.0670681744813919,
      "learning_rate": 0.00014239436619718312,
      "loss": 0.9654,
      "step": 410
    },
    {
      "epoch": 5.845070422535211,
      "grad_norm": 0.04129045829176903,
      "learning_rate": 0.00014169014084507045,
      "loss": 0.9659,
      "step": 415
    },
    {
      "epoch": 5.915492957746479,
      "grad_norm": 0.04552697390317917,
      "learning_rate": 0.00014098591549295775,
      "loss": 0.9675,
      "step": 420
    },
    {
      "epoch": 5.985915492957746,
      "grad_norm": 0.05515826866030693,
      "learning_rate": 0.00014028169014084507,
      "loss": 0.967,
      "step": 425
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.9389934539794922,
      "eval_runtime": 1.3536,
      "eval_samples_per_second": 279.994,
      "eval_steps_per_second": 2.216,
      "step": 426
    },
    {
      "epoch": 6.056338028169014,
      "grad_norm": 0.060634512454271317,
      "learning_rate": 0.0001395774647887324,
      "loss": 0.9661,
      "step": 430
    },
    {
      "epoch": 6.126760563380282,
      "grad_norm": 0.035951510071754456,
      "learning_rate": 0.00013887323943661972,
      "loss": 0.9673,
      "step": 435
    },
    {
      "epoch": 6.197183098591549,
      "grad_norm": 0.07630197703838348,
      "learning_rate": 0.00013816901408450705,
      "loss": 0.969,
      "step": 440
    },
    {
      "epoch": 6.267605633802817,
      "grad_norm": 0.048709649592638016,
      "learning_rate": 0.00013746478873239438,
      "loss": 0.9605,
      "step": 445
    },
    {
      "epoch": 6.338028169014084,
      "grad_norm": 0.08284774422645569,
      "learning_rate": 0.0001367605633802817,
      "loss": 0.9647,
      "step": 450
    },
    {
      "epoch": 6.408450704225352,
      "grad_norm": 0.10234501212835312,
      "learning_rate": 0.00013605633802816903,
      "loss": 0.9649,
      "step": 455
    },
    {
      "epoch": 6.47887323943662,
      "grad_norm": 0.07582994550466537,
      "learning_rate": 0.00013535211267605635,
      "loss": 0.9637,
      "step": 460
    },
    {
      "epoch": 6.549295774647887,
      "grad_norm": 0.032033782452344894,
      "learning_rate": 0.00013464788732394368,
      "loss": 0.9616,
      "step": 465
    },
    {
      "epoch": 6.619718309859155,
      "grad_norm": 0.029730021953582764,
      "learning_rate": 0.000133943661971831,
      "loss": 0.961,
      "step": 470
    },
    {
      "epoch": 6.690140845070422,
      "grad_norm": 0.036565788090229034,
      "learning_rate": 0.00013323943661971833,
      "loss": 0.9613,
      "step": 475
    },
    {
      "epoch": 6.76056338028169,
      "grad_norm": 0.046678755432367325,
      "learning_rate": 0.00013253521126760565,
      "loss": 0.9631,
      "step": 480
    },
    {
      "epoch": 6.830985915492958,
      "grad_norm": 0.11235543340444565,
      "learning_rate": 0.00013183098591549295,
      "loss": 0.9617,
      "step": 485
    },
    {
      "epoch": 6.901408450704225,
      "grad_norm": 0.04812972992658615,
      "learning_rate": 0.00013112676056338028,
      "loss": 0.9599,
      "step": 490
    },
    {
      "epoch": 6.971830985915493,
      "grad_norm": 0.05159539356827736,
      "learning_rate": 0.0001304225352112676,
      "loss": 0.9622,
      "step": 495
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.938995361328125,
      "eval_runtime": 1.3575,
      "eval_samples_per_second": 279.181,
      "eval_steps_per_second": 2.21,
      "step": 497
    },
    {
      "epoch": 7.042253521126761,
      "grad_norm": 0.046111464500427246,
      "learning_rate": 0.00012971830985915493,
      "loss": 0.9592,
      "step": 500
    },
    {
      "epoch": 7.112676056338028,
      "grad_norm": 0.09057960659265518,
      "learning_rate": 0.00012901408450704226,
      "loss": 0.9605,
      "step": 505
    },
    {
      "epoch": 7.183098591549296,
      "grad_norm": 0.06652987748384476,
      "learning_rate": 0.00012830985915492958,
      "loss": 0.9595,
      "step": 510
    },
    {
      "epoch": 7.253521126760563,
      "grad_norm": 0.05949646607041359,
      "learning_rate": 0.0001276056338028169,
      "loss": 0.9586,
      "step": 515
    },
    {
      "epoch": 7.323943661971831,
      "grad_norm": 0.04339510202407837,
      "learning_rate": 0.00012690140845070423,
      "loss": 0.9594,
      "step": 520
    },
    {
      "epoch": 7.394366197183099,
      "grad_norm": 0.06728670001029968,
      "learning_rate": 0.00012619718309859156,
      "loss": 0.9613,
      "step": 525
    },
    {
      "epoch": 7.464788732394366,
      "grad_norm": 0.03795556724071503,
      "learning_rate": 0.00012549295774647888,
      "loss": 0.9563,
      "step": 530
    },
    {
      "epoch": 7.535211267605634,
      "grad_norm": 0.0375334694981575,
      "learning_rate": 0.00012478873239436618,
      "loss": 0.9601,
      "step": 535
    },
    {
      "epoch": 7.605633802816901,
      "grad_norm": 0.057856108993291855,
      "learning_rate": 0.0001240845070422535,
      "loss": 0.9616,
      "step": 540
    },
    {
      "epoch": 7.676056338028169,
      "grad_norm": 0.03536657989025116,
      "learning_rate": 0.00012338028169014083,
      "loss": 0.9557,
      "step": 545
    },
    {
      "epoch": 7.746478873239437,
      "grad_norm": 0.05437963828444481,
      "learning_rate": 0.00012267605633802816,
      "loss": 0.9596,
      "step": 550
    },
    {
      "epoch": 7.816901408450704,
      "grad_norm": 0.0574585422873497,
      "learning_rate": 0.0001219718309859155,
      "loss": 0.9583,
      "step": 555
    },
    {
      "epoch": 7.887323943661972,
      "grad_norm": 0.0447322279214859,
      "learning_rate": 0.00012126760563380282,
      "loss": 0.9599,
      "step": 560
    },
    {
      "epoch": 7.957746478873239,
      "grad_norm": 0.06250979751348495,
      "learning_rate": 0.00012056338028169015,
      "loss": 0.9581,
      "step": 565
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.9374349117279053,
      "eval_runtime": 1.356,
      "eval_samples_per_second": 279.498,
      "eval_steps_per_second": 2.212,
      "step": 568
    },
    {
      "epoch": 8.028169014084508,
      "grad_norm": 0.02564265951514244,
      "learning_rate": 0.00011985915492957746,
      "loss": 0.9574,
      "step": 570
    },
    {
      "epoch": 8.098591549295774,
      "grad_norm": 0.047478966414928436,
      "learning_rate": 0.00011915492957746479,
      "loss": 0.9538,
      "step": 575
    },
    {
      "epoch": 8.169014084507042,
      "grad_norm": 0.04486701637506485,
      "learning_rate": 0.00011845070422535211,
      "loss": 0.957,
      "step": 580
    },
    {
      "epoch": 8.23943661971831,
      "grad_norm": 0.03243768587708473,
      "learning_rate": 0.00011774647887323944,
      "loss": 0.9549,
      "step": 585
    },
    {
      "epoch": 8.309859154929578,
      "grad_norm": 0.023309363052248955,
      "learning_rate": 0.00011704225352112676,
      "loss": 0.9552,
      "step": 590
    },
    {
      "epoch": 8.380281690140846,
      "grad_norm": 0.06186755374073982,
      "learning_rate": 0.00011633802816901409,
      "loss": 0.956,
      "step": 595
    },
    {
      "epoch": 8.450704225352112,
      "grad_norm": 0.033569905906915665,
      "learning_rate": 0.00011563380281690141,
      "loss": 0.9558,
      "step": 600
    },
    {
      "epoch": 8.52112676056338,
      "grad_norm": 0.02437894605100155,
      "learning_rate": 0.00011492957746478874,
      "loss": 0.9539,
      "step": 605
    },
    {
      "epoch": 8.591549295774648,
      "grad_norm": 0.0500415675342083,
      "learning_rate": 0.00011422535211267606,
      "loss": 0.9567,
      "step": 610
    },
    {
      "epoch": 8.661971830985916,
      "grad_norm": 0.03755724057555199,
      "learning_rate": 0.00011352112676056339,
      "loss": 0.9561,
      "step": 615
    },
    {
      "epoch": 8.732394366197184,
      "grad_norm": 0.06001519784331322,
      "learning_rate": 0.00011281690140845072,
      "loss": 0.9587,
      "step": 620
    },
    {
      "epoch": 8.80281690140845,
      "grad_norm": 0.03620074316859245,
      "learning_rate": 0.00011211267605633804,
      "loss": 0.9541,
      "step": 625
    },
    {
      "epoch": 8.873239436619718,
      "grad_norm": 0.052633192390203476,
      "learning_rate": 0.00011140845070422537,
      "loss": 0.9552,
      "step": 630
    },
    {
      "epoch": 8.943661971830986,
      "grad_norm": 0.06923436373472214,
      "learning_rate": 0.00011070422535211269,
      "loss": 0.9562,
      "step": 635
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.9356324672698975,
      "eval_runtime": 1.356,
      "eval_samples_per_second": 279.507,
      "eval_steps_per_second": 2.212,
      "step": 639
    },
    {
      "epoch": 9.014084507042254,
      "grad_norm": 0.030308526009321213,
      "learning_rate": 0.00011000000000000002,
      "loss": 0.9567,
      "step": 640
    },
    {
      "epoch": 9.084507042253522,
      "grad_norm": 0.04463675990700722,
      "learning_rate": 0.00010929577464788734,
      "loss": 0.9534,
      "step": 645
    },
    {
      "epoch": 9.154929577464788,
      "grad_norm": 0.07801490277051926,
      "learning_rate": 0.00010859154929577467,
      "loss": 0.9532,
      "step": 650
    },
    {
      "epoch": 9.225352112676056,
      "grad_norm": 0.07350599020719528,
      "learning_rate": 0.00010788732394366197,
      "loss": 0.9573,
      "step": 655
    },
    {
      "epoch": 9.295774647887324,
      "grad_norm": 0.04489121586084366,
      "learning_rate": 0.00010718309859154929,
      "loss": 0.9529,
      "step": 660
    },
    {
      "epoch": 9.366197183098592,
      "grad_norm": 0.03186199814081192,
      "learning_rate": 0.00010647887323943662,
      "loss": 0.9547,
      "step": 665
    },
    {
      "epoch": 9.43661971830986,
      "grad_norm": 0.050086986273527145,
      "learning_rate": 0.00010577464788732394,
      "loss": 0.9537,
      "step": 670
    },
    {
      "epoch": 9.507042253521126,
      "grad_norm": 0.037186432629823685,
      "learning_rate": 0.00010507042253521127,
      "loss": 0.9526,
      "step": 675
    },
    {
      "epoch": 9.577464788732394,
      "grad_norm": 0.07604589313268661,
      "learning_rate": 0.0001043661971830986,
      "loss": 0.9548,
      "step": 680
    },
    {
      "epoch": 9.647887323943662,
      "grad_norm": 0.0647517517209053,
      "learning_rate": 0.00010366197183098592,
      "loss": 0.9565,
      "step": 685
    },
    {
      "epoch": 9.71830985915493,
      "grad_norm": 0.0681651309132576,
      "learning_rate": 0.00010295774647887325,
      "loss": 0.9533,
      "step": 690
    },
    {
      "epoch": 9.788732394366198,
      "grad_norm": 0.04616454616189003,
      "learning_rate": 0.00010225352112676057,
      "loss": 0.9529,
      "step": 695
    },
    {
      "epoch": 9.859154929577464,
      "grad_norm": 0.029675917699933052,
      "learning_rate": 0.0001015492957746479,
      "loss": 0.9521,
      "step": 700
    },
    {
      "epoch": 9.929577464788732,
      "grad_norm": 0.025709936395287514,
      "learning_rate": 0.00010084507042253521,
      "loss": 0.9494,
      "step": 705
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.02850981615483761,
      "learning_rate": 0.00010014084507042253,
      "loss": 0.9522,
      "step": 710
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.9344412684440613,
      "eval_runtime": 1.3287,
      "eval_samples_per_second": 285.24,
      "eval_steps_per_second": 2.258,
      "step": 710
    },
    {
      "epoch": 10.070422535211268,
      "grad_norm": 0.05556517466902733,
      "learning_rate": 9.943661971830986e-05,
      "loss": 0.9529,
      "step": 715
    },
    {
      "epoch": 10.140845070422536,
      "grad_norm": 0.06735611706972122,
      "learning_rate": 9.873239436619719e-05,
      "loss": 0.9531,
      "step": 720
    },
    {
      "epoch": 10.211267605633802,
      "grad_norm": 0.05679089203476906,
      "learning_rate": 9.802816901408451e-05,
      "loss": 0.9511,
      "step": 725
    },
    {
      "epoch": 10.28169014084507,
      "grad_norm": 0.09331786632537842,
      "learning_rate": 9.732394366197184e-05,
      "loss": 0.9509,
      "step": 730
    },
    {
      "epoch": 10.352112676056338,
      "grad_norm": 0.06282954663038254,
      "learning_rate": 9.661971830985916e-05,
      "loss": 0.952,
      "step": 735
    },
    {
      "epoch": 10.422535211267606,
      "grad_norm": 0.06508725136518478,
      "learning_rate": 9.591549295774649e-05,
      "loss": 0.9534,
      "step": 740
    },
    {
      "epoch": 10.492957746478874,
      "grad_norm": 0.041396528482437134,
      "learning_rate": 9.52112676056338e-05,
      "loss": 0.9537,
      "step": 745
    },
    {
      "epoch": 10.56338028169014,
      "grad_norm": 0.04602223262190819,
      "learning_rate": 9.450704225352112e-05,
      "loss": 0.951,
      "step": 750
    },
    {
      "epoch": 10.633802816901408,
      "grad_norm": 0.03372340276837349,
      "learning_rate": 9.380281690140845e-05,
      "loss": 0.9498,
      "step": 755
    },
    {
      "epoch": 10.704225352112676,
      "grad_norm": 0.03808495029807091,
      "learning_rate": 9.309859154929578e-05,
      "loss": 0.9504,
      "step": 760
    },
    {
      "epoch": 10.774647887323944,
      "grad_norm": 0.027203522622585297,
      "learning_rate": 9.23943661971831e-05,
      "loss": 0.9514,
      "step": 765
    },
    {
      "epoch": 10.845070422535212,
      "grad_norm": 0.03273136168718338,
      "learning_rate": 9.169014084507043e-05,
      "loss": 0.9496,
      "step": 770
    },
    {
      "epoch": 10.915492957746478,
      "grad_norm": 0.03632248193025589,
      "learning_rate": 9.098591549295775e-05,
      "loss": 0.9518,
      "step": 775
    },
    {
      "epoch": 10.985915492957746,
      "grad_norm": 0.04196527600288391,
      "learning_rate": 9.028169014084508e-05,
      "loss": 0.9493,
      "step": 780
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.9340574145317078,
      "eval_runtime": 1.3554,
      "eval_samples_per_second": 279.624,
      "eval_steps_per_second": 2.213,
      "step": 781
    },
    {
      "epoch": 11.056338028169014,
      "grad_norm": 0.02801569737493992,
      "learning_rate": 8.95774647887324e-05,
      "loss": 0.9506,
      "step": 785
    },
    {
      "epoch": 11.126760563380282,
      "grad_norm": 0.07875282317399979,
      "learning_rate": 8.887323943661973e-05,
      "loss": 0.9527,
      "step": 790
    },
    {
      "epoch": 11.19718309859155,
      "grad_norm": 0.06978793442249298,
      "learning_rate": 8.816901408450705e-05,
      "loss": 0.9513,
      "step": 795
    },
    {
      "epoch": 11.267605633802816,
      "grad_norm": 0.05102715268731117,
      "learning_rate": 8.746478873239437e-05,
      "loss": 0.9493,
      "step": 800
    },
    {
      "epoch": 11.338028169014084,
      "grad_norm": 0.037087179720401764,
      "learning_rate": 8.676056338028169e-05,
      "loss": 0.9482,
      "step": 805
    },
    {
      "epoch": 11.408450704225352,
      "grad_norm": 0.026025516912341118,
      "learning_rate": 8.605633802816902e-05,
      "loss": 0.9499,
      "step": 810
    },
    {
      "epoch": 11.47887323943662,
      "grad_norm": 0.046463027596473694,
      "learning_rate": 8.535211267605634e-05,
      "loss": 0.9526,
      "step": 815
    },
    {
      "epoch": 11.549295774647888,
      "grad_norm": 0.03885842487215996,
      "learning_rate": 8.464788732394367e-05,
      "loss": 0.9508,
      "step": 820
    },
    {
      "epoch": 11.619718309859154,
      "grad_norm": 0.042792994529008865,
      "learning_rate": 8.3943661971831e-05,
      "loss": 0.9491,
      "step": 825
    },
    {
      "epoch": 11.690140845070422,
      "grad_norm": 0.040800243616104126,
      "learning_rate": 8.323943661971832e-05,
      "loss": 0.9502,
      "step": 830
    },
    {
      "epoch": 11.76056338028169,
      "grad_norm": 0.04244118928909302,
      "learning_rate": 8.253521126760565e-05,
      "loss": 0.9463,
      "step": 835
    },
    {
      "epoch": 11.830985915492958,
      "grad_norm": 0.027077561244368553,
      "learning_rate": 8.183098591549296e-05,
      "loss": 0.9486,
      "step": 840
    },
    {
      "epoch": 11.901408450704226,
      "grad_norm": 0.0710897371172905,
      "learning_rate": 8.112676056338028e-05,
      "loss": 0.9498,
      "step": 845
    },
    {
      "epoch": 11.971830985915492,
      "grad_norm": 0.027009285986423492,
      "learning_rate": 8.042253521126761e-05,
      "loss": 0.9492,
      "step": 850
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.934472382068634,
      "eval_runtime": 1.3559,
      "eval_samples_per_second": 279.512,
      "eval_steps_per_second": 2.212,
      "step": 852
    },
    {
      "epoch": 12.04225352112676,
      "grad_norm": 0.027241826057434082,
      "learning_rate": 7.971830985915493e-05,
      "loss": 0.9485,
      "step": 855
    },
    {
      "epoch": 12.112676056338028,
      "grad_norm": 17.60222816467285,
      "learning_rate": 7.901408450704225e-05,
      "loss": 0.9505,
      "step": 860
    },
    {
      "epoch": 12.183098591549296,
      "grad_norm": 0.04522601515054703,
      "learning_rate": 7.830985915492957e-05,
      "loss": 0.9511,
      "step": 865
    },
    {
      "epoch": 12.253521126760564,
      "grad_norm": 0.0453733466565609,
      "learning_rate": 7.76056338028169e-05,
      "loss": 0.9488,
      "step": 870
    },
    {
      "epoch": 12.323943661971832,
      "grad_norm": 0.04800290986895561,
      "learning_rate": 7.690140845070422e-05,
      "loss": 0.9484,
      "step": 875
    },
    {
      "epoch": 12.394366197183098,
      "grad_norm": 0.056329064071178436,
      "learning_rate": 7.619718309859155e-05,
      "loss": 0.9488,
      "step": 880
    },
    {
      "epoch": 12.464788732394366,
      "grad_norm": 0.04998723790049553,
      "learning_rate": 7.549295774647887e-05,
      "loss": 0.9491,
      "step": 885
    },
    {
      "epoch": 12.535211267605634,
      "grad_norm": 0.024647079408168793,
      "learning_rate": 7.47887323943662e-05,
      "loss": 0.9474,
      "step": 890
    },
    {
      "epoch": 12.605633802816902,
      "grad_norm": 0.06089835241436958,
      "learning_rate": 7.408450704225352e-05,
      "loss": 0.9487,
      "step": 895
    },
    {
      "epoch": 12.676056338028168,
      "grad_norm": 0.07820327579975128,
      "learning_rate": 7.338028169014085e-05,
      "loss": 0.9486,
      "step": 900
    },
    {
      "epoch": 12.746478873239436,
      "grad_norm": 0.03777018189430237,
      "learning_rate": 7.267605633802818e-05,
      "loss": 0.9483,
      "step": 905
    },
    {
      "epoch": 12.816901408450704,
      "grad_norm": 0.038660142570734024,
      "learning_rate": 7.19718309859155e-05,
      "loss": 0.9478,
      "step": 910
    },
    {
      "epoch": 12.887323943661972,
      "grad_norm": 0.028829969465732574,
      "learning_rate": 7.126760563380283e-05,
      "loss": 0.9471,
      "step": 915
    },
    {
      "epoch": 12.95774647887324,
      "grad_norm": 0.02644144371151924,
      "learning_rate": 7.056338028169014e-05,
      "loss": 0.9493,
      "step": 920
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.9329857230186462,
      "eval_runtime": 1.3566,
      "eval_samples_per_second": 279.384,
      "eval_steps_per_second": 2.211,
      "step": 923
    },
    {
      "epoch": 13.028169014084508,
      "grad_norm": 0.027125298976898193,
      "learning_rate": 6.985915492957746e-05,
      "loss": 0.948,
      "step": 925
    },
    {
      "epoch": 13.098591549295774,
      "grad_norm": 0.025396650657057762,
      "learning_rate": 6.915492957746479e-05,
      "loss": 0.9485,
      "step": 930
    },
    {
      "epoch": 13.169014084507042,
      "grad_norm": 0.039986852556467056,
      "learning_rate": 6.845070422535212e-05,
      "loss": 0.948,
      "step": 935
    },
    {
      "epoch": 13.23943661971831,
      "grad_norm": 0.04669572040438652,
      "learning_rate": 6.774647887323944e-05,
      "loss": 0.9484,
      "step": 940
    },
    {
      "epoch": 13.309859154929578,
      "grad_norm": 0.05597477778792381,
      "learning_rate": 6.704225352112677e-05,
      "loss": 0.9492,
      "step": 945
    },
    {
      "epoch": 13.380281690140846,
      "grad_norm": 0.03363762050867081,
      "learning_rate": 6.633802816901409e-05,
      "loss": 0.9479,
      "step": 950
    },
    {
      "epoch": 13.450704225352112,
      "grad_norm": 0.0501028336584568,
      "learning_rate": 6.563380281690142e-05,
      "loss": 0.9472,
      "step": 955
    },
    {
      "epoch": 13.52112676056338,
      "grad_norm": 0.02368428371846676,
      "learning_rate": 6.492957746478874e-05,
      "loss": 0.9473,
      "step": 960
    },
    {
      "epoch": 13.591549295774648,
      "grad_norm": 0.024430401623249054,
      "learning_rate": 6.422535211267607e-05,
      "loss": 0.9461,
      "step": 965
    },
    {
      "epoch": 13.661971830985916,
      "grad_norm": 0.036314696073532104,
      "learning_rate": 6.35211267605634e-05,
      "loss": 0.9469,
      "step": 970
    },
    {
      "epoch": 13.732394366197184,
      "grad_norm": 0.03353135287761688,
      "learning_rate": 6.28169014084507e-05,
      "loss": 0.9468,
      "step": 975
    },
    {
      "epoch": 13.80281690140845,
      "grad_norm": 0.0263178963214159,
      "learning_rate": 6.211267605633803e-05,
      "loss": 0.9484,
      "step": 980
    },
    {
      "epoch": 13.873239436619718,
      "grad_norm": 0.06421630084514618,
      "learning_rate": 6.140845070422536e-05,
      "loss": 0.9481,
      "step": 985
    },
    {
      "epoch": 13.943661971830986,
      "grad_norm": 0.03617294877767563,
      "learning_rate": 6.0704225352112676e-05,
      "loss": 0.9467,
      "step": 990
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.9325685501098633,
      "eval_runtime": 1.3557,
      "eval_samples_per_second": 279.559,
      "eval_steps_per_second": 2.213,
      "step": 994
    },
    {
      "epoch": 14.014084507042254,
      "grad_norm": 0.05025329068303108,
      "learning_rate": 6e-05,
      "loss": 0.9447,
      "step": 995
    },
    {
      "epoch": 14.084507042253522,
      "grad_norm": 0.03912140801548958,
      "learning_rate": 5.929577464788733e-05,
      "loss": 0.9468,
      "step": 1000
    },
    {
      "epoch": 14.154929577464788,
      "grad_norm": 0.030682148411870003,
      "learning_rate": 5.859154929577465e-05,
      "loss": 0.9454,
      "step": 1005
    },
    {
      "epoch": 14.225352112676056,
      "grad_norm": 0.06476651132106781,
      "learning_rate": 5.788732394366198e-05,
      "loss": 0.946,
      "step": 1010
    },
    {
      "epoch": 14.295774647887324,
      "grad_norm": 0.01931784488260746,
      "learning_rate": 5.71830985915493e-05,
      "loss": 0.9458,
      "step": 1015
    },
    {
      "epoch": 14.366197183098592,
      "grad_norm": 0.030390406027436256,
      "learning_rate": 5.647887323943662e-05,
      "loss": 0.9464,
      "step": 1020
    },
    {
      "epoch": 14.43661971830986,
      "grad_norm": 0.060740068554878235,
      "learning_rate": 5.577464788732395e-05,
      "loss": 0.9459,
      "step": 1025
    },
    {
      "epoch": 14.507042253521126,
      "grad_norm": 0.034336596727371216,
      "learning_rate": 5.5070422535211273e-05,
      "loss": 0.9454,
      "step": 1030
    },
    {
      "epoch": 14.577464788732394,
      "grad_norm": 0.04022248461842537,
      "learning_rate": 5.43661971830986e-05,
      "loss": 0.9459,
      "step": 1035
    },
    {
      "epoch": 14.647887323943662,
      "grad_norm": 0.03897751495242119,
      "learning_rate": 5.366197183098591e-05,
      "loss": 0.948,
      "step": 1040
    },
    {
      "epoch": 14.71830985915493,
      "grad_norm": 0.017990631982684135,
      "learning_rate": 5.2957746478873237e-05,
      "loss": 0.9461,
      "step": 1045
    },
    {
      "epoch": 14.788732394366198,
      "grad_norm": 0.04828361049294472,
      "learning_rate": 5.225352112676056e-05,
      "loss": 0.9473,
      "step": 1050
    },
    {
      "epoch": 14.859154929577464,
      "grad_norm": 0.040516018867492676,
      "learning_rate": 5.154929577464789e-05,
      "loss": 0.9466,
      "step": 1055
    },
    {
      "epoch": 14.929577464788732,
      "grad_norm": 0.023633386939764023,
      "learning_rate": 5.084507042253521e-05,
      "loss": 0.9483,
      "step": 1060
    },
    {
      "epoch": 15.0,
      "grad_norm": 0.03356311842799187,
      "learning_rate": 5.014084507042254e-05,
      "loss": 0.9433,
      "step": 1065
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.9321981072425842,
      "eval_runtime": 1.3288,
      "eval_samples_per_second": 285.221,
      "eval_steps_per_second": 2.258,
      "step": 1065
    },
    {
      "epoch": 15.070422535211268,
      "grad_norm": 0.031062249094247818,
      "learning_rate": 4.9436619718309864e-05,
      "loss": 0.9449,
      "step": 1070
    },
    {
      "epoch": 15.140845070422536,
      "grad_norm": 0.03606761246919632,
      "learning_rate": 4.873239436619719e-05,
      "loss": 0.9452,
      "step": 1075
    },
    {
      "epoch": 15.211267605633802,
      "grad_norm": 0.03723893314599991,
      "learning_rate": 4.8028169014084515e-05,
      "loss": 0.9495,
      "step": 1080
    },
    {
      "epoch": 15.28169014084507,
      "grad_norm": 0.022561002522706985,
      "learning_rate": 4.7323943661971834e-05,
      "loss": 0.9451,
      "step": 1085
    },
    {
      "epoch": 15.352112676056338,
      "grad_norm": 0.03934817761182785,
      "learning_rate": 4.661971830985915e-05,
      "loss": 0.9465,
      "step": 1090
    },
    {
      "epoch": 15.422535211267606,
      "grad_norm": 0.021020477637648582,
      "learning_rate": 4.591549295774648e-05,
      "loss": 0.9458,
      "step": 1095
    },
    {
      "epoch": 15.492957746478874,
      "grad_norm": 0.029691854491829872,
      "learning_rate": 4.5211267605633804e-05,
      "loss": 0.9464,
      "step": 1100
    },
    {
      "epoch": 15.56338028169014,
      "grad_norm": 0.030643166974186897,
      "learning_rate": 4.450704225352113e-05,
      "loss": 0.9438,
      "step": 1105
    },
    {
      "epoch": 15.633802816901408,
      "grad_norm": 0.0261048823595047,
      "learning_rate": 4.3802816901408455e-05,
      "loss": 0.9442,
      "step": 1110
    },
    {
      "epoch": 15.704225352112676,
      "grad_norm": 0.04185587912797928,
      "learning_rate": 4.3098591549295774e-05,
      "loss": 0.9455,
      "step": 1115
    },
    {
      "epoch": 15.774647887323944,
      "grad_norm": 0.03181832283735275,
      "learning_rate": 4.23943661971831e-05,
      "loss": 0.9452,
      "step": 1120
    },
    {
      "epoch": 15.845070422535212,
      "grad_norm": 0.017347080633044243,
      "learning_rate": 4.1690140845070425e-05,
      "loss": 0.9445,
      "step": 1125
    },
    {
      "epoch": 15.915492957746478,
      "grad_norm": 0.0175313837826252,
      "learning_rate": 4.098591549295775e-05,
      "loss": 0.9462,
      "step": 1130
    },
    {
      "epoch": 15.985915492957746,
      "grad_norm": 0.016237597912549973,
      "learning_rate": 4.0281690140845076e-05,
      "loss": 0.9441,
      "step": 1135
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.932042121887207,
      "eval_runtime": 1.3559,
      "eval_samples_per_second": 279.512,
      "eval_steps_per_second": 2.212,
      "step": 1136
    },
    {
      "epoch": 16.056338028169016,
      "grad_norm": 0.02838326431810856,
      "learning_rate": 3.9577464788732395e-05,
      "loss": 0.9443,
      "step": 1140
    },
    {
      "epoch": 16.12676056338028,
      "grad_norm": 0.03755342960357666,
      "learning_rate": 3.887323943661972e-05,
      "loss": 0.9448,
      "step": 1145
    },
    {
      "epoch": 16.197183098591548,
      "grad_norm": 0.019139522686600685,
      "learning_rate": 3.8169014084507046e-05,
      "loss": 0.945,
      "step": 1150
    },
    {
      "epoch": 16.267605633802816,
      "grad_norm": 0.029503419995307922,
      "learning_rate": 3.746478873239437e-05,
      "loss": 0.9437,
      "step": 1155
    },
    {
      "epoch": 16.338028169014084,
      "grad_norm": 0.02175034210085869,
      "learning_rate": 3.676056338028169e-05,
      "loss": 0.9458,
      "step": 1160
    },
    {
      "epoch": 16.408450704225352,
      "grad_norm": 0.041107743978500366,
      "learning_rate": 3.6056338028169015e-05,
      "loss": 0.9462,
      "step": 1165
    },
    {
      "epoch": 16.47887323943662,
      "grad_norm": 0.03304363787174225,
      "learning_rate": 3.5352112676056334e-05,
      "loss": 0.9456,
      "step": 1170
    },
    {
      "epoch": 16.549295774647888,
      "grad_norm": 0.02730601467192173,
      "learning_rate": 3.464788732394366e-05,
      "loss": 0.9447,
      "step": 1175
    },
    {
      "epoch": 16.619718309859156,
      "grad_norm": 0.03291373327374458,
      "learning_rate": 3.3943661971830985e-05,
      "loss": 0.9452,
      "step": 1180
    },
    {
      "epoch": 16.690140845070424,
      "grad_norm": 0.023141978308558464,
      "learning_rate": 3.323943661971831e-05,
      "loss": 0.9437,
      "step": 1185
    },
    {
      "epoch": 16.760563380281692,
      "grad_norm": 0.02332274429500103,
      "learning_rate": 3.2535211267605636e-05,
      "loss": 0.9438,
      "step": 1190
    },
    {
      "epoch": 16.830985915492956,
      "grad_norm": 0.050522513687610626,
      "learning_rate": 3.183098591549296e-05,
      "loss": 0.9437,
      "step": 1195
    },
    {
      "epoch": 16.901408450704224,
      "grad_norm": 0.02046947181224823,
      "learning_rate": 3.112676056338028e-05,
      "loss": 0.9453,
      "step": 1200
    },
    {
      "epoch": 16.971830985915492,
      "grad_norm": 0.06999664008617401,
      "learning_rate": 3.0422535211267606e-05,
      "loss": 0.9453,
      "step": 1205
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.9319289326667786,
      "eval_runtime": 1.3556,
      "eval_samples_per_second": 279.59,
      "eval_steps_per_second": 2.213,
      "step": 1207
    },
    {
      "epoch": 17.04225352112676,
      "grad_norm": 0.027964303269982338,
      "learning_rate": 2.971830985915493e-05,
      "loss": 0.9437,
      "step": 1210
    },
    {
      "epoch": 17.112676056338028,
      "grad_norm": 0.021849192678928375,
      "learning_rate": 2.9014084507042254e-05,
      "loss": 0.9451,
      "step": 1215
    },
    {
      "epoch": 17.183098591549296,
      "grad_norm": 0.06382541358470917,
      "learning_rate": 2.830985915492958e-05,
      "loss": 0.9446,
      "step": 1220
    },
    {
      "epoch": 17.253521126760564,
      "grad_norm": 0.018395431339740753,
      "learning_rate": 2.7605633802816905e-05,
      "loss": 0.9449,
      "step": 1225
    },
    {
      "epoch": 17.323943661971832,
      "grad_norm": 0.02647424302995205,
      "learning_rate": 2.6901408450704224e-05,
      "loss": 0.943,
      "step": 1230
    },
    {
      "epoch": 17.3943661971831,
      "grad_norm": 0.053954143077135086,
      "learning_rate": 2.619718309859155e-05,
      "loss": 0.9451,
      "step": 1235
    },
    {
      "epoch": 17.464788732394368,
      "grad_norm": 0.03957201540470123,
      "learning_rate": 2.5492957746478875e-05,
      "loss": 0.9448,
      "step": 1240
    },
    {
      "epoch": 17.535211267605632,
      "grad_norm": 0.0455038957297802,
      "learning_rate": 2.47887323943662e-05,
      "loss": 0.9441,
      "step": 1245
    },
    {
      "epoch": 17.6056338028169,
      "grad_norm": 0.03737751021981239,
      "learning_rate": 2.4084507042253522e-05,
      "loss": 0.9469,
      "step": 1250
    },
    {
      "epoch": 17.676056338028168,
      "grad_norm": 0.038107406347990036,
      "learning_rate": 2.3380281690140845e-05,
      "loss": 0.9445,
      "step": 1255
    },
    {
      "epoch": 17.746478873239436,
      "grad_norm": 0.04548390954732895,
      "learning_rate": 2.267605633802817e-05,
      "loss": 0.9451,
      "step": 1260
    },
    {
      "epoch": 17.816901408450704,
      "grad_norm": 0.03373611345887184,
      "learning_rate": 2.1971830985915496e-05,
      "loss": 0.9438,
      "step": 1265
    },
    {
      "epoch": 17.887323943661972,
      "grad_norm": 0.029089247807860374,
      "learning_rate": 2.1267605633802818e-05,
      "loss": 0.9435,
      "step": 1270
    },
    {
      "epoch": 17.95774647887324,
      "grad_norm": 0.027908792719244957,
      "learning_rate": 2.0563380281690143e-05,
      "loss": 0.9433,
      "step": 1275
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.9321526885032654,
      "eval_runtime": 1.3562,
      "eval_samples_per_second": 279.464,
      "eval_steps_per_second": 2.212,
      "step": 1278
    },
    {
      "epoch": 18.028169014084508,
      "grad_norm": 0.038007136434316635,
      "learning_rate": 1.9859154929577465e-05,
      "loss": 0.9433,
      "step": 1280
    },
    {
      "epoch": 18.098591549295776,
      "grad_norm": 0.055055730044841766,
      "learning_rate": 1.9154929577464788e-05,
      "loss": 0.9444,
      "step": 1285
    },
    {
      "epoch": 18.169014084507044,
      "grad_norm": 0.024233724921941757,
      "learning_rate": 1.8450704225352113e-05,
      "loss": 0.9448,
      "step": 1290
    },
    {
      "epoch": 18.239436619718308,
      "grad_norm": 0.02525465376675129,
      "learning_rate": 1.774647887323944e-05,
      "loss": 0.9452,
      "step": 1295
    },
    {
      "epoch": 18.309859154929576,
      "grad_norm": 0.03696692734956741,
      "learning_rate": 1.704225352112676e-05,
      "loss": 0.9451,
      "step": 1300
    },
    {
      "epoch": 18.380281690140844,
      "grad_norm": 0.012775393202900887,
      "learning_rate": 1.6338028169014086e-05,
      "loss": 0.9424,
      "step": 1305
    },
    {
      "epoch": 18.450704225352112,
      "grad_norm": 0.04282708466053009,
      "learning_rate": 1.5633802816901412e-05,
      "loss": 0.9439,
      "step": 1310
    },
    {
      "epoch": 18.52112676056338,
      "grad_norm": 0.026728734374046326,
      "learning_rate": 1.4929577464788732e-05,
      "loss": 0.9432,
      "step": 1315
    },
    {
      "epoch": 18.591549295774648,
      "grad_norm": 0.038278259336948395,
      "learning_rate": 1.4225352112676058e-05,
      "loss": 0.9437,
      "step": 1320
    },
    {
      "epoch": 18.661971830985916,
      "grad_norm": 0.0476648211479187,
      "learning_rate": 1.352112676056338e-05,
      "loss": 0.9442,
      "step": 1325
    },
    {
      "epoch": 18.732394366197184,
      "grad_norm": 0.017152875661849976,
      "learning_rate": 1.2816901408450704e-05,
      "loss": 0.9439,
      "step": 1330
    },
    {
      "epoch": 18.802816901408452,
      "grad_norm": 0.02195524424314499,
      "learning_rate": 1.211267605633803e-05,
      "loss": 0.9441,
      "step": 1335
    },
    {
      "epoch": 18.87323943661972,
      "grad_norm": 0.03179704770445824,
      "learning_rate": 1.1408450704225353e-05,
      "loss": 0.944,
      "step": 1340
    },
    {
      "epoch": 18.943661971830984,
      "grad_norm": 0.01666986383497715,
      "learning_rate": 1.0704225352112677e-05,
      "loss": 0.9436,
      "step": 1345
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.9319114685058594,
      "eval_runtime": 1.3555,
      "eval_samples_per_second": 279.596,
      "eval_steps_per_second": 2.213,
      "step": 1349
    },
    {
      "epoch": 19.014084507042252,
      "grad_norm": 0.027670254930853844,
      "learning_rate": 1e-05,
      "loss": 0.9441,
      "step": 1350
    },
    {
      "epoch": 19.08450704225352,
      "grad_norm": 0.05388191342353821,
      "learning_rate": 9.295774647887325e-06,
      "loss": 0.9445,
      "step": 1355
    },
    {
      "epoch": 19.154929577464788,
      "grad_norm": 0.02966221235692501,
      "learning_rate": 8.591549295774648e-06,
      "loss": 0.9432,
      "step": 1360
    },
    {
      "epoch": 19.225352112676056,
      "grad_norm": 0.02553911693394184,
      "learning_rate": 7.887323943661972e-06,
      "loss": 0.9434,
      "step": 1365
    },
    {
      "epoch": 19.295774647887324,
      "grad_norm": 0.03100278414785862,
      "learning_rate": 7.183098591549296e-06,
      "loss": 0.9438,
      "step": 1370
    },
    {
      "epoch": 19.366197183098592,
      "grad_norm": 0.03018755465745926,
      "learning_rate": 6.47887323943662e-06,
      "loss": 0.9442,
      "step": 1375
    },
    {
      "epoch": 19.43661971830986,
      "grad_norm": 0.019777249544858932,
      "learning_rate": 5.774647887323944e-06,
      "loss": 0.9438,
      "step": 1380
    },
    {
      "epoch": 19.507042253521128,
      "grad_norm": 0.036921270191669464,
      "learning_rate": 5.070422535211268e-06,
      "loss": 0.9435,
      "step": 1385
    },
    {
      "epoch": 19.577464788732396,
      "grad_norm": 0.038511138409376144,
      "learning_rate": 4.3661971830985915e-06,
      "loss": 0.9442,
      "step": 1390
    },
    {
      "epoch": 19.647887323943664,
      "grad_norm": 0.03045705333352089,
      "learning_rate": 3.6619718309859158e-06,
      "loss": 0.9443,
      "step": 1395
    },
    {
      "epoch": 19.718309859154928,
      "grad_norm": 0.037487465888261795,
      "learning_rate": 2.9577464788732396e-06,
      "loss": 0.9435,
      "step": 1400
    },
    {
      "epoch": 19.788732394366196,
      "grad_norm": 0.022748373448848724,
      "learning_rate": 2.2535211267605635e-06,
      "loss": 0.9439,
      "step": 1405
    },
    {
      "epoch": 19.859154929577464,
      "grad_norm": 0.037895116955041885,
      "learning_rate": 1.5492957746478875e-06,
      "loss": 0.9438,
      "step": 1410
    },
    {
      "epoch": 19.929577464788732,
      "grad_norm": 0.02999270148575306,
      "learning_rate": 8.450704225352112e-07,
      "loss": 0.9429,
      "step": 1415
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.03205498680472374,
      "learning_rate": 1.4084507042253522e-07,
      "loss": 0.9441,
      "step": 1420
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.931971549987793,
      "eval_runtime": 1.3285,
      "eval_samples_per_second": 285.277,
      "eval_steps_per_second": 2.258,
      "step": 1420
    }
  ],
  "logging_steps": 5,
  "max_steps": 1420,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.424179214123008e+16,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": null
}