{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.395973154362416,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 1.7916,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.7906,
      "step": 20
    },
    {
      "epoch": 0.2,
      "learning_rate": 3e-06,
      "loss": 1.7887,
      "step": 30
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.7891,
      "step": 40
    },
    {
      "epoch": 0.34,
      "learning_rate": 5e-06,
      "loss": 1.7873,
      "step": 50
    },
    {
      "epoch": 0.4,
      "learning_rate": 6e-06,
      "loss": 1.7828,
      "step": 60
    },
    {
      "epoch": 0.47,
      "learning_rate": 7.000000000000001e-06,
      "loss": 1.7828,
      "step": 70
    },
    {
      "epoch": 0.54,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.7809,
      "step": 80
    },
    {
      "epoch": 0.6,
      "learning_rate": 9e-06,
      "loss": 1.7793,
      "step": 90
    },
    {
      "epoch": 0.67,
      "learning_rate": 1e-05,
      "loss": 1.7742,
      "step": 100
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 1.7633,
      "step": 110
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.2e-05,
      "loss": 1.7617,
      "step": 120
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 1.7601,
      "step": 130
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 1.7457,
      "step": 140
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.3411764705882353,
      "eval_loss": 1.6935209035873413,
      "eval_runtime": 22.6549,
      "eval_samples_per_second": 26.264,
      "eval_steps_per_second": 3.311,
      "step": 149
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.5e-05,
      "loss": 1.7224,
      "step": 150
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.6744,
      "step": 160
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 1.6978,
      "step": 170
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.8e-05,
      "loss": 1.6583,
      "step": 180
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.9e-05,
      "loss": 1.5927,
      "step": 190
    },
    {
      "epoch": 1.34,
      "learning_rate": 2e-05,
      "loss": 1.6234,
      "step": 200
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.1e-05,
      "loss": 1.577,
      "step": 210
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 1.5456,
      "step": 220
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 1.4884,
      "step": 230
    },
    {
      "epoch": 1.61,
      "learning_rate": 2.4e-05,
      "loss": 1.5366,
      "step": 240
    },
    {
      "epoch": 1.68,
      "learning_rate": 2.5e-05,
      "loss": 1.5622,
      "step": 250
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 1.5054,
      "step": 260
    },
    {
      "epoch": 1.81,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 1.5265,
      "step": 270
    },
    {
      "epoch": 1.88,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 1.4786,
      "step": 280
    },
    {
      "epoch": 1.95,
      "learning_rate": 2.9e-05,
      "loss": 1.45,
      "step": 290
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.43529411764705883,
      "eval_loss": 1.4212191104888916,
      "eval_runtime": 22.0113,
      "eval_samples_per_second": 27.032,
      "eval_steps_per_second": 3.407,
      "step": 298
    },
    {
      "epoch": 2.01,
      "learning_rate": 3e-05,
      "loss": 1.5353,
      "step": 300
    },
    {
      "epoch": 2.08,
      "learning_rate": 3.1e-05,
      "loss": 1.4117,
      "step": 310
    },
    {
      "epoch": 2.15,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 1.4371,
      "step": 320
    },
    {
      "epoch": 2.21,
      "learning_rate": 3.3e-05,
      "loss": 1.475,
      "step": 330
    },
    {
      "epoch": 2.28,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 1.4499,
      "step": 340
    },
    {
      "epoch": 2.35,
      "learning_rate": 3.5e-05,
      "loss": 1.4489,
      "step": 350
    },
    {
      "epoch": 2.42,
      "learning_rate": 3.6e-05,
      "loss": 1.4577,
      "step": 360
    },
    {
      "epoch": 2.48,
      "learning_rate": 3.7e-05,
      "loss": 1.3862,
      "step": 370
    },
    {
      "epoch": 2.55,
      "learning_rate": 3.8e-05,
      "loss": 1.3997,
      "step": 380
    },
    {
      "epoch": 2.62,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 1.408,
      "step": 390
    },
    {
      "epoch": 2.68,
      "learning_rate": 4e-05,
      "loss": 1.3716,
      "step": 400
    },
    {
      "epoch": 2.75,
      "learning_rate": 4.1e-05,
      "loss": 1.3804,
      "step": 410
    },
    {
      "epoch": 2.82,
      "learning_rate": 4.2e-05,
      "loss": 1.3478,
      "step": 420
    },
    {
      "epoch": 2.89,
      "learning_rate": 4.3e-05,
      "loss": 1.3447,
      "step": 430
    },
    {
      "epoch": 2.95,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.3691,
      "step": 440
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5563025210084034,
      "eval_loss": 1.1688096523284912,
      "eval_runtime": 21.4253,
      "eval_samples_per_second": 27.771,
      "eval_steps_per_second": 3.501,
      "step": 447
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.5e-05,
      "loss": 1.3441,
      "step": 450
    },
    {
      "epoch": 3.09,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.2688,
      "step": 460
    },
    {
      "epoch": 3.15,
      "learning_rate": 4.7e-05,
      "loss": 1.1629,
      "step": 470
    },
    {
      "epoch": 3.22,
      "learning_rate": 4.8e-05,
      "loss": 1.225,
      "step": 480
    },
    {
      "epoch": 3.29,
      "learning_rate": 4.9e-05,
      "loss": 1.2406,
      "step": 490
    },
    {
      "epoch": 3.36,
      "learning_rate": 5e-05,
      "loss": 1.1661,
      "step": 500
    },
    {
      "epoch": 3.42,
      "learning_rate": 4.94949494949495e-05,
      "loss": 1.2401,
      "step": 510
    },
    {
      "epoch": 3.49,
      "learning_rate": 4.898989898989899e-05,
      "loss": 1.1792,
      "step": 520
    },
    {
      "epoch": 3.56,
      "learning_rate": 4.848484848484849e-05,
      "loss": 1.2781,
      "step": 530
    },
    {
      "epoch": 3.62,
      "learning_rate": 4.797979797979798e-05,
      "loss": 1.1531,
      "step": 540
    },
    {
      "epoch": 3.69,
      "learning_rate": 4.7474747474747476e-05,
      "loss": 1.1768,
      "step": 550
    },
    {
      "epoch": 3.76,
      "learning_rate": 4.696969696969697e-05,
      "loss": 1.1918,
      "step": 560
    },
    {
      "epoch": 3.83,
      "learning_rate": 4.6464646464646464e-05,
      "loss": 1.164,
      "step": 570
    },
    {
      "epoch": 3.89,
      "learning_rate": 4.595959595959596e-05,
      "loss": 1.1254,
      "step": 580
    },
    {
      "epoch": 3.96,
      "learning_rate": 4.545454545454546e-05,
      "loss": 1.1553,
      "step": 590
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6084033613445378,
      "eval_loss": 1.0190620422363281,
      "eval_runtime": 21.3507,
      "eval_samples_per_second": 27.868,
      "eval_steps_per_second": 3.513,
      "step": 596
    },
    {
      "epoch": 4.03,
      "learning_rate": 4.494949494949495e-05,
      "loss": 1.1626,
      "step": 600
    },
    {
      "epoch": 4.09,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 1.1336,
      "step": 610
    },
    {
      "epoch": 4.16,
      "learning_rate": 4.3939393939393944e-05,
      "loss": 1.1021,
      "step": 620
    },
    {
      "epoch": 4.23,
      "learning_rate": 4.343434343434344e-05,
      "loss": 1.1986,
      "step": 630
    },
    {
      "epoch": 4.3,
      "learning_rate": 4.292929292929293e-05,
      "loss": 1.1517,
      "step": 640
    },
    {
      "epoch": 4.36,
      "learning_rate": 4.242424242424243e-05,
      "loss": 1.0188,
      "step": 650
    },
    {
      "epoch": 4.43,
      "learning_rate": 4.191919191919192e-05,
      "loss": 1.0938,
      "step": 660
    },
    {
      "epoch": 4.5,
      "learning_rate": 4.141414141414142e-05,
      "loss": 1.1621,
      "step": 670
    },
    {
      "epoch": 4.56,
      "learning_rate": 4.0909090909090915e-05,
      "loss": 1.0986,
      "step": 680
    },
    {
      "epoch": 4.63,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 1.1434,
      "step": 690
    },
    {
      "epoch": 4.7,
      "learning_rate": 3.98989898989899e-05,
      "loss": 1.0971,
      "step": 700
    },
    {
      "epoch": 4.77,
      "learning_rate": 3.939393939393939e-05,
      "loss": 0.9853,
      "step": 710
    },
    {
      "epoch": 4.83,
      "learning_rate": 3.888888888888889e-05,
      "loss": 1.1469,
      "step": 720
    },
    {
      "epoch": 4.9,
      "learning_rate": 3.838383838383838e-05,
      "loss": 1.0782,
      "step": 730
    },
    {
      "epoch": 4.97,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.9854,
      "step": 740
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.653781512605042,
      "eval_loss": 0.9426462650299072,
      "eval_runtime": 22.7302,
      "eval_samples_per_second": 26.177,
      "eval_steps_per_second": 3.3,
      "step": 745
    },
    {
      "epoch": 5.03,
      "learning_rate": 3.7373737373737376e-05,
      "loss": 1.07,
      "step": 750
    },
    {
      "epoch": 5.1,
      "learning_rate": 3.686868686868687e-05,
      "loss": 1.054,
      "step": 760
    },
    {
      "epoch": 5.17,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 0.9113,
      "step": 770
    },
    {
      "epoch": 5.23,
      "learning_rate": 3.5858585858585855e-05,
      "loss": 1.0446,
      "step": 780
    },
    {
      "epoch": 5.3,
      "learning_rate": 3.535353535353535e-05,
      "loss": 1.074,
      "step": 790
    },
    {
      "epoch": 5.37,
      "learning_rate": 3.484848484848485e-05,
      "loss": 1.0417,
      "step": 800
    },
    {
      "epoch": 5.44,
      "learning_rate": 3.434343434343435e-05,
      "loss": 1.1119,
      "step": 810
    },
    {
      "epoch": 5.5,
      "learning_rate": 3.3838383838383844e-05,
      "loss": 1.056,
      "step": 820
    },
    {
      "epoch": 5.57,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.0155,
      "step": 830
    },
    {
      "epoch": 5.64,
      "learning_rate": 3.282828282828283e-05,
      "loss": 1.0486,
      "step": 840
    },
    {
      "epoch": 5.7,
      "learning_rate": 3.232323232323233e-05,
      "loss": 1.1064,
      "step": 850
    },
    {
      "epoch": 5.77,
      "learning_rate": 3.181818181818182e-05,
      "loss": 0.9373,
      "step": 860
    },
    {
      "epoch": 5.84,
      "learning_rate": 3.131313131313132e-05,
      "loss": 0.9218,
      "step": 870
    },
    {
      "epoch": 5.91,
      "learning_rate": 3.080808080808081e-05,
      "loss": 0.9732,
      "step": 880
    },
    {
      "epoch": 5.97,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 1.088,
      "step": 890
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6756302521008404,
      "eval_loss": 0.9188197255134583,
      "eval_runtime": 22.0566,
      "eval_samples_per_second": 26.976,
      "eval_steps_per_second": 3.4,
      "step": 894
    },
    {
      "epoch": 6.04,
      "learning_rate": 2.9797979797979796e-05,
      "loss": 0.9063,
      "step": 900
    },
    {
      "epoch": 6.11,
      "learning_rate": 2.9292929292929294e-05,
      "loss": 1.0027,
      "step": 910
    },
    {
      "epoch": 6.17,
      "learning_rate": 2.878787878787879e-05,
      "loss": 1.122,
      "step": 920
    },
    {
      "epoch": 6.24,
      "learning_rate": 2.8282828282828282e-05,
      "loss": 0.9835,
      "step": 930
    },
    {
      "epoch": 6.31,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.9308,
      "step": 940
    },
    {
      "epoch": 6.38,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 1.0989,
      "step": 950
    },
    {
      "epoch": 6.44,
      "learning_rate": 2.676767676767677e-05,
      "loss": 0.9864,
      "step": 960
    },
    {
      "epoch": 6.51,
      "learning_rate": 2.6262626262626268e-05,
      "loss": 1.0551,
      "step": 970
    },
    {
      "epoch": 6.58,
      "learning_rate": 2.575757575757576e-05,
      "loss": 1.0034,
      "step": 980
    },
    {
      "epoch": 6.64,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.9375,
      "step": 990
    },
    {
      "epoch": 6.71,
      "learning_rate": 2.474747474747475e-05,
      "loss": 1.0388,
      "step": 1000
    },
    {
      "epoch": 6.78,
      "learning_rate": 2.4242424242424244e-05,
      "loss": 0.9212,
      "step": 1010
    },
    {
      "epoch": 6.85,
      "learning_rate": 2.3737373737373738e-05,
      "loss": 1.0113,
      "step": 1020
    },
    {
      "epoch": 6.91,
      "learning_rate": 2.3232323232323232e-05,
      "loss": 0.9493,
      "step": 1030
    },
    {
      "epoch": 6.98,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.9801,
      "step": 1040
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6789915966386555,
      "eval_loss": 0.9123912453651428,
      "eval_runtime": 21.4548,
      "eval_samples_per_second": 27.733,
      "eval_steps_per_second": 3.496,
      "step": 1043
    },
    {
      "epoch": 7.05,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.9011,
      "step": 1050
    },
    {
      "epoch": 7.11,
      "learning_rate": 2.171717171717172e-05,
      "loss": 0.96,
      "step": 1060
    },
    {
      "epoch": 7.18,
      "learning_rate": 2.1212121212121215e-05,
      "loss": 0.9343,
      "step": 1070
    },
    {
      "epoch": 7.25,
      "learning_rate": 2.070707070707071e-05,
      "loss": 0.9807,
      "step": 1080
    },
    {
      "epoch": 7.32,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.9954,
      "step": 1090
    },
    {
      "epoch": 7.38,
      "learning_rate": 1.9696969696969697e-05,
      "loss": 0.8943,
      "step": 1100
    },
    {
      "epoch": 7.45,
      "learning_rate": 1.919191919191919e-05,
      "loss": 1.004,
      "step": 1110
    },
    {
      "epoch": 7.52,
      "learning_rate": 1.8686868686868688e-05,
      "loss": 0.9443,
      "step": 1120
    },
    {
      "epoch": 7.58,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.9772,
      "step": 1130
    },
    {
      "epoch": 7.65,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.9806,
      "step": 1140
    },
    {
      "epoch": 7.72,
      "learning_rate": 1.7171717171717173e-05,
      "loss": 0.8561,
      "step": 1150
    },
    {
      "epoch": 7.79,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.9052,
      "step": 1160
    },
    {
      "epoch": 7.85,
      "learning_rate": 1.6161616161616165e-05,
      "loss": 0.9653,
      "step": 1170
    },
    {
      "epoch": 7.92,
      "learning_rate": 1.565656565656566e-05,
      "loss": 1.0037,
      "step": 1180
    },
    {
      "epoch": 7.99,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.9003,
      "step": 1190
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6941176470588235,
      "eval_loss": 0.8562597036361694,
      "eval_runtime": 21.0406,
      "eval_samples_per_second": 28.279,
      "eval_steps_per_second": 3.565,
      "step": 1192
    },
    {
      "epoch": 8.05,
      "learning_rate": 1.4646464646464647e-05,
      "loss": 0.9756,
      "step": 1200
    },
    {
      "epoch": 8.12,
      "learning_rate": 1.4141414141414141e-05,
      "loss": 1.0478,
      "step": 1210
    },
    {
      "epoch": 8.19,
      "learning_rate": 1.3636363636363637e-05,
      "loss": 0.8094,
      "step": 1220
    },
    {
      "epoch": 8.26,
      "learning_rate": 1.3131313131313134e-05,
      "loss": 0.9645,
      "step": 1230
    },
    {
      "epoch": 8.32,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.9773,
      "step": 1240
    },
    {
      "epoch": 8.39,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 1.0006,
      "step": 1250
    },
    {
      "epoch": 8.46,
      "learning_rate": 1.1616161616161616e-05,
      "loss": 0.9612,
      "step": 1260
    },
    {
      "epoch": 8.52,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.9107,
      "step": 1270
    },
    {
      "epoch": 8.59,
      "learning_rate": 1.0606060606060607e-05,
      "loss": 0.876,
      "step": 1280
    },
    {
      "epoch": 8.66,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.8718,
      "step": 1290
    },
    {
      "epoch": 8.72,
      "learning_rate": 9.595959595959595e-06,
      "loss": 0.8687,
      "step": 1300
    },
    {
      "epoch": 8.79,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.9235,
      "step": 1310
    },
    {
      "epoch": 8.86,
      "learning_rate": 8.585858585858587e-06,
      "loss": 0.9192,
      "step": 1320
    },
    {
      "epoch": 8.93,
      "learning_rate": 8.080808080808082e-06,
      "loss": 0.9701,
      "step": 1330
    },
    {
      "epoch": 8.99,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.9199,
      "step": 1340
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.7025210084033613,
      "eval_loss": 0.8486049771308899,
      "eval_runtime": 21.6062,
      "eval_samples_per_second": 27.538,
      "eval_steps_per_second": 3.471,
      "step": 1341
    },
    {
      "epoch": 9.06,
      "learning_rate": 7.0707070707070704e-06,
      "loss": 0.8483,
      "step": 1350
    },
    {
      "epoch": 9.13,
      "learning_rate": 6.565656565656567e-06,
      "loss": 0.8321,
      "step": 1360
    },
    {
      "epoch": 9.19,
      "learning_rate": 6.060606060606061e-06,
      "loss": 1.006,
      "step": 1370
    },
    {
      "epoch": 9.26,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.9249,
      "step": 1380
    },
    {
      "epoch": 9.33,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.8196,
      "step": 1390
    },
    {
      "epoch": 9.4,
      "learning_rate": 4.5454545454545455e-06,
      "loss": 0.8964,
      "step": 1400
    }
  ],
  "max_steps": 1490,
  "num_train_epochs": 10,
  "total_flos": 1.3539817231250708e+18,
  "trial_name": null,
  "trial_params": null
}