{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.416,
  "eval_steps": 32,
  "global_step": 832,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0005, "grad_norm": 1.1535558177055725, "learning_rate": 1e-05, "loss": 0.4543, "step": 1 },
    { "epoch": 0.001, "grad_norm": 0.5039215671909039, "learning_rate": 1e-05, "loss": 0.3002, "step": 2 },
    { "epoch": 0.0015, "grad_norm": 0.4111085873239227, "learning_rate": 1e-05, "loss": 0.2138, "step": 3 },
    { "epoch": 0.002, "grad_norm": 0.3258240952011462, "learning_rate": 1e-05, "loss": 0.1761, "step": 4 },
    { "epoch": 0.0025, "grad_norm": 0.25417983371771585, "learning_rate": 1e-05, "loss": 0.1923, "step": 5 },
    { "epoch": 0.003, "grad_norm": 0.2023054568437071, "learning_rate": 1e-05, "loss": 0.2035, "step": 6 },
    { "epoch": 0.0035, "grad_norm": 0.29479546251817934, "learning_rate": 1e-05, "loss": 0.166, "step": 7 },
    { "epoch": 0.004, "grad_norm": 0.2603707531918473, "learning_rate": 1e-05, "loss": 0.1563, "step": 8 },
    { "epoch": 0.0045, "grad_norm": 0.27772543950112105, "learning_rate": 1e-05, "loss": 0.1742, "step": 9 },
    { "epoch": 0.005, "grad_norm": 0.33899668368493546, "learning_rate": 1e-05, "loss": 0.1623, "step": 10 },
    { "epoch": 0.0055, "grad_norm": 0.17293404759978015, "learning_rate": 1e-05, "loss": 0.1902, "step": 11 },
    { "epoch": 0.006, "grad_norm": 0.16812733049304054, "learning_rate": 1e-05, "loss": 0.1505, "step": 12 },
    { "epoch": 0.0065, "grad_norm": 0.24635507048250077, "learning_rate": 1e-05, "loss": 0.1496, "step": 13 },
    { "epoch": 0.007, "grad_norm": 0.23026986852648482, "learning_rate": 1e-05, "loss": 0.1428, "step": 14 },
    { "epoch": 0.0075, "grad_norm": 0.20312493098149043, "learning_rate": 1e-05, "loss": 0.1411, "step": 15 },
    { "epoch": 0.008, "grad_norm": 0.19056569111470645, "learning_rate": 1e-05, "loss": 0.1556, "step": 16 },
    { "epoch": 0.0085, "grad_norm": 0.19180586046592904, "learning_rate": 1e-05, "loss": 0.1448, "step": 17 },
    { "epoch": 0.009, "grad_norm": 0.23670452313674922, "learning_rate": 1e-05, "loss": 0.1272, "step": 18 },
    { "epoch": 0.0095, "grad_norm": 0.2241037709056984, "learning_rate": 1e-05, "loss": 0.1584, "step": 19 },
    { "epoch": 0.01, "grad_norm": 0.1828368631692288, "learning_rate": 1e-05, "loss": 0.1347, "step": 20 },
    { "epoch": 0.0105, "grad_norm": 0.23115912688140353, "learning_rate": 1e-05, "loss": 0.1293, "step": 21 },
    { "epoch": 0.011, "grad_norm": 0.18886903067681482, "learning_rate": 1e-05, "loss": 0.1175, "step": 22 },
    { "epoch": 0.0115, "grad_norm": 0.19723236256202736, "learning_rate": 1e-05, "loss": 0.1665, "step": 23 },
    { "epoch": 0.012, "grad_norm": 0.2141017706399641, "learning_rate": 1e-05, "loss": 0.1491, "step": 24 },
    { "epoch": 0.0125, "grad_norm": 0.19090893978368054, "learning_rate": 1e-05, "loss": 0.1113, "step": 25 },
    { "epoch": 0.013, "grad_norm": 0.2460352826446744, "learning_rate": 1e-05, "loss": 0.1142, "step": 26 },
    { "epoch": 0.0135, "grad_norm": 0.21847319513439203, "learning_rate": 1e-05, "loss": 0.1273, "step": 27 },
    { "epoch": 0.014, "grad_norm": 0.18979686333150375, "learning_rate": 1e-05, "loss": 0.1441, "step": 28 },
    { "epoch": 0.0145, "grad_norm": 0.18886921913659824, "learning_rate": 1e-05, "loss": 0.1481, "step": 29 },
    { "epoch": 0.015, "grad_norm": 0.2115532479997522, "learning_rate": 1e-05, "loss": 0.0792, "step": 30 },
    { "epoch": 0.0155, "grad_norm": 0.20266301051804922, "learning_rate": 1e-05, "loss": 0.1379, "step": 31 },
    { "epoch": 0.016, "grad_norm": 0.1865900767381873, "learning_rate": 1e-05, "loss": 0.1453, "step": 32 },
    { "epoch": 0.016, "eval_dev_acc": 0.61328125, "eval_dev_token": 5204.017578125, "eval_runtime": 351.6008, "eval_samples_per_second": 0.182, "eval_steps_per_second": 0.003, "step": 32 },
    { "epoch": 0.0165, "grad_norm": 0.23402804608211233, "learning_rate": 1e-05, "loss": 0.1461, "step": 33 },
    { "epoch": 0.017, "grad_norm": 0.2191224488373381, "learning_rate": 1e-05, "loss": 0.1076, "step": 34 },
    { "epoch": 0.0175, "grad_norm": 0.18221820677824999, "learning_rate": 1e-05, "loss": 0.138, "step": 35 },
    { "epoch": 0.018, "grad_norm": 0.20326615377722793, "learning_rate": 1e-05, "loss": 0.1021, "step": 36 },
    { "epoch": 0.0185, "grad_norm": 0.2449658821374275, "learning_rate": 1e-05, "loss": 0.0881, "step": 37 },
    { "epoch": 0.019, "grad_norm": 0.2068478649138205, "learning_rate": 1e-05, "loss": 0.1249, "step": 38 },
    { "epoch": 0.0195, "grad_norm": 0.22630770783782977, "learning_rate": 1e-05, "loss": 0.1259, "step": 39 },
    { "epoch": 0.02, "grad_norm": 0.21173641583720768, "learning_rate": 1e-05, "loss": 0.1331, "step": 40 },
    { "epoch": 0.0205, "grad_norm": 0.23616623193061376, "learning_rate": 1e-05, "loss": 0.1192, "step": 41 },
    { "epoch": 0.021, "grad_norm": 0.20789242969830385, "learning_rate": 1e-05, "loss": 0.159, "step": 42 },
    { "epoch": 0.0215, "grad_norm": 0.21662842275351119, "learning_rate": 1e-05, "loss": 0.1455, "step": 43 },
    { "epoch": 0.022, "grad_norm": 0.224914886884455, "learning_rate": 1e-05, "loss": 0.1565, "step": 44 },
    { "epoch": 0.0225, "grad_norm": 0.17642201019062015, "learning_rate": 1e-05, "loss": 0.1022, "step": 45 },
    { "epoch": 0.023, "grad_norm": 0.19476006095917964, "learning_rate": 1e-05, "loss": 0.1738, "step": 46 },
    { "epoch": 0.0235, "grad_norm": 0.2041987726527936, "learning_rate": 1e-05, "loss": 0.1545, "step": 47 },
    { "epoch": 0.024, "grad_norm": 0.15364946520211809, "learning_rate": 1e-05, "loss": 0.141, "step": 48 },
    { "epoch": 0.0245, "grad_norm": 0.1632596100583654, "learning_rate": 1e-05, "loss": 0.1493, "step": 49 },
    { "epoch": 0.025, "grad_norm": 0.23305215604624085, "learning_rate": 1e-05, "loss": 0.1164, "step": 50 },
    { "epoch": 0.0255, "grad_norm": 0.2697225282405861, "learning_rate": 1e-05, "loss": 0.0921, "step": 51 },
    { "epoch": 0.026, "grad_norm": 0.19242004892152365, "learning_rate": 1e-05, "loss": 0.1224, "step": 52 },
    { "epoch": 0.0265, "grad_norm": 0.21422189358330607, "learning_rate": 1e-05, "loss": 0.1084, "step": 53 },
    { "epoch": 0.027, "grad_norm": 0.22368520523252378, "learning_rate": 1e-05, "loss": 0.1018, "step": 54 },
    { "epoch": 0.0275, "grad_norm": 0.15315126193234804, "learning_rate": 1e-05, "loss": 0.117, "step": 55 },
    { "epoch": 0.028, "grad_norm": 0.2042745134173473, "learning_rate": 1e-05, "loss": 0.1355, "step": 56 },
    { "epoch": 0.0285, "grad_norm": 0.23889007522498773, "learning_rate": 1e-05, "loss": 0.1387, "step": 57 },
    { "epoch": 0.029, "grad_norm": 0.17240068749452392, "learning_rate": 1e-05, "loss": 0.1634, "step": 58 },
    { "epoch": 0.0295, "grad_norm": 0.1899912754500666, "learning_rate": 1e-05, "loss": 0.1027, "step": 59 },
    { "epoch": 0.03, "grad_norm": 0.17118724561465037, "learning_rate": 1e-05, "loss": 0.1398, "step": 60 },
    { "epoch": 0.0305, "grad_norm": 0.19950059905690032, "learning_rate": 1e-05, "loss": 0.1118, "step": 61 },
    { "epoch": 0.031, "grad_norm": 0.19928365636144094, "learning_rate": 1e-05, "loss": 0.1146, "step": 62 },
    { "epoch": 0.0315, "grad_norm": 0.23146246196039105, "learning_rate": 1e-05, "loss": 0.0871, "step": 63 },
    { "epoch": 0.032, "grad_norm": 0.22746980065915193, "learning_rate": 1e-05, "loss": 0.1673, "step": 64 },
    { "epoch": 0.032, "eval_dev_acc": 0.541015625, "eval_dev_token": 5677.76171875, "eval_runtime": 358.7405, "eval_samples_per_second": 0.178, "eval_steps_per_second": 0.003, "step": 64 },
    { "epoch": 0.0325, "grad_norm": 0.1992092305273338, "learning_rate": 1e-05, "loss": 0.1292, "step": 65 },
    { "epoch": 0.033, "grad_norm": 0.19429880128063629, "learning_rate": 1e-05, "loss": 0.0911, "step": 66 },
    { "epoch": 0.0335, "grad_norm": 0.1287473705431077, "learning_rate": 1e-05, "loss": 0.1377, "step": 67 },
    { "epoch": 0.034, "grad_norm": 0.2228992327697556, "learning_rate": 1e-05, "loss": 0.0933, "step": 68 },
    { "epoch": 0.0345, "grad_norm": 0.18793882506839266, "learning_rate": 1e-05, "loss": 0.1097, "step": 69 },
    { "epoch": 0.035, "grad_norm": 0.1932965061071618, "learning_rate": 1e-05, "loss": 0.1062, "step": 70 },
    { "epoch": 0.0355, "grad_norm": 0.20585403698562318, "learning_rate": 1e-05, "loss": 0.1279, "step": 71 },
    { "epoch": 0.036, "grad_norm": 0.17833203451544913, "learning_rate": 1e-05, "loss": 0.1353, "step": 72 },
    { "epoch": 0.0365, "grad_norm": 0.21658266347490054, "learning_rate": 1e-05, "loss": 0.0961, "step": 73 },
    { "epoch": 0.037, "grad_norm": 0.2364173046757495, "learning_rate": 1e-05, "loss": 0.1038, "step": 74 },
    { "epoch": 0.0375, "grad_norm": 0.20844999333456934, "learning_rate": 1e-05, "loss": 0.1266, "step": 75 },
    { "epoch": 0.038, "grad_norm": 0.2288998611422715, "learning_rate": 1e-05, "loss": 0.0872, "step": 76 },
    { "epoch": 0.0385, "grad_norm": 0.1878179335817694, "learning_rate": 1e-05, "loss": 0.1038, "step": 77 },
    { "epoch": 0.039, "grad_norm": 0.19984002821227043, "learning_rate": 1e-05, "loss": 0.1376, "step": 78 },
    { "epoch": 0.0395, "grad_norm": 0.20050327741314344, "learning_rate": 1e-05, "loss": 0.135, "step": 79 },
    { "epoch": 0.04, "grad_norm": 0.19353835667751798, "learning_rate": 1e-05, "loss": 0.12, "step": 80 },
    { "epoch": 0.0405, "grad_norm": 0.12986340217496, "learning_rate": 1e-05, "loss": 0.1754, "step": 81 },
    { "epoch": 0.041, "grad_norm": 0.1889393654868388, "learning_rate": 1e-05, "loss": 0.1206, "step": 82 },
    { "epoch": 0.0415, "grad_norm": 0.17201063783314552, "learning_rate": 1e-05, "loss": 0.1401, "step": 83 },
    { "epoch": 0.042, "grad_norm": 0.19004092111820917, "learning_rate": 1e-05, "loss": 0.122, "step": 84 },
    { "epoch": 0.0425, "grad_norm": 0.21797692428743218, "learning_rate": 1e-05, "loss": 0.127, "step": 85 },
    { "epoch": 0.043, "grad_norm": 0.16779726003397347, "learning_rate": 1e-05, "loss": 0.1437, "step": 86 },
    { "epoch": 0.0435, "grad_norm": 0.23214262430834917, "learning_rate": 1e-05, "loss": 0.1187, "step": 87 },
    { "epoch": 0.044, "grad_norm": 0.19415443790822307, "learning_rate": 1e-05, "loss": 0.1063, "step": 88 },
    { "epoch": 0.0445, "grad_norm": 0.19254517148646239, "learning_rate": 1e-05, "loss": 0.1039, "step": 89 },
    { "epoch": 0.045, "grad_norm": 0.17307659461239167, "learning_rate": 1e-05, "loss": 0.1554, "step": 90 },
    { "epoch": 0.0455, "grad_norm": 0.24265029122082277, "learning_rate": 1e-05, "loss": 0.1387, "step": 91 },
    { "epoch": 0.046, "grad_norm": 0.17107218032177454, "learning_rate": 1e-05, "loss": 0.1301, "step": 92 },
    { "epoch": 0.0465, "grad_norm": 0.18075891622609033, "learning_rate": 1e-05, "loss": 0.1174, "step": 93 },
    { "epoch": 0.047, "grad_norm": 0.21595935885391185, "learning_rate": 1e-05, "loss": 0.1129, "step": 94 },
    { "epoch": 0.0475, "grad_norm": 0.23262812126963384, "learning_rate": 1e-05, "loss": 0.0983, "step": 95 },
    { "epoch": 0.048, "grad_norm": 0.20420999614021612, "learning_rate": 1e-05, "loss": 0.1161, "step": 96 },
    { "epoch": 0.048, "eval_dev_acc": 0.63671875, "eval_dev_token": 5515.37109375, "eval_runtime": 348.8656, "eval_samples_per_second": 0.183, "eval_steps_per_second": 0.003, "step": 96 },
    { "epoch": 0.0485, "grad_norm": 0.22980199994832454, "learning_rate": 1e-05, "loss": 0.1, "step": 97 },
    { "epoch": 0.049, "grad_norm": 0.19262135795217625, "learning_rate": 1e-05, "loss": 0.1124, "step": 98 },
    { "epoch": 0.0495, "grad_norm": 0.22869326649776367, "learning_rate": 1e-05, "loss": 0.1094, "step": 99 },
    { "epoch": 0.05, "grad_norm": 0.24173954720541516, "learning_rate": 1e-05, "loss": 0.0847, "step": 100 },
    { "epoch": 0.0505, "grad_norm": 0.2332215752101726, "learning_rate": 1e-05, "loss": 0.0946, "step": 101 },
    { "epoch": 0.051, "grad_norm": 0.26475494931892063, "learning_rate": 1e-05, "loss": 0.1094, "step": 102 },
    { "epoch": 0.0515, "grad_norm": 0.20833383536152603, "learning_rate": 1e-05, "loss": 0.0931, "step": 103 },
    { "epoch": 0.052, "grad_norm": 0.22676431614598225, "learning_rate": 1e-05, "loss": 0.0866, "step": 104 },
    { "epoch": 0.0525, "grad_norm": 0.22204744101927545, "learning_rate": 1e-05, "loss": 0.0984, "step": 105 },
    { "epoch": 0.053, "grad_norm": 0.2012349417230909, "learning_rate": 1e-05, "loss": 0.0899, "step": 106 },
    { "epoch": 0.0535, "grad_norm": 0.20131082411517556, "learning_rate": 1e-05, "loss": 0.095, "step": 107 },
    { "epoch": 0.054, "grad_norm": 0.2501268960199406, "learning_rate": 1e-05, "loss": 0.0872, "step": 108 },
    { "epoch": 0.0545, "grad_norm": 0.1877564425582315, "learning_rate": 1e-05, "loss": 0.1447, "step": 109 },
    { "epoch": 0.055, "grad_norm": 0.26373944955124323, "learning_rate": 1e-05, "loss": 0.1104, "step": 110 },
    { "epoch": 0.0555, "grad_norm": 0.2201267469286863, "learning_rate": 1e-05, "loss": 0.0864, "step": 111 },
    { "epoch": 0.056, "grad_norm": 0.2584324977531668, "learning_rate": 1e-05, "loss": 0.1243, "step": 112 },
    { "epoch": 0.0565, "grad_norm": 0.21251509346212935, "learning_rate": 1e-05, "loss": 0.1287, "step": 113 },
    { "epoch": 0.057, "grad_norm": 0.2391921924682281, "learning_rate": 1e-05, "loss": 0.1174, "step": 114 },
    { "epoch": 0.0575, "grad_norm": 0.2250627442441596, "learning_rate": 1e-05, "loss": 0.0961, "step": 115 },
    { "epoch": 0.058, "grad_norm": 0.21589217619835932, "learning_rate": 1e-05, "loss": 0.1041, "step": 116 },
    { "epoch": 0.0585, "grad_norm": 0.23947622053978754, "learning_rate": 1e-05, "loss": 0.1027, "step": 117 },
    { "epoch": 0.059, "grad_norm": 0.20944871960722214, "learning_rate": 1e-05, "loss": 0.1117, "step": 118 },
    { "epoch": 0.0595, "grad_norm": 0.2025724984657677, "learning_rate": 1e-05, "loss": 0.1102, "step": 119 },
    { "epoch": 0.06, "grad_norm": 0.1801908473330023, "learning_rate": 1e-05, "loss": 0.1072, "step": 120 },
    { "epoch": 0.0605, "grad_norm": 0.1940191661946107, "learning_rate": 1e-05, "loss": 0.0904, "step": 121 },
    { "epoch": 0.061, "grad_norm": 0.21867530545592728, "learning_rate": 1e-05, "loss": 0.1193, "step": 122 },
    { "epoch": 0.0615, "grad_norm": 0.22225462024793874, "learning_rate": 1e-05, "loss": 0.0782, "step": 123 },
    { "epoch": 0.062, "grad_norm": 0.24241920616546134, "learning_rate": 1e-05, "loss": 0.1099, "step": 124 },
    { "epoch": 0.0625, "grad_norm": 0.21360306106975577, "learning_rate": 1e-05, "loss": 0.091, "step": 125 },
    { "epoch": 0.063, "grad_norm": 0.2007423950283759, "learning_rate": 1e-05, "loss": 0.1152, "step": 126 },
    { "epoch": 0.0635, "grad_norm": 0.19343038731295426, "learning_rate": 1e-05, "loss": 0.1181, "step": 127 },
    { "epoch": 0.064, "grad_norm": 0.234786663580031, "learning_rate": 1e-05, "loss": 0.1149, "step": 128 },
    { "epoch": 0.064, "eval_dev_acc": 0.548828125, "eval_dev_token": 5586.20703125, "eval_runtime": 364.0015, "eval_samples_per_second": 0.176, "eval_steps_per_second": 0.003, "step": 128 },
    { "epoch": 0.0645, "grad_norm": 0.20468481909378916, "learning_rate": 1e-05, "loss": 0.0976, "step": 129 },
    { "epoch": 0.065, "grad_norm": 0.25320635754138643, "learning_rate": 1e-05, "loss": 0.1183, "step": 130 },
    { "epoch": 0.0655, "grad_norm": 0.21530698126365438, "learning_rate": 1e-05, "loss": 0.1156, "step": 131 },
    { "epoch": 0.066, "grad_norm": 0.20489101859011527, "learning_rate": 1e-05, "loss": 0.0991, "step": 132 },
    { "epoch": 0.0665, "grad_norm": 0.21113632835377186, "learning_rate": 1e-05, "loss": 0.0838, "step": 133 },
    { "epoch": 0.067, "grad_norm": 0.18341595697478763, "learning_rate": 1e-05, "loss": 0.1036, "step": 134 },
    { "epoch": 0.0675, "grad_norm": 0.1762785527366556, "learning_rate": 1e-05, "loss": 0.1674, "step": 135 },
    { "epoch": 0.068, "grad_norm": 0.2632947258005063, "learning_rate": 1e-05, "loss": 0.1202, "step": 136 },
    { "epoch": 0.0685, "grad_norm": 0.21085919863317307, "learning_rate": 1e-05, "loss": 0.1131, "step": 137 },
    { "epoch": 0.069, "grad_norm": 0.19457697084640746, "learning_rate": 1e-05, "loss": 0.1005, "step": 138 },
    { "epoch": 0.0695, "grad_norm": 0.17119664823512107, "learning_rate": 1e-05, "loss": 0.1389, "step": 139 },
    { "epoch": 0.07, "grad_norm": 0.19084161070023772, "learning_rate": 1e-05, "loss": 0.1527, "step": 140 },
    { "epoch": 0.0705, "grad_norm": 0.19580784724822164, "learning_rate": 1e-05, "loss": 0.1175, "step": 141 },
    { "epoch": 0.071, "grad_norm": 0.2110588281462844, "learning_rate": 1e-05, "loss": 0.1062, "step": 142 },
    { "epoch": 0.0715, "grad_norm": 0.20012341575489243, "learning_rate": 1e-05, "loss": 0.0848, "step": 143 },
    { "epoch": 0.072, "grad_norm": 0.1940479696118561, "learning_rate": 1e-05, "loss": 0.0694, "step": 144 },
    { "epoch": 0.0725, "grad_norm": 0.21593290579494073, "learning_rate": 1e-05, "loss": 0.0766, "step": 145 },
    { "epoch": 0.073, "grad_norm": 0.22638726501654005, "learning_rate": 1e-05, "loss": 0.1084, "step": 146 },
    { "epoch": 0.0735, "grad_norm": 0.20997037800742063, "learning_rate": 1e-05, "loss": 0.0761, "step": 147 },
    { "epoch": 0.074, "grad_norm": 0.2380179005894331, "learning_rate": 1e-05, "loss": 0.0927, "step": 148 },
    { "epoch": 0.0745, "grad_norm": 0.23889516090857615, "learning_rate": 1e-05, "loss": 0.0827, "step": 149 },
    { "epoch": 0.075, "grad_norm": 0.18767850358859223, "learning_rate": 1e-05, "loss": 0.0881, "step": 150 },
    { "epoch": 0.0755, "grad_norm": 0.19463069265385494, "learning_rate": 1e-05, "loss": 0.0917, "step": 151 },
    { "epoch": 0.076, "grad_norm": 0.212834963744102, "learning_rate": 1e-05, "loss": 0.0756, "step": 152 },
    { "epoch": 0.0765, "grad_norm": 0.20584172308777918, "learning_rate": 1e-05, "loss": 0.0762, "step": 153 },
    { "epoch": 0.077, "grad_norm": 0.2371449651260928, "learning_rate": 1e-05, "loss": 0.0978, "step": 154 },
    { "epoch": 0.0775, "grad_norm": 0.2049083024101962, "learning_rate": 1e-05, "loss": 0.0925, "step": 155 },
    { "epoch": 0.078, "grad_norm": 0.21217273061342656, "learning_rate": 1e-05, "loss": 0.0825, "step": 156 },
    { "epoch": 0.0785, "grad_norm": 0.20105825530151383, "learning_rate": 1e-05, "loss": 0.0858, "step": 157 },
    { "epoch": 0.079, "grad_norm": 0.2257052714675071, "learning_rate": 1e-05, "loss": 0.1105, "step": 158 },
    { "epoch": 0.0795, "grad_norm": 0.20210858652943217, "learning_rate": 1e-05, "loss": 0.1022, "step": 159 },
    { "epoch": 0.08, "grad_norm": 0.19482796495242663, "learning_rate": 1e-05, "loss": 0.1262, "step": 160 },
    { "epoch": 0.08, "eval_dev_acc": 0.6015625, "eval_dev_token": 5543.318359375, "eval_runtime": 353.7068, "eval_samples_per_second": 0.181, "eval_steps_per_second": 0.003, "step": 160 },
    { "epoch": 0.0805, "grad_norm": 0.22955090034416561, "learning_rate": 1e-05, "loss": 0.0988, "step": 161 },
    { "epoch": 0.081, "grad_norm": 0.23547588568917174, "learning_rate": 1e-05, "loss": 0.0803, "step": 162 },
    { "epoch": 0.0815, "grad_norm": 0.230658904399123, "learning_rate": 1e-05, "loss": 0.0881, "step": 163 },
    { "epoch": 0.082, "grad_norm": 0.2595571094242936, "learning_rate": 1e-05, "loss": 0.0881, "step": 164 },
    { "epoch": 0.0825, "grad_norm": 0.26763452927239884, "learning_rate": 1e-05, "loss": 0.114, "step": 165 },
    { "epoch": 0.083, "grad_norm": 0.257549186353109, "learning_rate": 1e-05, "loss": 0.1045, "step": 166 },
    { "epoch": 0.0835, "grad_norm": 0.19712751160118708, "learning_rate": 1e-05, "loss": 0.0899, "step": 167 },
    { "epoch": 0.084, "grad_norm": 0.17276675854807147, "learning_rate": 1e-05, "loss": 0.0684, "step": 168 },
    { "epoch": 0.0845, "grad_norm": 0.21103265575626073, "learning_rate": 1e-05, "loss": 0.0821, "step": 169 },
    { "epoch": 0.085, "grad_norm": 0.22292947141761962, "learning_rate": 1e-05, "loss": 0.0983, "step": 170 },
    { "epoch": 0.0855, "grad_norm": 0.21612821069411284, "learning_rate": 1e-05, "loss": 0.0917, "step": 171 },
    { "epoch": 0.086, "grad_norm": 0.20757781370778242, "learning_rate": 1e-05, "loss": 0.0919, "step": 172 },
    { "epoch": 0.0865, "grad_norm": 0.2054200483785948, "learning_rate": 1e-05, "loss": 0.077, "step": 173 },
    { "epoch": 0.087, "grad_norm": 0.24143931624393172, "learning_rate": 1e-05, "loss": 0.0919, "step": 174 },
    { "epoch": 0.0875, "grad_norm": 0.22708368965968964, "learning_rate": 1e-05, "loss": 0.0931, "step": 175 },
    { "epoch": 0.088, "grad_norm": 0.20044838709826737, "learning_rate": 1e-05, "loss": 0.0808, "step": 176 },
    { "epoch": 0.0885, "grad_norm": 0.20148090317828546, "learning_rate": 1e-05, "loss": 0.0905, "step": 177 },
    { "epoch": 0.089, "grad_norm": 0.2090109676571514, "learning_rate": 1e-05, "loss": 0.0801, "step": 178 },
    { "epoch": 0.0895, "grad_norm": 0.19147542578517765, "learning_rate": 1e-05, "loss": 0.0774, "step": 179 },
    { "epoch": 0.09, "grad_norm": 0.22051053694604383, "learning_rate": 1e-05, "loss": 0.0949, "step": 180 },
    { "epoch": 0.0905, "grad_norm": 0.20152485004966214, "learning_rate": 1e-05, "loss": 0.077, "step": 181 },
    { "epoch": 0.091, "grad_norm": 0.18668034234550573, "learning_rate": 1e-05, "loss": 0.0887, "step": 182 },
    { "epoch": 0.0915, "grad_norm": 0.21497572062563422, "learning_rate": 1e-05, "loss": 0.095, "step": 183 },
    { "epoch": 0.092, "grad_norm": 0.21716742542098177, "learning_rate": 1e-05, "loss": 0.0822, "step": 184 },
    { "epoch": 0.0925, "grad_norm": 0.20262525326126424, "learning_rate": 1e-05, "loss": 0.0804, "step": 185 },
    { "epoch": 0.093, "grad_norm": 0.18652482669251277, "learning_rate": 1e-05, "loss": 0.0959, "step": 186 },
    { "epoch": 0.0935, "grad_norm": 0.232270946947485, "learning_rate": 1e-05, "loss": 0.0785, "step": 187 },
    { "epoch": 0.094, "grad_norm": 0.22559277541611453, "learning_rate": 1e-05, "loss": 0.0868, "step": 188 },
    { "epoch": 0.0945, "grad_norm": 0.21772738934026295, "learning_rate": 1e-05, "loss": 0.1384, "step": 189 },
    { "epoch": 0.095, "grad_norm": 0.19366625753900965, "learning_rate": 1e-05, "loss": 0.0962, "step": 190 },
    { "epoch": 0.0955, "grad_norm": 0.2162137483161777, "learning_rate": 1e-05, "loss": 0.0753, "step": 191 },
    { "epoch": 0.096, "grad_norm": 0.2111612755929646, "learning_rate": 1e-05, "loss": 0.0776, "step": 192 },
    { "epoch": 0.096, "eval_dev_acc": 0.546875, "eval_dev_token": 5439.14453125, "eval_runtime": 358.6784, "eval_samples_per_second": 0.178, "eval_steps_per_second": 0.003, "step": 192 },
    { "epoch": 0.0965, "grad_norm": 0.20427587800007568, "learning_rate": 1e-05, "loss": 0.1026, "step": 193 },
    { "epoch": 0.097, "grad_norm": 0.1913558266102393, "learning_rate": 1e-05, "loss": 0.0947, "step": 194 },
    { "epoch": 0.0975, "grad_norm": 0.21823580107793827, "learning_rate": 1e-05, "loss": 0.0852, "step": 195 },
    { "epoch": 0.098, "grad_norm": 0.21947391378003933, "learning_rate": 1e-05, "loss": 0.0834, "step": 196 },
    { "epoch": 0.0985, "grad_norm": 0.2234108965736748, "learning_rate": 1e-05, "loss": 0.0922, "step": 197 },
    { "epoch": 0.099, "grad_norm": 0.20738196532743278, "learning_rate": 1e-05, "loss": 0.0814, "step": 198 },
    { "epoch": 0.0995, "grad_norm": 0.21586171895392783, "learning_rate": 1e-05, "loss": 0.0988, "step": 199 },
    { "epoch": 0.1, "grad_norm": 0.20785742252782521, "learning_rate": 1e-05, "loss": 0.0872, "step": 200 },
    { "epoch": 0.1005, "grad_norm": 0.22570317690675268, "learning_rate": 1e-05, "loss": 0.0937, "step": 201 },
    { "epoch": 0.101, "grad_norm": 0.19453877023547578, "learning_rate": 1e-05, "loss": 0.1083, "step": 202 },
    { "epoch": 0.1015, "grad_norm": 0.20591293588894416, "learning_rate": 1e-05, "loss": 0.0767, "step": 203 },
    { "epoch": 0.102, "grad_norm": 0.1798554117116506, "learning_rate": 1e-05, "loss": 0.1017, "step": 204 },
    { "epoch": 0.1025, "grad_norm": 0.1927231622743242, "learning_rate": 1e-05, "loss": 0.0911, "step": 205 },
    { "epoch": 0.103, "grad_norm": 0.21444122564196544, "learning_rate": 1e-05, "loss": 0.0856, "step": 206 },
    { "epoch": 0.1035, "grad_norm": 0.17259842125017608, "learning_rate": 1e-05, "loss": 0.0904, "step": 207 },
    { "epoch": 0.104, "grad_norm": 0.23979518316140722, "learning_rate": 1e-05, "loss": 0.084, "step": 208 },
    { "epoch": 0.1045, "grad_norm": 0.22442151254111703, "learning_rate": 1e-05, "loss": 0.0949, "step": 209 },
    { "epoch": 0.105, "grad_norm": 0.19615294291751353, "learning_rate": 1e-05, "loss": 0.0864, "step": 210 },
    { "epoch": 0.1055, "grad_norm": 0.18344154651920094, "learning_rate": 1e-05, "loss": 0.0843, "step": 211 },
    { "epoch": 0.106, "grad_norm": 0.21335203803361255, "learning_rate": 1e-05, "loss": 0.077, "step": 212 },
    { "epoch": 0.1065, "grad_norm": 0.252518444172673, "learning_rate": 1e-05, "loss": 0.0888, "step": 213 },
    { "epoch": 0.107, "grad_norm": 0.2116629068211744, "learning_rate": 1e-05, "loss": 0.098, "step": 214 },
    { "epoch": 0.1075, "grad_norm": 0.21920052784491295, "learning_rate": 1e-05, "loss": 0.0823, "step": 215 },
    { "epoch": 0.108, "grad_norm": 0.19779399311082105, "learning_rate": 1e-05, "loss": 0.0752, "step": 216 },
    { "epoch": 0.1085, "grad_norm": 0.19973286372655655, "learning_rate": 1e-05, "loss": 0.074, "step": 217 },
    { "epoch": 0.109, "grad_norm": 0.22343594632856933, "learning_rate": 1e-05, "loss": 0.0907, "step": 218 },
    { "epoch": 0.1095, "grad_norm": 0.25396426928555105, "learning_rate": 1e-05, "loss": 0.1075, "step": 219 },
    { "epoch": 0.11, "grad_norm": 0.1945715284952783, "learning_rate": 1e-05, "loss": 0.1053, "step": 220 },
    { "epoch": 0.1105, "grad_norm": 0.21559958220587308, "learning_rate": 1e-05, "loss": 0.0865, "step": 221 },
    { "epoch": 0.111, "grad_norm": 0.22369755043282374, "learning_rate": 1e-05, "loss": 0.0841, "step": 222 },
    { "epoch": 0.1115, "grad_norm": 0.2097379198995065, "learning_rate": 1e-05, "loss": 0.0753, "step": 223 },
    { "epoch": 0.112, "grad_norm": 0.17224880947033328, "learning_rate": 1e-05, "loss": 0.0699, "step": 224 },
    { "epoch": 0.112, "eval_dev_acc": 0.560546875, "eval_dev_token": 5619.009765625, "eval_runtime": 357.3428, "eval_samples_per_second": 0.179, "eval_steps_per_second": 0.003, "step": 224 },
    { "epoch": 0.1125, "grad_norm": 0.19442868536212735, "learning_rate": 1e-05, "loss": 0.0846, "step": 225 },
    { "epoch": 0.113, "grad_norm": 0.1573515275602218, "learning_rate": 1e-05, "loss": 0.0634, "step": 226 },
    { "epoch": 0.1135, "grad_norm": 0.17680777096637068, "learning_rate": 1e-05, "loss": 0.0761, "step": 227 },
    { "epoch": 0.114, "grad_norm": 0.20935704393341403, "learning_rate": 1e-05, "loss": 0.0549, "step": 228 },
    { "epoch": 0.1145, "grad_norm": 0.19829321187204563, "learning_rate": 1e-05, "loss": 0.05, "step": 229 },
    { "epoch": 0.115, "grad_norm": 0.18833561824346334, "learning_rate": 1e-05, "loss": 0.0656, "step": 230 },
    { "epoch": 0.1155, "grad_norm": 0.17277292328026173, "learning_rate": 1e-05, "loss": 0.08, "step": 231 },
    { "epoch": 0.116, "grad_norm": 0.2083709354078263, "learning_rate": 1e-05, "loss": 0.0628, "step": 232 },
    { "epoch": 0.1165, "grad_norm": 0.18113171413223286, "learning_rate": 1e-05, "loss": 0.0633, "step": 233 },
    { "epoch": 0.117, "grad_norm": 0.19985236934976783, "learning_rate": 1e-05, "loss": 0.0676, "step": 234 },
    { "epoch": 0.1175, "grad_norm": 0.2023196182410912, "learning_rate": 1e-05, "loss": 0.062, "step": 235 },
    { "epoch": 0.118, "grad_norm": 0.19446408540605106, "learning_rate": 1e-05, "loss": 0.0609, "step": 236 },
    { "epoch": 0.1185, "grad_norm": 0.1879635955015942, "learning_rate": 1e-05, "loss": 0.0631, "step": 237 },
    { "epoch": 0.119, "grad_norm": 0.18000098291861588, "learning_rate": 1e-05, "loss": 0.0628, "step": 238 },
    { "epoch": 0.1195, "grad_norm": 0.2091766063587954, "learning_rate": 1e-05, "loss": 0.0642, "step": 239 },
    { "epoch": 0.12, "grad_norm": 0.19781458462773657, "learning_rate": 1e-05, "loss": 0.0551, "step": 240 },
    { "epoch": 0.1205, "grad_norm": 0.20570535681429145, "learning_rate": 1e-05, "loss": 0.062, "step": 241 },
    { "epoch": 0.121, "grad_norm": 0.17479813291913535, "learning_rate": 1e-05, "loss": 0.0657, "step": 242 },
    { "epoch": 0.1215, "grad_norm": 0.20023576215167263, "learning_rate": 1e-05, "loss": 0.0545, "step": 243 },
    { "epoch": 0.122, "grad_norm": 0.21569894284947272, "learning_rate": 1e-05, "loss": 0.0641, "step": 244 },
    { "epoch": 0.1225, "grad_norm": 0.16426082027771785, "learning_rate": 1e-05, "loss": 0.0488, "step": 245 },
    { "epoch": 0.123, "grad_norm": 0.23142807048539513, "learning_rate": 1e-05, "loss": 0.0617, "step": 246 },
    { "epoch": 0.1235, "grad_norm": 0.21054969399806525, "learning_rate": 1e-05, "loss": 0.0566, "step": 247 },
    { "epoch": 0.124, "grad_norm": 0.1533567582820314, "learning_rate": 1e-05, "loss": 0.0559, "step": 248 },
    { "epoch": 0.1245, "grad_norm": 0.19025683614022437, "learning_rate": 1e-05, "loss": 0.051, "step": 249 },
    { "epoch": 0.125, "grad_norm": 0.16411772241541067, "learning_rate": 1e-05, "loss": 0.068, "step": 250 },
    { "epoch": 0.1255, "grad_norm": 0.23699209914417435, "learning_rate": 1e-05, "loss": 0.0845, "step": 251 },
    { "epoch": 0.126, "grad_norm": 0.2047456890167515, "learning_rate": 1e-05, "loss": 0.0588, "step": 252 },
    { "epoch": 0.1265, "grad_norm": 0.20625503133501016, "learning_rate": 1e-05, "loss": 0.0543, "step": 253 },
    { "epoch": 0.127, "grad_norm": 0.16081505489801892, "learning_rate": 1e-05, "loss": 0.0413, "step": 254 },
    { "epoch": 0.1275, "grad_norm": 0.20221086748641462, "learning_rate": 1e-05, "loss": 0.0704, "step": 255 },
    { "epoch": 0.128, "grad_norm": 0.20711399972324054, "learning_rate": 1e-05, "loss": 0.0563, "step": 256 },
    { "epoch": 0.128, "eval_dev_acc": 0.587890625, "eval_dev_token": 5704.3125, "eval_runtime": 356.7726, "eval_samples_per_second": 0.179, "eval_steps_per_second": 0.003, "step": 256 },
    { "epoch": 0.1285, "grad_norm": 0.18942704583355485, "learning_rate": 1e-05, "loss": 0.0692, "step": 257 },
    { "epoch": 0.129, "grad_norm": 0.19171984906136896, "learning_rate": 1e-05, "loss": 0.05, "step": 258 },
    { "epoch": 0.1295, "grad_norm": 0.2125382567332232, "learning_rate": 1e-05, "loss": 0.0615, "step": 259 },
    { "epoch": 0.13, "grad_norm": 0.17877261040661208, "learning_rate": 1e-05, "loss": 0.0597, "step": 260 },
    { "epoch": 0.1305, "grad_norm": 0.1708210973205684, "learning_rate": 1e-05, "loss": 0.067, "step": 261 },
    { "epoch": 0.131, "grad_norm": 0.1850887764718648, "learning_rate": 1e-05, "loss": 0.0544, "step": 262 },
    { "epoch": 0.1315, "grad_norm": 0.2010060822058417, "learning_rate": 1e-05, "loss": 0.0696, "step": 263 },
    { "epoch": 0.132, "grad_norm": 0.18460835555899294, "learning_rate": 1e-05, "loss": 0.0607, "step": 264 },
    { "epoch": 0.1325, "grad_norm": 0.2264686856441524, "learning_rate": 1e-05, "loss": 0.0638, "step": 265 },
    { "epoch": 0.133, "grad_norm": 0.17252712599660533, "learning_rate": 1e-05, "loss": 0.055, "step": 266 },
    { "epoch": 0.1335, "grad_norm": 0.1668268871760919, "learning_rate": 1e-05, "loss": 0.0467, "step": 267 },
    { "epoch": 0.134, "grad_norm": 0.17711472127782535, "learning_rate": 1e-05, "loss": 0.0462, "step": 268 },
    { "epoch": 0.1345, "grad_norm": 0.16354851889499628, "learning_rate": 1e-05, "loss": 0.0687, "step": 269 },
    { "epoch": 0.135, "grad_norm": 0.17844367901102645, "learning_rate": 1e-05, "loss": 0.0455, "step": 270 },
    { "epoch": 0.1355, "grad_norm": 0.19303024902618696, "learning_rate": 1e-05, "loss": 0.0565, "step": 271 },
    { "epoch": 0.136, "grad_norm": 0.19049740233006035, "learning_rate": 1e-05, "loss": 0.0575, "step": 272 },
    { "epoch": 0.1365, "grad_norm": 0.18444934835307936, "learning_rate": 1e-05, "loss": 0.0495, "step": 273 },
    { "epoch": 0.137, "grad_norm": 0.2029153556589725, "learning_rate": 1e-05, "loss": 0.0632, "step": 274 },
    { "epoch": 0.1375, "grad_norm": 0.17742276348080663, "learning_rate": 1e-05, "loss": 0.0591, "step": 275 },
    { "epoch": 0.138, "grad_norm": 0.2086941032177491, "learning_rate": 1e-05, "loss": 0.0453, "step": 276 },
    { "epoch": 0.1385, "grad_norm": 0.1599025673655474, "learning_rate": 1e-05, "loss": 0.0346, "step": 277 },
    { "epoch": 0.139, "grad_norm": 0.20223917188200294, "learning_rate": 1e-05, "loss": 0.0516, "step": 278 },
    { "epoch": 0.1395, "grad_norm": 0.168572629555483, "learning_rate": 1e-05, "loss": 0.0502, "step": 279 },
    { "epoch": 0.14, "grad_norm": 0.21316857087336016, "learning_rate": 1e-05, "loss": 0.0585, "step": 280 },
    { "epoch": 0.1405, "grad_norm": 0.1533009935920478, "learning_rate": 1e-05, "loss": 0.0732, "step": 281 },
    { "epoch": 0.141, "grad_norm": 0.2226592493652288, "learning_rate": 1e-05, "loss": 0.0683, "step": 282 },
    { "epoch": 0.1415, "grad_norm": 0.2005052634299014, "learning_rate": 1e-05, "loss": 0.064, "step": 283 },
    { "epoch": 0.142, "grad_norm": 0.16977898059181232, "learning_rate": 1e-05, "loss": 0.0759, "step": 284 },
    { "epoch": 0.1425, "grad_norm": 0.17622048198257903, "learning_rate": 1e-05, "loss": 0.0546, "step": 285 },
    { "epoch": 0.143, "grad_norm": 0.15734345344681852, "learning_rate": 1e-05, "loss": 0.0371, "step": 286 },
    { "epoch": 0.1435, "grad_norm": 0.16097527322211574, "learning_rate": 1e-05, "loss": 0.0523, "step": 287 },
    { "epoch": 0.144, "grad_norm": 0.18490744056020517, "learning_rate": 1e-05, "loss": 0.0518, "step": 288 },
    { "epoch": 0.144, "eval_dev_acc": 0.5234375, "eval_dev_token": 5945.451171875, "eval_runtime": 402.5197, "eval_samples_per_second": 0.159, "eval_steps_per_second": 0.002, "step": 288 },
    { "epoch": 0.1445, "grad_norm": 0.16803932674344507, "learning_rate": 1e-05, "loss": 0.0564, "step": 289 },
    { "epoch": 0.145, "grad_norm": 0.18279683357896828, "learning_rate": 1e-05, "loss": 0.0604, "step": 290 },
    { "epoch": 0.1455, "grad_norm": 0.1773550481655194, "learning_rate": 1e-05, "loss": 0.0465, "step": 291 },
    { "epoch": 0.146, "grad_norm": 0.37855180864427374, "learning_rate": 1e-05, "loss": 0.0687, "step": 292 },
    { "epoch": 0.1465, "grad_norm": 0.2256173969538139, "learning_rate": 1e-05, "loss": 0.0597, "step": 293 },
    { "epoch": 0.147, "grad_norm": 0.19318315047474646, "learning_rate": 1e-05, "loss": 0.05, "step": 294 },
    { "epoch": 0.1475, "grad_norm": 0.1949573691249157, "learning_rate": 1e-05, "loss": 0.054, "step": 295 },
    { "epoch": 0.148, "grad_norm": 0.1784893290182381, "learning_rate": 1e-05, "loss": 0.0551, "step": 296 },
    { "epoch": 0.1485, "grad_norm": 0.1893518286787237, "learning_rate": 1e-05, "loss": 0.0582, "step": 297 },
    { "epoch": 0.149, "grad_norm": 0.16491423015511872, "learning_rate": 1e-05, "loss": 0.0393, "step": 298 },
    { "epoch": 0.1495, "grad_norm": 0.1762274160177828, "learning_rate": 1e-05, "loss": 0.0485, "step": 299 },
    { "epoch": 0.15, "grad_norm": 0.18398727773770782, "learning_rate": 1e-05, "loss": 0.0573, "step": 300 },
    { "epoch": 0.1505, "grad_norm": 0.18217281656040227, "learning_rate": 1e-05, "loss": 0.0485, "step": 301 },
    { "epoch": 0.151, "grad_norm": 0.17276763991718358, "learning_rate": 1e-05, "loss": 0.0571, "step": 302 },
    { "epoch": 0.1515, "grad_norm": 0.15668609882813492, "learning_rate": 1e-05, "loss": 0.0741, "step": 303 },
    { "epoch": 0.152, "grad_norm": 0.18965888700513778, "learning_rate": 1e-05, "loss": 0.0548, "step": 304 },
    { "epoch": 0.1525, "grad_norm": 0.1514861962476675, "learning_rate": 1e-05, "loss": 0.0408, "step": 305 },
    { "epoch": 0.153, "grad_norm": 0.21027930287961952, "learning_rate": 1e-05, "loss": 0.0473, "step": 306 },
    { "epoch": 0.1535, "grad_norm": 0.20086505297048218, "learning_rate": 1e-05, "loss": 0.0457, "step": 307 },
    { "epoch": 0.154, "grad_norm": 0.1834058060370301, "learning_rate": 1e-05, "loss": 0.0391, "step": 308 },
    { "epoch": 0.1545, "grad_norm": 0.1675035648173745, "learning_rate": 1e-05, "loss": 0.0346, "step": 309 },
    { "epoch": 0.155, "grad_norm": 0.19041217604042332, "learning_rate": 1e-05, "loss": 0.0447, "step": 310 },
    { "epoch": 0.1555, "grad_norm": 0.2063641120441124, "learning_rate": 1e-05, "loss": 0.0454, "step": 311 },
    { "epoch": 0.156, "grad_norm": 0.18205494058640856, "learning_rate": 1e-05, "loss": 0.0402, "step": 312 },
    { "epoch": 0.1565, "grad_norm": 0.18642039675473027, "learning_rate": 1e-05, "loss": 0.0397, "step": 313 },
    { "epoch": 0.157, "grad_norm": 0.16971154384699963, "learning_rate": 1e-05, "loss": 0.0426, "step": 314 },
    { "epoch": 0.1575, "grad_norm": 0.19779499600374534, "learning_rate": 1e-05, "loss": 0.042, "step": 315 },
    { "epoch": 0.158, "grad_norm": 0.1597506922805199, "learning_rate": 1e-05, "loss": 0.0561, "step": 316 },
    { "epoch": 0.1585, "grad_norm": 0.22455898943377722, "learning_rate": 1e-05, "loss": 0.0508, "step": 317 },
    { "epoch": 0.159, "grad_norm": 0.22816888070811367, "learning_rate": 1e-05, "loss": 0.0684, "step": 318 },
    { "epoch": 0.1595, "grad_norm": 0.20829314487846406, "learning_rate": 1e-05, "loss": 0.0558, "step": 319 },
    { "epoch": 0.16, "grad_norm": 0.1726376642892394, "learning_rate": 1e-05, "loss": 0.0382, "step": 320 },
    { "epoch": 0.16, "eval_dev_acc": 0.62109375, "eval_dev_token": 5348.43359375, "eval_runtime": 348.0694, "eval_samples_per_second": 0.184, "eval_steps_per_second": 0.003, "step": 320 },
    { "epoch": 0.1605, "grad_norm": 0.21533911997871472, "learning_rate": 1e-05, "loss": 0.0573, "step": 321 },
    { "epoch": 0.161, "grad_norm": 0.1945926926705701, "learning_rate": 1e-05, "loss": 0.0482, "step": 322 },
    { "epoch": 0.1615, "grad_norm": 0.16282045186801866, "learning_rate": 1e-05, "loss": 0.0395, "step": 323 },
    { "epoch": 0.162, "grad_norm": 0.21192465805738625, "learning_rate": 1e-05, "loss": 0.0534, "step": 324 },
    { "epoch": 0.1625, "grad_norm": 0.18412473332196624, "learning_rate": 1e-05, "loss": 0.0457, "step": 325 },
    { "epoch": 0.163, "grad_norm": 0.1760683716395308, "learning_rate": 1e-05, "loss": 0.0621, "step": 326 },
    { "epoch": 0.1635, "grad_norm": 0.1977120327808734, "learning_rate": 1e-05, "loss": 0.0443, "step": 327 },
    { "epoch": 0.164, "grad_norm": 0.17834928432327893, "learning_rate": 1e-05, "loss": 0.0474, "step": 328 },
    { "epoch": 0.1645, "grad_norm": 0.19810362807929732, "learning_rate": 1e-05, "loss": 0.0472, "step": 329 },
    { "epoch": 0.165, "grad_norm": 0.17306554655229037, "learning_rate": 1e-05, "loss": 0.0514, "step": 330 },
    { "epoch": 0.1655, "grad_norm": 0.1732660830513622, "learning_rate": 1e-05, "loss": 0.0487, "step": 331 },
    { "epoch": 0.166, "grad_norm": 0.15153749458139032, "learning_rate": 1e-05, "loss": 0.044, "step": 332 },
    { "epoch": 0.1665, "grad_norm": 0.20187085045913772, "learning_rate": 1e-05, "loss": 0.0551, "step": 333 },
    { "epoch": 0.167, "grad_norm": 0.16579582791974742, "learning_rate": 1e-05, "loss": 0.0497, "step": 334 },
    { "epoch": 0.1675, "grad_norm": 0.19316064563692958, "learning_rate": 1e-05, "loss": 0.0549, "step": 335 },
    { "epoch": 0.168, "grad_norm": 0.16491714800111232, "learning_rate": 1e-05, "loss": 0.0371, "step": 336 },
    { "epoch": 0.1685, "grad_norm": 0.17551519178449135, "learning_rate": 1e-05, "loss": 0.041, "step": 337 },
    { "epoch": 0.169, "grad_norm": 0.1734781492111704, "learning_rate": 1e-05, "loss": 0.0441, "step": 338 },
    { "epoch": 0.1695, "grad_norm": 0.18684315556716974, "learning_rate": 1e-05, "loss": 0.0488, "step": 339 },
    { "epoch": 0.17, "grad_norm": 0.16917376679576818, "learning_rate": 1e-05, "loss": 0.0437, "step": 340 },
    { "epoch": 0.1705, "grad_norm": 0.1501957333162884, "learning_rate": 1e-05, "loss": 0.0393, "step": 341 },
    { "epoch": 0.171, "grad_norm": 0.2205121105977978, "learning_rate": 1e-05, "loss": 0.0579, "step": 342 },
    { "epoch": 0.1715, "grad_norm": 0.18041963938373073, "learning_rate": 1e-05, "loss": 0.0492, "step": 343 },
    { "epoch": 0.172, "grad_norm": 0.17449103579952038, "learning_rate": 1e-05, "loss": 0.0401, "step": 344 },
    { "epoch": 0.1725, "grad_norm": 0.15528177663303303, "learning_rate": 1e-05, "loss": 0.0379, "step": 345 },
    { "epoch": 0.173, "grad_norm": 0.20473861699417553, "learning_rate": 1e-05, "loss": 0.0499, "step": 346 },
    { "epoch": 0.1735, "grad_norm": 0.22954222855804748, "learning_rate": 1e-05, "loss": 0.066, "step": 347 },
    { "epoch": 0.174, "grad_norm": 0.18144976711639327, "learning_rate": 1e-05, "loss": 0.0417, "step": 348 },
    { "epoch": 0.1745, "grad_norm": 0.1647885822020398, "learning_rate": 1e-05, "loss": 0.0406, "step": 349 },
    { "epoch": 0.175, "grad_norm": 0.13381930823784724, "learning_rate": 1e-05, "loss": 0.039, "step": 350 },
    { "epoch": 0.1755, "grad_norm": 0.17233840695200286, "learning_rate": 1e-05, "loss": 0.0504, "step": 351 },
    { "epoch": 0.176, "grad_norm": 0.18220085493590332, "learning_rate": 1e-05, "loss": 0.0411, "step": 352 },
    { "epoch": 0.176, "eval_dev_acc": 0.53515625, "eval_dev_token": 5787.59375, "eval_runtime": 364.0085, "eval_samples_per_second": 0.176, "eval_steps_per_second": 0.003, "step": 352 },
    { "epoch": 0.1765, "grad_norm": 0.20744185724682074, "learning_rate": 1e-05, "loss": 0.0606, "step": 353 },
    { "epoch": 0.177, "grad_norm": 0.18600570998480834, "learning_rate": 1e-05, "loss": 0.0416, "step": 354 },
    { "epoch": 0.1775, "grad_norm": 0.1776469292641811, "learning_rate": 1e-05, "loss": 0.042, "step": 355 },
    { "epoch": 0.178, "grad_norm": 0.1783568782453835, "learning_rate": 1e-05, "loss": 0.0457, "step": 356 },
    { "epoch": 0.1785, "grad_norm": 0.1981729227656145, "learning_rate": 1e-05, "loss": 0.0578, "step": 357 },
    { "epoch": 0.179, "grad_norm": 0.18984703197303243, "learning_rate": 1e-05, "loss": 0.0427, "step": 358 },
    { "epoch": 0.1795, "grad_norm": 0.21799846739281004, "learning_rate": 1e-05, "loss": 0.0492, "step": 359 },
    { "epoch": 0.18, "grad_norm": 0.2263278306065525, "learning_rate": 1e-05, "loss": 0.0708, "step": 360 },
    { "epoch": 0.1805, "grad_norm": 0.16612125798758726, "learning_rate": 1e-05, "loss": 0.0447, "step": 361 },
    { "epoch": 0.181, "grad_norm": 0.15311766872023147, "learning_rate": 1e-05, "loss": 0.0369, "step": 362 },
    { "epoch": 0.1815, "grad_norm": 0.18614757917185834, "learning_rate": 1e-05, "loss": 0.0484, "step": 363 },
    { "epoch": 0.182, "grad_norm": 0.18253431030668557, "learning_rate": 1e-05, "loss": 0.044, "step": 364 },
    { "epoch": 0.1825, "grad_norm": 0.19238661256236803, "learning_rate": 1e-05, "loss": 0.0615, "step": 365 },
    { "epoch": 0.183, "grad_norm": 0.1808155264273284, "learning_rate": 1e-05, "loss": 0.0444, "step": 366 },
    { "epoch": 0.1835, "grad_norm": 0.1743762662626829, "learning_rate": 1e-05, "loss": 0.0501, "step": 367 },
    { "epoch": 0.184, "grad_norm": 0.16508774246157967, "learning_rate": 1e-05, "loss": 0.0448, "step": 368 },
    { "epoch": 0.1845, "grad_norm": 0.1546243168773746, "learning_rate": 1e-05, "loss": 0.0366, "step": 369 },
    { "epoch": 0.185, "grad_norm": 0.1746189186464954, "learning_rate": 1e-05, "loss": 0.0471, "step": 370 },
    { "epoch": 0.1855, "grad_norm": 0.17995461422580256, "learning_rate": 1e-05, "loss": 0.0405, "step": 371 },
    { "epoch": 0.186, "grad_norm": 0.16745033647841967, "learning_rate": 1e-05, "loss": 0.0371, "step": 372 },
    { "epoch": 0.1865, "grad_norm": 0.14177227347565124, "learning_rate": 1e-05, "loss": 0.0336, "step": 373 },
    { "epoch": 0.187, "grad_norm": 0.19568633642105135, "learning_rate": 1e-05, "loss": 0.0419, "step": 374 },
    { "epoch": 0.1875, "grad_norm": 0.1694809590901385, "learning_rate": 1e-05, "loss": 0.0365, "step": 375 },
    { "epoch": 0.188, "grad_norm": 0.16086017791775223, "learning_rate": 1e-05, "loss": 0.0382, "step": 376 },
    { "epoch": 0.1885, "grad_norm": 0.14863922525565496, "learning_rate": 1e-05, "loss": 0.039, "step": 377 },
    { "epoch": 0.189, "grad_norm": 0.16084357735487792, "learning_rate": 1e-05, "loss": 0.0312, "step": 378 },
    { "epoch": 0.1895, "grad_norm": 0.19070881724879324, "learning_rate": 1e-05, "loss": 0.0491, "step": 379 },
    { "epoch": 0.19, "grad_norm": 0.17240390839318184, "learning_rate": 1e-05, "loss": 0.0455, "step": 380 },
    { "epoch": 0.1905, "grad_norm": 0.13331277326103189, "learning_rate": 1e-05, "loss": 0.0289, "step": 381 },
    { "epoch": 0.191, "grad_norm": 0.18756729894366522, "learning_rate": 1e-05, "loss": 0.0468, "step": 382 },
    { "epoch": 0.1915, "grad_norm": 0.1660248717735821, "learning_rate": 1e-05, "loss": 0.0424, "step": 383 },
    { "epoch": 0.192, "grad_norm": 0.16346974130070938, "learning_rate": 1e-05, "loss": 0.0311, "step": 384 },
    { "epoch": 0.192, "eval_dev_acc": 0.599609375, "eval_dev_token": 5596.130859375, "eval_runtime": 352.4793, "eval_samples_per_second": 0.182, "eval_steps_per_second": 0.003, "step": 384 },
    { "epoch": 0.1925, "grad_norm": 0.16858862798497806, "learning_rate": 1e-05, "loss": 0.0389, "step": 385 },
    { "epoch": 0.193, "grad_norm": 0.1484958580298565,
| "learning_rate": 1e-05, | |
| "loss": 0.0397, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.1935, | |
| "grad_norm": 0.17660261356555002, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0515, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.194, | |
| "grad_norm": 0.1783517215939047, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0431, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.1945, | |
| "grad_norm": 0.14136150090913457, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0323, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.195, | |
| "grad_norm": 0.16595913921658337, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0394, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.1955, | |
| "grad_norm": 0.17788297569443248, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0698, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.196, | |
| "grad_norm": 0.14755167079389797, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0308, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.1965, | |
| "grad_norm": 0.20681855290430337, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0494, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.197, | |
| "grad_norm": 0.19060439020439998, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0445, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.1975, | |
| "grad_norm": 0.17199443698076167, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0414, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.198, | |
| "grad_norm": 0.15210077373082737, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0296, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.1985, | |
| "grad_norm": 0.17482591540638856, | |
| "learning_rate": 1e-05, | |
| "loss": 0.044, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.199, | |
| "grad_norm": 0.15501601608099658, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0376, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.1995, | |
| "grad_norm": 0.17142493205422682, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0386, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "grad_norm": 0.1921162644413309, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0469, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.2005, | |
| "grad_norm": 0.15938080403417312, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0496, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.201, | |
| "grad_norm": 0.14786848292294155, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0426, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.2015, | |
| "grad_norm": 0.18628997533329272, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0581, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.202, | |
| "grad_norm": 0.16058096254934043, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0336, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.2025, | |
| "grad_norm": 0.19319024386507233, | |
| "learning_rate": 1e-05, | |
| "loss": 0.047, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.203, | |
| "grad_norm": 0.17328115011013, | |
| "learning_rate": 1e-05, | |
| "loss": 0.049, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.2035, | |
| "grad_norm": 0.13258378170371796, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0286, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.204, | |
| "grad_norm": 0.17945245697241183, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0518, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.2045, | |
| "grad_norm": 0.16689764407399071, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0458, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.205, | |
| "grad_norm": 0.18446815699746041, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0408, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.2055, | |
| "grad_norm": 0.1489326060726689, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0656, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.206, | |
| "grad_norm": 0.14974593012017515, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0297, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.2065, | |
| "grad_norm": 0.1918114395748189, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0439, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.207, | |
| "grad_norm": 0.1689953495305046, | |
| "learning_rate": 1e-05, | |
| "loss": 0.04, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.2075, | |
| "grad_norm": 0.1403733317703667, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0472, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.208, | |
| "grad_norm": 0.173982074128614, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0395, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.208, | |
| "eval_dev_acc": 0.49609375, | |
| "eval_dev_token": 5482.95703125, | |
| "eval_runtime": 374.469, | |
| "eval_samples_per_second": 0.171, | |
| "eval_steps_per_second": 0.003, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.2085, | |
| "grad_norm": 0.16717306009352031, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0369, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.209, | |
| "grad_norm": 0.17317803700896214, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0581, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.2095, | |
| "grad_norm": 0.14729050118039705, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0469, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.21, | |
| "grad_norm": 0.14599122830811173, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0571, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.2105, | |
| "grad_norm": 0.16285142688584706, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0291, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.211, | |
| "grad_norm": 0.19044973230329837, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0599, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.2115, | |
| "grad_norm": 0.188861960333507, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0471, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.212, | |
| "grad_norm": 0.19188548951756218, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0529, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.2125, | |
| "grad_norm": 0.16267402517673002, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0305, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.213, | |
| "grad_norm": 0.1447850696130614, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0324, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.2135, | |
| "grad_norm": 0.15248164794588065, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0388, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.214, | |
| "grad_norm": 0.1661241871100943, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0328, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.2145, | |
| "grad_norm": 0.16566625624023265, | |
| "learning_rate": 1e-05, | |
| "loss": 0.031, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.215, | |
| "grad_norm": 0.15249287061514458, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0319, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.2155, | |
| "grad_norm": 0.12995530917181783, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0233, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.216, | |
| "grad_norm": 0.1704276552962093, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0405, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.2165, | |
| "grad_norm": 0.17386329346754434, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0336, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.217, | |
| "grad_norm": 0.15704760833763615, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0325, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.2175, | |
| "grad_norm": 0.1495524799308763, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0341, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.218, | |
| "grad_norm": 0.1686891909288217, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0343, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.2185, | |
| "grad_norm": 0.13995459985426573, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0398, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.219, | |
| "grad_norm": 0.15473569116081692, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0412, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.2195, | |
| "grad_norm": 0.1801609077983992, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0534, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.22, | |
| "grad_norm": 0.17809364795872226, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0548, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.2205, | |
| "grad_norm": 0.1535032114151188, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0593, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.221, | |
| "grad_norm": 0.20882248884544774, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0402, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.2215, | |
| "grad_norm": 0.14517381058327564, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0436, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.222, | |
| "grad_norm": 0.17014179155102424, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0333, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.2225, | |
| "grad_norm": 0.1729306341614305, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0301, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.223, | |
| "grad_norm": 0.1686712423851483, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0406, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.2235, | |
| "grad_norm": 0.1535287640721648, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0282, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.224, | |
| "grad_norm": 0.1406783148617548, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0264, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.224, | |
| "eval_dev_acc": 0.576171875, | |
| "eval_dev_token": 5738.91015625, | |
| "eval_runtime": 360.4892, | |
| "eval_samples_per_second": 0.178, | |
| "eval_steps_per_second": 0.003, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.2245, | |
| "grad_norm": 0.15218760519443988, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0227, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.225, | |
| "grad_norm": 0.13165646820927943, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0244, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.2255, | |
| "grad_norm": 0.13658776890249372, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0235, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.226, | |
| "grad_norm": 0.1595102880182028, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0361, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.2265, | |
| "grad_norm": 0.18272272767076744, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0341, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.227, | |
| "grad_norm": 0.15970250529787045, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0277, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.2275, | |
| "grad_norm": 0.1641447638351716, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0284, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.228, | |
| "grad_norm": 0.1294308434226962, | |
| "learning_rate": 1e-05, | |
| "loss": 0.022, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.2285, | |
| "grad_norm": 0.11954195360401737, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0202, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.229, | |
| "grad_norm": 0.16068276912989043, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0251, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.2295, | |
| "grad_norm": 0.15307414897001792, | |
| "learning_rate": 1e-05, | |
| "loss": 0.027, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.23, | |
| "grad_norm": 0.15979117725555442, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0375, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.2305, | |
| "grad_norm": 0.2020059964338148, | |
| "learning_rate": 1e-05, | |
| "loss": 0.043, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.231, | |
| "grad_norm": 0.15074817260440432, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0241, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.2315, | |
| "grad_norm": 0.16521962645814686, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0447, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.232, | |
| "grad_norm": 0.14710027259702427, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0322, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.2325, | |
| "grad_norm": 0.1879875645942318, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0324, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.233, | |
| "grad_norm": 0.16624112738581265, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0308, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.2335, | |
| "grad_norm": 0.19050875236463863, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0342, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.234, | |
| "grad_norm": 0.11998525830480301, | |
| "learning_rate": 1e-05, | |
| "loss": 0.021, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.2345, | |
| "grad_norm": 0.1656701890014892, | |
| "learning_rate": 1e-05, | |
| "loss": 0.027, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.235, | |
| "grad_norm": 0.15307753326429366, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0452, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.2355, | |
| "grad_norm": 0.1897260442883158, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0245, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.236, | |
| "grad_norm": 0.11931107646228578, | |
| "learning_rate": 1e-05, | |
| "loss": 0.017, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.2365, | |
| "grad_norm": 0.18498950301005707, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0342, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.237, | |
| "grad_norm": 0.14355239628818517, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0221, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.2375, | |
| "grad_norm": 0.16525653932908532, | |
| "learning_rate": 1e-05, | |
| "loss": 0.029, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.238, | |
| "grad_norm": 0.1518153688638394, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0267, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.2385, | |
| "grad_norm": 0.15987321641272437, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0279, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.239, | |
| "grad_norm": 0.1442274823944727, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0349, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 0.2395, | |
| "grad_norm": 0.11710766704672448, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0179, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "grad_norm": 0.15497604683020938, | |
| "learning_rate": 1e-05, | |
| "loss": 0.023, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "eval_dev_acc": 0.544921875, | |
| "eval_dev_token": 5840.61328125, | |
| "eval_runtime": 373.4708, | |
| "eval_samples_per_second": 0.171, | |
| "eval_steps_per_second": 0.003, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.2405, | |
| "grad_norm": 0.13492229768745556, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0205, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 0.241, | |
| "grad_norm": 0.1704648731314998, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0353, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 0.2415, | |
| "grad_norm": 0.1491861836462168, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0365, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 0.242, | |
| "grad_norm": 0.17050828891525746, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0277, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 0.2425, | |
| "grad_norm": 0.17980691606220936, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0335, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 0.243, | |
| "grad_norm": 0.16998825524724584, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0362, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 0.2435, | |
| "grad_norm": 0.11641133365996917, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0181, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 0.244, | |
| "grad_norm": 0.14362674831456992, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0365, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 0.2445, | |
| "grad_norm": 0.14488123923452778, | |
| "learning_rate": 1e-05, | |
| "loss": 0.024, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 0.245, | |
| "grad_norm": 0.1517003378019991, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0271, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.2455, | |
| "grad_norm": 0.14967074987714707, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0294, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 0.246, | |
| "grad_norm": 0.15791993394836015, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0283, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 0.2465, | |
| "grad_norm": 0.13495006239387555, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0251, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 0.247, | |
| "grad_norm": 0.18930054102351096, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0373, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 0.2475, | |
| "grad_norm": 0.13152234060084034, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0233, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 0.248, | |
| "grad_norm": 0.1341531691510106, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0269, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 0.2485, | |
| "grad_norm": 0.13741586371551992, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0277, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 0.249, | |
| "grad_norm": 0.1554051684617337, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0276, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 0.2495, | |
| "grad_norm": 0.14814577647609775, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0235, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 0.25, | |
| "grad_norm": 0.14930720560226657, | |
| "learning_rate": 1e-05, | |
| "loss": 0.039, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.2505, | |
| "grad_norm": 0.1244942117603243, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0202, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 0.251, | |
| "grad_norm": 0.14244145658079232, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0209, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 0.2515, | |
| "grad_norm": 0.146145096145696, | |
| "learning_rate": 1e-05, | |
| "loss": 0.024, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 0.252, | |
| "grad_norm": 0.13594585715406687, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0269, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 0.2525, | |
| "grad_norm": 0.1490412459954878, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0345, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 0.253, | |
| "grad_norm": 0.11950170266380834, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0181, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 0.2535, | |
| "grad_norm": 0.18548215823845707, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0275, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 0.254, | |
| "grad_norm": 0.15108980653404058, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0197, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 0.2545, | |
| "grad_norm": 0.16504836098536718, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0289, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 0.255, | |
| "grad_norm": 0.15746261920489785, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0253, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 0.2555, | |
| "grad_norm": 0.14071771991438595, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0219, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 0.256, | |
| "grad_norm": 0.16079872072377113, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0204, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 0.256, | |
| "eval_dev_acc": 0.56640625, | |
| "eval_dev_token": 5634.1015625, | |
| "eval_runtime": 361.9891, | |
| "eval_samples_per_second": 0.177, | |
| "eval_steps_per_second": 0.003, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 0.2565, | |
| "grad_norm": 0.13549471484008802, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0164, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 0.257, | |
| "grad_norm": 0.12072963489745359, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0302, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 0.2575, | |
| "grad_norm": 0.14026647684897994, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0194, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 0.258, | |
| "grad_norm": 0.1634484411344168, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0277, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 0.2585, | |
| "grad_norm": 0.15844211231505426, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0214, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 0.259, | |
| "grad_norm": 0.1567910605652928, | |
| "learning_rate": 1e-05, | |
| "loss": 0.024, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 0.2595, | |
| "grad_norm": 0.17902606156745304, | |
| "learning_rate": 1e-05, | |
| "loss": 0.033, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 0.26, | |
| "grad_norm": 0.12339744538286439, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0172, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 0.2605, | |
| "grad_norm": 0.13532209502494125, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0206, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 0.261, | |
| "grad_norm": 0.15623082886780087, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0213, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 0.2615, | |
| "grad_norm": 0.14428427308597647, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0201, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 0.262, | |
| "grad_norm": 0.14835567545470982, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0244, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 0.2625, | |
| "grad_norm": 0.14068070672711747, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0239, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 0.263, | |
| "grad_norm": 0.1460843289248216, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0223, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 0.2635, | |
| "grad_norm": 0.13777430449621855, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0229, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 0.264, | |
| "grad_norm": 0.15161607294549337, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0272, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 0.2645, | |
| "grad_norm": 0.13410519048089503, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0181, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 0.265, | |
| "grad_norm": 0.15931617673254456, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0244, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 0.2655, | |
| "grad_norm": 0.1410700523457689, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0251, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 0.266, | |
| "grad_norm": 0.11388951846034073, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0144, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 0.2665, | |
| "grad_norm": 0.12253780956369799, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0177, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 0.267, | |
| "grad_norm": 0.15575473599510573, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0192, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 0.2675, | |
| "grad_norm": 0.14690747155640696, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0222, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 0.268, | |
| "grad_norm": 0.13584546405544728, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0237, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 0.2685, | |
| "grad_norm": 0.13430763220790742, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0291, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 0.269, | |
| "grad_norm": 0.14208572873353734, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0187, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 0.2695, | |
| "grad_norm": 0.14058928149963162, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0199, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 0.27, | |
| "grad_norm": 0.15100703501541832, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0348, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 0.2705, | |
| "grad_norm": 0.12269452397268416, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0247, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 0.271, | |
| "grad_norm": 0.1364796501674048, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0227, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 0.2715, | |
| "grad_norm": 0.13163932605554884, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0262, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 0.272, | |
| "grad_norm": 0.13497428740182482, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0206, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 0.272, | |
| "eval_dev_acc": 0.59765625, | |
| "eval_dev_token": 5639.98828125, | |
| "eval_runtime": 359.2369, | |
| "eval_samples_per_second": 0.178, | |
| "eval_steps_per_second": 0.003, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 0.2725, | |
| "grad_norm": 0.13930735859181714, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0234, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 0.273, | |
| "grad_norm": 0.11985280096835198, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0269, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 0.2735, | |
| "grad_norm": 0.17031723198491708, | |
| "learning_rate": 1e-05, | |
| "loss": 0.028, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 0.274, | |
| "grad_norm": 0.17166197772315975, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0281, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 0.2745, | |
| "grad_norm": 0.1167335581681914, | |
| "learning_rate": 1e-05, | |
| "loss": 0.022, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 0.275, | |
| "grad_norm": 0.1443441971157384, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0198, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.2755, | |
| "grad_norm": 0.1268787923602722, | |
| "learning_rate": 1e-05, | |
| "loss": 0.017, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 0.276, | |
| "grad_norm": 0.11065296478824395, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0157, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 0.2765, | |
| "grad_norm": 0.12047907824944362, | |
| "learning_rate": 1e-05, | |
| "loss": 0.016, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 0.277, | |
| "grad_norm": 0.13956303855472266, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0216, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 0.2775, | |
| "grad_norm": 0.10533407777378404, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0137, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 0.278, | |
| "grad_norm": 0.11532013491755984, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0282, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 0.2785, | |
| "grad_norm": 0.11921463919727264, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0163, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 0.279, | |
| "grad_norm": 0.15645731769207732, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0241, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 0.2795, | |
| "grad_norm": 0.12096274696840706, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0167, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "grad_norm": 0.149157783124579, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0193, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 0.2805, | |
| "grad_norm": 0.16982490839988412, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0283, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 0.281, | |
| "grad_norm": 0.12038107977310454, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0154, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 0.2815, | |
| "grad_norm": 0.16469919524412158, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0214, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 0.282, | |
| "grad_norm": 0.15827423056846177, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0216, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 0.2825, | |
| "grad_norm": 0.12058245559465251, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0141, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 0.283, | |
| "grad_norm": 0.163789727088167, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0241, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 0.2835, | |
| "grad_norm": 0.1390884369932456, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0221, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 0.284, | |
| "grad_norm": 0.14472941005878595, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0164, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 0.2845, | |
| "grad_norm": 0.15437454042645973, | |
| "learning_rate": 1e-05, | |
| "loss": 0.024, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 0.285, | |
| "grad_norm": 0.1207487307624573, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0172, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 0.2855, | |
| "grad_norm": 0.1502409849611173, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0298, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 0.286, | |
| "grad_norm": 0.16401355690597133, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0225, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 0.2865, | |
| "grad_norm": 0.15181464752177645, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0189, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 0.287, | |
| "grad_norm": 0.14560432645081878, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0186, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 0.2875, | |
| "grad_norm": 0.12603042660981642, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0155, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 0.288, | |
| "grad_norm": 0.11638577111126014, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0164, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 0.288, | |
| "eval_dev_acc": 0.54296875, | |
| "eval_dev_token": 5801.126953125, | |
| "eval_runtime": 373.1703, | |
| "eval_samples_per_second": 0.172, | |
| "eval_steps_per_second": 0.003, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 0.2885, | |
| "grad_norm": 0.13420942083968396, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0181, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 0.289, | |
| "grad_norm": 0.12122809121871923, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0134, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 0.2895, | |
| "grad_norm": 0.13114866603642533, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0174, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 0.29, | |
| "grad_norm": 0.1498609312158644, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0214, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 0.2905, | |
| "grad_norm": 0.1527812218308566, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0194, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 0.291, | |
| "grad_norm": 0.14711876695343454, | |
| "learning_rate": 1e-05, | |
| "loss": 0.018, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 0.2915, | |
| "grad_norm": 0.11529220604038168, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0144, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 0.292, | |
| "grad_norm": 0.16180671831014115, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0165, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 0.2925, | |
| "grad_norm": 0.13621545825638848, | |
| "learning_rate": 1e-05, | |
| "loss": 0.015, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 0.293, | |
| "grad_norm": 0.15473239935591382, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0164, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 0.2935, | |
| "grad_norm": 0.15716799171541335, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0194, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 0.294, | |
| "grad_norm": 0.1684941322847538, | |
| "learning_rate": 1e-05, | |
| "loss": 0.022, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 0.2945, | |
| "grad_norm": 0.15453918821249785, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0188, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 0.295, | |
| "grad_norm": 0.140163345657633, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0208, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 0.2955, | |
| "grad_norm": 0.15010258665645038, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0215, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 0.296, | |
| "grad_norm": 0.14661643221841641, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0191, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 0.2965, | |
| "grad_norm": 0.15435066476462508, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0231, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 0.297, | |
| "grad_norm": 0.17094702806791945, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0251, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 0.2975, | |
| "grad_norm": 0.1371139566901347, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0186, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 0.298, | |
| "grad_norm": 0.11779673830033237, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0269, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 0.2985, | |
| "grad_norm": 0.11853976704548681, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0154, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 0.299, | |
| "grad_norm": 0.14881574569113099, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0246, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 0.2995, | |
| "grad_norm": 0.11792409287393274, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0164, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 0.3, | |
| "grad_norm": 0.13831559531762572, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0257, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.3005, | |
| "grad_norm": 0.13756632720301187, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0214, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 0.301, | |
| "grad_norm": 0.10998907458045305, | |
| "learning_rate": 1e-05, | |
| "loss": 0.015, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 0.3015, | |
| "grad_norm": 0.135955562101373, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0211, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 0.302, | |
| "grad_norm": 0.1214956422000124, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0219, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 0.3025, | |
| "grad_norm": 0.15757702522309963, | |
| "learning_rate": 1e-05, | |
| "loss": 0.018, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 0.303, | |
| "grad_norm": 0.1350858708023801, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0211, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 0.3035, | |
| "grad_norm": 0.10610433140412452, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0147, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 0.304, | |
| "grad_norm": 0.11514647079357257, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0192, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 0.304, | |
| "eval_dev_acc": 0.62109375, | |
| "eval_dev_token": 5367.916015625, | |
| "eval_runtime": 354.8095, | |
| "eval_samples_per_second": 0.18, | |
| "eval_steps_per_second": 0.003, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 0.3045, | |
| "grad_norm": 0.12603567923188372, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0188, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 0.305, | |
| "grad_norm": 0.14277125754270012, | |
| "learning_rate": 1e-05, | |
| "loss": 0.022, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 0.3055, | |
| "grad_norm": 0.12862855181841676, | |
| "learning_rate": 1e-05, | |
| "loss": 0.021, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 0.306, | |
| "grad_norm": 0.1227953424403543, | |
| "learning_rate": 1e-05, | |
| "loss": 0.018, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 0.3065, | |
| "grad_norm": 0.11646820367498804, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0131, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 0.307, | |
| "grad_norm": 0.14701145754992329, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0186, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 0.3075, | |
| "grad_norm": 0.1493073818813876, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0254, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 0.308, | |
| "grad_norm": 0.1352952895732537, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0181, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 0.3085, | |
| "grad_norm": 0.13007743097982305, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0171, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 0.309, | |
| "grad_norm": 0.1665432351262121, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0228, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 0.3095, | |
| "grad_norm": 0.16442931730443322, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0238, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 0.31, | |
| "grad_norm": 0.16320986192220768, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0238, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 0.3105, | |
| "grad_norm": 0.13880254871235365, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0179, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 0.311, | |
| "grad_norm": 0.13609379700738453, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0195, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 0.3115, | |
| "grad_norm": 0.1368415516519621, | |
| "learning_rate": 1e-05, | |
| "loss": 0.024, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 0.312, | |
| "grad_norm": 0.12821586481120512, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0191, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 0.3125, | |
| "grad_norm": 0.13644927854222083, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0154, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 0.313, | |
| "grad_norm": 0.15158164143556496, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0318, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 0.3135, | |
| "grad_norm": 0.12404550422721679, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0202, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 0.314, | |
| "grad_norm": 0.1235074023832298, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0167, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 0.3145, | |
| "grad_norm": 0.16094487436899907, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0227, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 0.315, | |
| "grad_norm": 0.11086598912590964, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0158, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.3155, | |
| "grad_norm": 0.1147741974179167, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0136, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 0.316, | |
| "grad_norm": 0.12346095617438974, | |
| "learning_rate": 1e-05, | |
| "loss": 0.017, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 0.3165, | |
| "grad_norm": 0.1235267138232638, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0131, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 0.317, | |
| "grad_norm": 0.11979162262432065, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0194, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 0.3175, | |
| "grad_norm": 0.12253729986288973, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0205, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 0.318, | |
| "grad_norm": 0.1374736081434109, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0207, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 0.3185, | |
| "grad_norm": 0.11667911740285354, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0164, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 0.319, | |
| "grad_norm": 0.13725799823509804, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0177, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 0.3195, | |
| "grad_norm": 0.1461325036101512, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0233, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "grad_norm": 0.1486586288765987, | |
| "learning_rate": 1e-05, | |
| "loss": 0.022, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "eval_dev_acc": 0.52734375, | |
| "eval_dev_token": 5585.80859375, | |
| "eval_runtime": 360.5581, | |
| "eval_samples_per_second": 0.178, | |
| "eval_steps_per_second": 0.003, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.3205, | |
| "grad_norm": 0.13037027619165104, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0181, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 0.321, | |
| "grad_norm": 0.1384777662387777, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0284, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 0.3215, | |
| "grad_norm": 0.11731142200376247, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0136, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 0.322, | |
| "grad_norm": 0.13199000719975476, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0147, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 0.3225, | |
| "grad_norm": 0.145679314202878, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0227, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 0.323, | |
| "grad_norm": 0.13813521110883425, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0173, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 0.3235, | |
| "grad_norm": 0.11216370610734963, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0125, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 0.324, | |
| "grad_norm": 0.09898218700430327, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0165, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 0.3245, | |
| "grad_norm": 0.13555813085878698, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0163, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 0.325, | |
| "grad_norm": 0.11552480540546263, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0161, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.3255, | |
| "grad_norm": 0.12028253181729011, | |
| "learning_rate": 1e-05, | |
| "loss": 0.014, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 0.326, | |
| "grad_norm": 0.11773491790468957, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0143, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 0.3265, | |
| "grad_norm": 0.11052002031571376, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0177, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 0.327, | |
| "grad_norm": 0.1251016102612686, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0134, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 0.3275, | |
| "grad_norm": 0.13329112874236815, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0148, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 0.328, | |
| "grad_norm": 0.11382501473889628, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0145, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 0.3285, | |
| "grad_norm": 0.1319495587226548, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0139, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 0.329, | |
| "grad_norm": 0.12070785233460224, | |
| "learning_rate": 1e-05, | |
| "loss": 0.014, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 0.3295, | |
| "grad_norm": 0.12142680030465443, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0159, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 0.33, | |
| "grad_norm": 0.12182666413282645, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0114, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.3305, | |
| "grad_norm": 0.13190056217824564, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0163, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 0.331, | |
| "grad_norm": 0.12804397999806036, | |
| "learning_rate": 1e-05, | |
| "loss": 0.016, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 0.3315, | |
| "grad_norm": 0.1461414693983946, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0179, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 0.332, | |
| "grad_norm": 0.1467248720157808, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0171, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 0.3325, | |
| "grad_norm": 0.1393703645756879, | |
| "learning_rate": 1e-05, | |
| "loss": 0.017, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 0.333, | |
| "grad_norm": 0.1677851073130961, | |
| "learning_rate": 1e-05, | |
| "loss": 0.033, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 0.3335, | |
| "grad_norm": 0.13245384270982133, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0163, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 0.334, | |
| "grad_norm": 0.12289205645457782, | |
| "learning_rate": 1e-05, | |
| "loss": 0.015, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 0.3345, | |
| "grad_norm": 0.13290806824539741, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0131, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 0.335, | |
| "grad_norm": 0.12256221521384754, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0138, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.3355, | |
| "grad_norm": 0.11871572464334247, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0156, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 0.336, | |
| "grad_norm": 0.1425273432040885, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0132, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 0.336, | |
| "eval_dev_acc": 0.5625, | |
| "eval_dev_token": 5492.04296875, | |
| "eval_runtime": 348.043, | |
| "eval_samples_per_second": 0.184, | |
| "eval_steps_per_second": 0.003, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 0.3365, | |
| "grad_norm": 0.12612838576017849, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0127, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 0.337, | |
| "grad_norm": 0.1294118778329942, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0161, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 0.3375, | |
| "grad_norm": 0.11771619437889824, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0142, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 0.338, | |
| "grad_norm": 0.13434389137713848, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0157, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 0.3385, | |
| "grad_norm": 0.2579148520419769, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0322, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 0.339, | |
| "grad_norm": 0.12876645046050272, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0127, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 0.3395, | |
| "grad_norm": 0.12336988858308351, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0114, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 0.34, | |
| "grad_norm": 0.11038801232074134, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0112, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.3405, | |
| "grad_norm": 0.13782079916676085, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0156, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 0.341, | |
| "grad_norm": 0.12593807949317973, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0179, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 0.3415, | |
| "grad_norm": 0.13416291611922937, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0167, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 0.342, | |
| "grad_norm": 0.14107509427243767, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0182, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 0.3425, | |
| "grad_norm": 0.13829290958101634, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0133, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 0.343, | |
| "grad_norm": 0.12072602797225344, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0135, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 0.3435, | |
| "grad_norm": 0.09808985286323638, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0109, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 0.344, | |
| "grad_norm": 0.1237155662701831, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0137, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 0.3445, | |
| "grad_norm": 0.11512509686864711, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0131, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 0.345, | |
| "grad_norm": 0.10310189822258317, | |
| "learning_rate": 1e-05, | |
| "loss": 0.011, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.3455, | |
| "grad_norm": 0.17008589258309467, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0219, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 0.346, | |
| "grad_norm": 0.12175425158539896, | |
| "learning_rate": 1e-05, | |
| "loss": 0.014, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 0.3465, | |
| "grad_norm": 0.11242731302801981, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0142, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 0.347, | |
| "grad_norm": 0.13975508336225442, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0149, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 0.3475, | |
| "grad_norm": 0.08820258618918772, | |
| "learning_rate": 1e-05, | |
| "loss": 0.008, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 0.348, | |
| "grad_norm": 0.1343553646574964, | |
| "learning_rate": 1e-05, | |
| "loss": 0.014, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 0.3485, | |
| "grad_norm": 0.13274844121268298, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0143, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 0.349, | |
| "grad_norm": 0.1412865550899799, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0164, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 0.3495, | |
| "grad_norm": 0.1471608978417448, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0144, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 0.35, | |
| "grad_norm": 0.13861936123681107, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0151, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.3505, | |
| "grad_norm": 0.11790173787959472, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0252, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 0.351, | |
| "grad_norm": 0.12166501219045824, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0145, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 0.3515, | |
| "grad_norm": 0.1082871078184047, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0095, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 0.352, | |
| "grad_norm": 0.12226846848831563, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0141, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 0.352, | |
| "eval_dev_acc": 0.52734375, | |
| "eval_dev_token": 5760.470703125, | |
| "eval_runtime": 408.3746, | |
| "eval_samples_per_second": 0.157, | |
| "eval_steps_per_second": 0.002, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 0.3525, | |
| "grad_norm": 0.09666366634628189, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0097, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 0.353, | |
| "grad_norm": 0.11283217042776503, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0143, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 0.3535, | |
| "grad_norm": 0.12289011751838193, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0173, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 0.354, | |
| "grad_norm": 0.11335657340497375, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0193, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 0.3545, | |
| "grad_norm": 0.12878402333619293, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0198, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 0.355, | |
| "grad_norm": 0.09768170830344952, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0123, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.3555, | |
| "grad_norm": 0.11142711749521704, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0142, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 0.356, | |
| "grad_norm": 0.12263582497303413, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0125, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 0.3565, | |
| "grad_norm": 0.10602173709981424, | |
| "learning_rate": 1e-05, | |
| "loss": 0.013, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 0.357, | |
| "grad_norm": 0.09797092044857854, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0097, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 0.3575, | |
| "grad_norm": 0.10267028455310084, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0153, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 0.358, | |
| "grad_norm": 0.11247269902696218, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0124, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 0.3585, | |
| "grad_norm": 0.10192090643527008, | |
| "learning_rate": 1e-05, | |
| "loss": 0.01, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 0.359, | |
| "grad_norm": 0.15447030723754146, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0191, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 0.3595, | |
| "grad_norm": 0.12653173296866044, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0144, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "grad_norm": 0.09798115770372441, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0113, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.3605, | |
| "grad_norm": 0.13673705245890774, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0176, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 0.361, | |
| "grad_norm": 0.10484924187213118, | |
| "learning_rate": 1e-05, | |
| "loss": 0.011, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 0.3615, | |
| "grad_norm": 0.12026390551992476, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0161, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 0.362, | |
| "grad_norm": 0.12248845158519388, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0135, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 0.3625, | |
| "grad_norm": 0.08242254382606763, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0103, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 0.363, | |
| "grad_norm": 0.1310711546429888, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0213, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 0.3635, | |
| "grad_norm": 0.11504216894290854, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0122, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 0.364, | |
| "grad_norm": 0.12368075631500317, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0184, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 0.3645, | |
| "grad_norm": 0.12437061544598803, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0152, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 0.365, | |
| "grad_norm": 0.13397286445240938, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0158, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.3655, | |
| "grad_norm": 0.0982171426550068, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0096, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 0.366, | |
| "grad_norm": 0.10415731829601732, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0105, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 0.3665, | |
| "grad_norm": 0.13524982542339864, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0183, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 0.367, | |
| "grad_norm": 0.09664339538491498, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0115, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 0.3675, | |
| "grad_norm": 0.0851015567710096, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0112, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 0.368, | |
| "grad_norm": 0.10062979284816816, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0113, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 0.368, | |
| "eval_dev_acc": 0.53515625, | |
| "eval_dev_token": 5898.23046875, | |
| "eval_runtime": 365.57, | |
| "eval_samples_per_second": 0.175, | |
| "eval_steps_per_second": 0.003, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 0.3685, | |
| "grad_norm": 0.11264161967062039, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0108, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 0.369, | |
| "grad_norm": 0.11222433927286389, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0104, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 0.3695, | |
| "grad_norm": 0.20055167671089585, | |
| "learning_rate": 1e-05, | |
| "loss": 0.013, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 0.37, | |
| "grad_norm": 0.10491538007846005, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0131, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.3705, | |
| "grad_norm": 0.09174967069041229, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0095, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 0.371, | |
| "grad_norm": 0.10582756451393364, | |
| "learning_rate": 1e-05, | |
| "loss": 0.012, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 0.3715, | |
| "grad_norm": 0.09194993895839906, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0097, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 0.372, | |
| "grad_norm": 0.08858743590625234, | |
| "learning_rate": 1e-05, | |
| "loss": 0.01, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 0.3725, | |
| "grad_norm": 0.10826764845042156, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0145, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 0.373, | |
| "grad_norm": 0.11033032167695773, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0113, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 0.3735, | |
| "grad_norm": 0.15107498232603372, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0157, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 0.374, | |
| "grad_norm": 0.11411131491498813, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0114, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 0.3745, | |
| "grad_norm": 0.12425138196940645, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0138, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 0.375, | |
| "grad_norm": 0.10647093971928946, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0107, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.3755, | |
| "grad_norm": 0.12413448325714936, | |
| "learning_rate": 1e-05, | |
| "loss": 0.014, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 0.376, | |
| "grad_norm": 0.105340917878943, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0103, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 0.3765, | |
| "grad_norm": 0.11802541191692037, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0127, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 0.377, | |
| "grad_norm": 0.09864810460683521, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0128, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 0.3775, | |
| "grad_norm": 0.12491513055109611, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0102, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 0.378, | |
| "grad_norm": 0.11778790720208179, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0093, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 0.3785, | |
| "grad_norm": 0.11902239371415295, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0115, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 0.379, | |
| "grad_norm": 0.1025498591924567, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0099, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 0.3795, | |
| "grad_norm": 0.14120840112868438, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0123, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 0.38, | |
| "grad_norm": 0.08964665828244849, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0116, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.3805, | |
| "grad_norm": 0.13533754381134197, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0142, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 0.381, | |
| "grad_norm": 0.11151852649444051, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0122, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 0.3815, | |
| "grad_norm": 0.12448616858204287, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0148, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 0.382, | |
| "grad_norm": 0.10160526390201502, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0102, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 0.3825, | |
| "grad_norm": 0.12329039775788013, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0226, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 0.383, | |
| "grad_norm": 0.1706851101549876, | |
| "learning_rate": 1e-05, | |
| "loss": 0.013, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 0.3835, | |
| "grad_norm": 0.11518698920716465, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0136, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 0.384, | |
| "grad_norm": 0.1130084278865893, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0151, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 0.384, | |
| "eval_dev_acc": 0.505859375, | |
| "eval_dev_token": 5660.353515625, | |
| "eval_runtime": 362.2613, | |
| "eval_samples_per_second": 0.177, | |
| "eval_steps_per_second": 0.003, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 0.3845, | |
| "grad_norm": 0.12092852490034717, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0111, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 0.385, | |
| "grad_norm": 0.12041159291779986, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0157, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.3855, | |
| "grad_norm": 0.13135896730332378, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0149, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 0.386, | |
| "grad_norm": 0.09838622926936438, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0113, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 0.3865, | |
| "grad_norm": 0.1118823935585986, | |
| "learning_rate": 1e-05, | |
| "loss": 0.016, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 0.387, | |
| "grad_norm": 0.10549287704509497, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0097, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 0.3875, | |
| "grad_norm": 0.1232106501038667, | |
| "learning_rate": 1e-05, | |
| "loss": 0.013, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 0.388, | |
| "grad_norm": 0.12975165037101072, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0121, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 0.3885, | |
| "grad_norm": 0.13595262265606586, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0126, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 0.389, | |
| "grad_norm": 0.1371181387315353, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0123, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 0.3895, | |
| "grad_norm": 0.13359565264106144, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0153, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 0.39, | |
| "grad_norm": 0.11480062436980976, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0107, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.3905, | |
| "grad_norm": 0.09947922362782227, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0086, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 0.391, | |
| "grad_norm": 0.08734624126570777, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0076, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 0.3915, | |
| "grad_norm": 0.10267346744796824, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0097, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 0.392, | |
| "grad_norm": 0.09576192367034056, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0083, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 0.3925, | |
| "grad_norm": 0.14141610877259214, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0139, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 0.393, | |
| "grad_norm": 0.12071117391984963, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0115, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 0.3935, | |
| "grad_norm": 0.13172358788988653, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0129, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 0.394, | |
| "grad_norm": 0.1345424657278948, | |
| "learning_rate": 1e-05, | |
| "loss": 0.014, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 0.3945, | |
| "grad_norm": 0.1166885723717584, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0125, | |
| "step": 789 | |
| }, | |
| { | |
| "epoch": 0.395, | |
| "grad_norm": 0.1513194581227132, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0145, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.3955, | |
| "grad_norm": 0.11105132556458801, | |
| "learning_rate": 1e-05, | |
| "loss": 0.011, | |
| "step": 791 | |
| }, | |
| { | |
| "epoch": 0.396, | |
| "grad_norm": 0.0989302250949553, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0086, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 0.3965, | |
| "grad_norm": 0.1384321782028591, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0122, | |
| "step": 793 | |
| }, | |
| { | |
| "epoch": 0.397, | |
| "grad_norm": 0.1439582094147518, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0142, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 0.3975, | |
| "grad_norm": 0.11905534302900607, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0102, | |
| "step": 795 | |
| }, | |
| { | |
| "epoch": 0.398, | |
| "grad_norm": 0.1353943654211476, | |
| "learning_rate": 1e-05, | |
| "loss": 0.015, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 0.3985, | |
| "grad_norm": 0.1227008050574365, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0159, | |
| "step": 797 | |
| }, | |
| { | |
| "epoch": 0.399, | |
| "grad_norm": 0.10084479670557596, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0103, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 0.3995, | |
| "grad_norm": 0.11430101131806415, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0128, | |
| "step": 799 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "grad_norm": 0.12720193634725216, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0166, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "eval_dev_acc": 0.49609375, | |
| "eval_dev_token": 5933.87109375, | |
| "eval_runtime": 371.4205, | |
| "eval_samples_per_second": 0.172, | |
| "eval_steps_per_second": 0.003, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.4005, | |
| "grad_norm": 0.10747534966369286, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0113, | |
| "step": 801 | |
| }, | |
| { | |
| "epoch": 0.401, | |
| "grad_norm": 0.09755157150747987, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0094, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 0.4015, | |
| "grad_norm": 0.1093754763997552, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0136, | |
| "step": 803 | |
| }, | |
| { | |
| "epoch": 0.402, | |
| "grad_norm": 0.12186788180512477, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0112, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 0.4025, | |
| "grad_norm": 0.12341274893925132, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0126, | |
| "step": 805 | |
| }, | |
| { | |
| "epoch": 0.403, | |
| "grad_norm": 0.1000051489528446, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0171, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 0.4035, | |
| "grad_norm": 0.09210699513663904, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0097, | |
| "step": 807 | |
| }, | |
| { | |
| "epoch": 0.404, | |
| "grad_norm": 0.12629053026155362, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0126, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 0.4045, | |
| "grad_norm": 0.11979707428750866, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0129, | |
| "step": 809 | |
| }, | |
| { | |
| "epoch": 0.405, | |
| "grad_norm": 0.13240620090939892, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0173, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 0.4055, | |
| "grad_norm": 0.11509826968311627, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0133, | |
| "step": 811 | |
| }, | |
| { | |
| "epoch": 0.406, | |
| "grad_norm": 0.09930674216692448, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0103, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 0.4065, | |
| "grad_norm": 0.12486374889735856, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0152, | |
| "step": 813 | |
| }, | |
| { | |
| "epoch": 0.407, | |
| "grad_norm": 0.09439307719243419, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0113, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 0.4075, | |
| "grad_norm": 0.09010069214916468, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0101, | |
| "step": 815 | |
| }, | |
| { | |
| "epoch": 0.408, | |
| "grad_norm": 0.10807091465236611, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0136, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 0.4085, | |
| "grad_norm": 0.09320554728801374, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0102, | |
| "step": 817 | |
| }, | |
| { | |
| "epoch": 0.409, | |
| "grad_norm": 0.09578310039513883, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0105, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 0.4095, | |
| "grad_norm": 0.10795646602867415, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0141, | |
| "step": 819 | |
| }, | |
| { | |
| "epoch": 0.41, | |
| "grad_norm": 0.11260765818079684, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0123, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 0.4105, | |
| "grad_norm": 0.13162780397133028, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0141, | |
| "step": 821 | |
| }, | |
| { | |
| "epoch": 0.411, | |
| "grad_norm": 0.10884500486087925, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0122, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 0.4115, | |
| "grad_norm": 0.12472839162847292, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0135, | |
| "step": 823 | |
| }, | |
| { | |
| "epoch": 0.412, | |
| "grad_norm": 0.13289735991638021, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0112, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 0.4125, | |
| "grad_norm": 0.12509974441211302, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0256, | |
| "step": 825 | |
| }, | |
| { | |
| "epoch": 0.413, | |
| "grad_norm": 0.12014632147622897, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0113, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 0.4135, | |
| "grad_norm": 0.09172916013688245, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0082, | |
| "step": 827 | |
| }, | |
| { | |
| "epoch": 0.414, | |
| "grad_norm": 0.09305774811224422, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0074, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 0.4145, | |
| "grad_norm": 0.12720506111352092, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0091, | |
| "step": 829 | |
| }, | |
| { | |
| "epoch": 0.415, | |
| "grad_norm": 0.0815065287803298, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0061, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 0.4155, | |
| "grad_norm": 0.09730425117259746, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0079, | |
| "step": 831 | |
| }, | |
| { | |
| "epoch": 0.416, | |
| "grad_norm": 0.09579694307116488, | |
| "learning_rate": 1e-05, | |
| "loss": 0.0098, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 0.416, | |
| "eval_dev_acc": 0.513671875, | |
| "eval_dev_token": 4446.53125, | |
| "eval_runtime": 329.208, | |
| "eval_samples_per_second": 0.194, | |
| "eval_steps_per_second": 0.003, | |
| "step": 832 | |
| } | |
| ], | |
| "logging_steps": 1.0, | |
| "max_steps": 2000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 9223372036854775807, | |
| "save_steps": 32, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 224786117410816.0, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
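
For reference, a minimal sketch of how a log in this shape could be summarized, assuming it is saved locally as `trainer_state.json` (the default file name the Hugging Face Trainer uses for this state object); the metric keys below are taken from the log itself, and the script is an illustration, not part of the training run:

```python
import json

# Assumption: a local copy of the log above, saved as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_dev_acc".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_dev_acc" in e]

print(f"logged steps: {len(train_logs)} of max_steps={state['max_steps']}")
print(f"final train loss: {train_logs[-1]['loss']:.4f} "
      f"(step {train_logs[-1]['step']})")
for e in eval_logs:
    print(f"step {e['step']:>4}  eval_dev_acc={e['eval_dev_acc']:.4f}  "
          f"eval_dev_token={e['eval_dev_token']:.1f}")
```

Run against the section above, this would list the eval checkpoints at steps 736 through 832, where eval_dev_acc ranges between roughly 0.496 and 0.535 while the per-step train loss sits near 0.01.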