{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.18467220683287167,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0030778701138811943,
      "grad_norm": 0.47482776641845703,
      "learning_rate": 4.9999916942231176e-05,
      "loss": 1.1992,
      "num_input_tokens_seen": 42384,
      "step": 5,
      "train_runtime": 19.849,
      "train_tokens_per_second": 2135.325
    },
    {
      "epoch": 0.006155740227762389,
      "grad_norm": 0.24747319519519806,
      "learning_rate": 4.999957952099119e-05,
      "loss": 1.0247,
      "num_input_tokens_seen": 86000,
      "step": 10,
      "train_runtime": 39.0811,
      "train_tokens_per_second": 2200.554
    },
    {
      "epoch": 0.009233610341643583,
      "grad_norm": 0.10441822558641434,
      "learning_rate": 4.9998982548669973e-05,
      "loss": 0.8631,
      "num_input_tokens_seen": 124784,
      "step": 15,
      "train_runtime": 57.1682,
      "train_tokens_per_second": 2182.751
    },
    {
      "epoch": 0.012311480455524777,
      "grad_norm": 0.11058320850133896,
      "learning_rate": 4.999812603146542e-05,
      "loss": 0.9173,
      "num_input_tokens_seen": 165952,
      "step": 20,
      "train_runtime": 75.5733,
      "train_tokens_per_second": 2195.909
    },
    {
      "epoch": 0.015389350569405972,
      "grad_norm": 0.09879708290100098,
      "learning_rate": 4.9997009978270085e-05,
      "loss": 0.8722,
      "num_input_tokens_seen": 201584,
      "step": 25,
      "train_runtime": 92.5588,
      "train_tokens_per_second": 2177.903
    },
    {
      "epoch": 0.018467220683287166,
      "grad_norm": 0.11391397565603256,
      "learning_rate": 4.999563440067108e-05,
      "loss": 0.866,
      "num_input_tokens_seen": 240656,
      "step": 30,
      "train_runtime": 111.1373,
      "train_tokens_per_second": 2165.393
    },
    {
      "epoch": 0.02154509079716836,
      "grad_norm": 0.09722574800252914,
      "learning_rate": 4.9993999312949936e-05,
      "loss": 0.853,
      "num_input_tokens_seen": 287248,
      "step": 35,
      "train_runtime": 130.6684,
      "train_tokens_per_second": 2198.297
    },
    {
      "epoch": 0.024622960911049555,
      "grad_norm": 0.12133017182350159,
      "learning_rate": 4.99921047320825e-05,
      "loss": 0.88,
      "num_input_tokens_seen": 328448,
      "step": 40,
      "train_runtime": 149.193,
      "train_tokens_per_second": 2201.498
    },
    {
      "epoch": 0.027700831024930747,
      "grad_norm": 0.12174953520298004,
      "learning_rate": 4.9989950677738726e-05,
      "loss": 0.8503,
      "num_input_tokens_seen": 370512,
      "step": 45,
      "train_runtime": 168.2236,
      "train_tokens_per_second": 2202.497
    },
    {
      "epoch": 0.030778701138811943,
      "grad_norm": 0.12440948933362961,
      "learning_rate": 4.9987537172282466e-05,
      "loss": 0.8707,
      "num_input_tokens_seen": 409936,
      "step": 50,
      "train_runtime": 186.7223,
      "train_tokens_per_second": 2195.432
    },
    {
      "epoch": 0.03385657125269314,
      "grad_norm": 0.12797962129116058,
      "learning_rate": 4.998486424077127e-05,
      "loss": 0.8475,
      "num_input_tokens_seen": 450928,
      "step": 55,
      "train_runtime": 205.1107,
      "train_tokens_per_second": 2198.462
    },
    {
      "epoch": 0.03693444136657433,
      "grad_norm": 0.09793120622634888,
      "learning_rate": 4.9981931910956086e-05,
      "loss": 0.8409,
      "num_input_tokens_seen": 491152,
      "step": 60,
      "train_runtime": 223.3671,
      "train_tokens_per_second": 2198.856
    },
    {
      "epoch": 0.040012311480455524,
      "grad_norm": 0.10957301408052444,
      "learning_rate": 4.9978740213281005e-05,
      "loss": 0.8524,
      "num_input_tokens_seen": 532160,
      "step": 65,
      "train_runtime": 241.5682,
      "train_tokens_per_second": 2202.939
    },
    {
      "epoch": 0.04309018159433672,
      "grad_norm": 0.12283050268888474,
      "learning_rate": 4.9975289180882936e-05,
      "loss": 0.8944,
      "num_input_tokens_seen": 573392,
      "step": 70,
      "train_runtime": 259.8567,
      "train_tokens_per_second": 2206.57
    },
    {
      "epoch": 0.046168051708217916,
      "grad_norm": 0.1338459849357605,
      "learning_rate": 4.9971578849591225e-05,
      "loss": 0.8478,
      "num_input_tokens_seen": 613488,
      "step": 75,
      "train_runtime": 278.0456,
      "train_tokens_per_second": 2206.429
    },
    {
      "epoch": 0.04924592182209911,
      "grad_norm": 0.10969562828540802,
      "learning_rate": 4.996760925792737e-05,
      "loss": 0.8331,
      "num_input_tokens_seen": 656480,
      "step": 80,
      "train_runtime": 296.3508,
      "train_tokens_per_second": 2215.212
    },
    {
      "epoch": 0.0523237919359803,
      "grad_norm": 0.1411936730146408,
      "learning_rate": 4.996338044710452e-05,
      "loss": 0.8306,
      "num_input_tokens_seen": 698272,
      "step": 85,
      "train_runtime": 315.096,
      "train_tokens_per_second": 2216.061
    },
    {
      "epoch": 0.055401662049861494,
      "grad_norm": 0.1435951143503189,
      "learning_rate": 4.995889246102711e-05,
      "loss": 0.8318,
      "num_input_tokens_seen": 735376,
      "step": 90,
      "train_runtime": 332.4774,
      "train_tokens_per_second": 2211.807
    },
    {
      "epoch": 0.05847953216374269,
      "grad_norm": 0.15133805572986603,
      "learning_rate": 4.995414534629039e-05,
      "loss": 0.8674,
      "num_input_tokens_seen": 772544,
      "step": 95,
      "train_runtime": 350.9252,
      "train_tokens_per_second": 2201.449
    },
    {
      "epoch": 0.061557402277623886,
      "grad_norm": 0.10615513473749161,
      "learning_rate": 4.9949139152179944e-05,
      "loss": 0.901,
      "num_input_tokens_seen": 814928,
      "step": 100,
      "train_runtime": 369.5452,
      "train_tokens_per_second": 2205.219
    },
    {
      "epoch": 0.06463527239150507,
      "grad_norm": 0.10441838204860687,
      "learning_rate": 4.994387393067117e-05,
      "loss": 0.8522,
      "num_input_tokens_seen": 855104,
      "step": 105,
      "train_runtime": 388.7482,
      "train_tokens_per_second": 2199.635
    },
    {
      "epoch": 0.06771314250538628,
      "grad_norm": 0.1405644714832306,
      "learning_rate": 4.993834973642874e-05,
      "loss": 0.8614,
      "num_input_tokens_seen": 896272,
      "step": 110,
      "train_runtime": 406.8129,
      "train_tokens_per_second": 2203.155
    },
    {
      "epoch": 0.07079101261926747,
      "grad_norm": 0.11736899614334106,
      "learning_rate": 4.993256662680604e-05,
      "loss": 0.8371,
      "num_input_tokens_seen": 940864,
      "step": 115,
      "train_runtime": 425.836,
      "train_tokens_per_second": 2209.452
    },
    {
      "epoch": 0.07386888273314866,
      "grad_norm": 0.1269819736480713,
      "learning_rate": 4.992652466184458e-05,
      "loss": 0.9,
      "num_input_tokens_seen": 983360,
      "step": 120,
      "train_runtime": 444.5802,
      "train_tokens_per_second": 2211.884
    },
    {
      "epoch": 0.07694675284702986,
      "grad_norm": 0.10944321751594543,
      "learning_rate": 4.992022390427335e-05,
      "loss": 0.8629,
      "num_input_tokens_seen": 1025120,
      "step": 125,
      "train_runtime": 463.3805,
      "train_tokens_per_second": 2212.264
    },
    {
      "epoch": 0.08002462296091105,
      "grad_norm": 0.1438760608434677,
      "learning_rate": 4.99136644195082e-05,
      "loss": 0.8797,
      "num_input_tokens_seen": 1065360,
      "step": 130,
      "train_runtime": 480.9152,
      "train_tokens_per_second": 2215.276
    },
    {
      "epoch": 0.08310249307479224,
      "grad_norm": 0.13859188556671143,
      "learning_rate": 4.9906846275651125e-05,
      "loss": 0.8317,
      "num_input_tokens_seen": 1106112,
      "step": 135,
      "train_runtime": 498.8557,
      "train_tokens_per_second": 2217.299
    },
    {
      "epoch": 0.08618036318867343,
      "grad_norm": 0.1412304937839508,
      "learning_rate": 4.989976954348958e-05,
      "loss": 0.8422,
      "num_input_tokens_seen": 1149632,
      "step": 140,
      "train_runtime": 517.2481,
      "train_tokens_per_second": 2222.593
    },
    {
      "epoch": 0.08925823330255463,
      "grad_norm": 0.1513959765434265,
      "learning_rate": 4.989243429649573e-05,
      "loss": 0.8403,
      "num_input_tokens_seen": 1187824,
      "step": 145,
      "train_runtime": 535.2196,
      "train_tokens_per_second": 2219.321
    },
    {
      "epoch": 0.09233610341643583,
      "grad_norm": 0.13685107231140137,
      "learning_rate": 4.988484061082572e-05,
      "loss": 0.8961,
      "num_input_tokens_seen": 1229088,
      "step": 150,
      "train_runtime": 553.4777,
      "train_tokens_per_second": 2220.664
    },
    {
      "epoch": 0.09541397353031703,
      "grad_norm": 0.12999333441257477,
      "learning_rate": 4.987698856531884e-05,
      "loss": 0.814,
      "num_input_tokens_seen": 1273056,
      "step": 155,
      "train_runtime": 572.9621,
      "train_tokens_per_second": 2221.885
    },
    {
      "epoch": 0.09849184364419822,
      "grad_norm": 0.1481194943189621,
      "learning_rate": 4.986887824149674e-05,
      "loss": 0.8542,
      "num_input_tokens_seen": 1310288,
      "step": 160,
      "train_runtime": 589.8405,
      "train_tokens_per_second": 2221.427
    },
    {
      "epoch": 0.10156971375807941,
      "grad_norm": 0.1396579146385193,
      "learning_rate": 4.9860509723562573e-05,
      "loss": 0.8722,
      "num_input_tokens_seen": 1351312,
      "step": 165,
      "train_runtime": 607.8908,
      "train_tokens_per_second": 2222.952
    },
    {
      "epoch": 0.1046475838719606,
      "grad_norm": 0.12688206136226654,
      "learning_rate": 4.985188309840012e-05,
      "loss": 0.8587,
      "num_input_tokens_seen": 1396336,
      "step": 170,
      "train_runtime": 626.8902,
      "train_tokens_per_second": 2227.401
    },
    {
      "epoch": 0.1077254539858418,
      "grad_norm": 0.13825534284114838,
      "learning_rate": 4.984299845557287e-05,
      "loss": 0.8014,
      "num_input_tokens_seen": 1437984,
      "step": 175,
      "train_runtime": 645.2717,
      "train_tokens_per_second": 2228.494
    },
    {
      "epoch": 0.11080332409972299,
      "grad_norm": 0.12385495752096176,
      "learning_rate": 4.983385588732312e-05,
      "loss": 0.8385,
      "num_input_tokens_seen": 1475360,
      "step": 180,
      "train_runtime": 662.6866,
      "train_tokens_per_second": 2226.332
    },
    {
      "epoch": 0.11388119421360418,
      "grad_norm": 0.12246419489383698,
      "learning_rate": 4.982445548857102e-05,
      "loss": 0.8372,
      "num_input_tokens_seen": 1513152,
      "step": 185,
      "train_runtime": 680.3316,
      "train_tokens_per_second": 2224.139
    },
    {
      "epoch": 0.11695906432748537,
      "grad_norm": 0.12281708419322968,
      "learning_rate": 4.981479735691354e-05,
      "loss": 0.8446,
      "num_input_tokens_seen": 1556256,
      "step": 190,
      "train_runtime": 698.7652,
      "train_tokens_per_second": 2227.152
    },
    {
      "epoch": 0.12003693444136658,
      "grad_norm": 0.13810615241527557,
      "learning_rate": 4.980488159262353e-05,
      "loss": 0.8141,
      "num_input_tokens_seen": 1598832,
      "step": 195,
      "train_runtime": 717.4389,
      "train_tokens_per_second": 2228.527
    },
    {
      "epoch": 0.12311480455524777,
      "grad_norm": 0.15435761213302612,
      "learning_rate": 4.97947082986486e-05,
      "loss": 0.8398,
      "num_input_tokens_seen": 1641712,
      "step": 200,
      "train_runtime": 736.4476,
      "train_tokens_per_second": 2229.231
    },
    {
      "epoch": 0.12619267466912895,
      "grad_norm": 0.15266934037208557,
      "learning_rate": 4.978427758061009e-05,
      "loss": 0.8714,
      "num_input_tokens_seen": 1684544,
      "step": 205,
      "train_runtime": 756.7894,
      "train_tokens_per_second": 2225.908
    },
    {
      "epoch": 0.12927054478301014,
      "grad_norm": 0.13250574469566345,
      "learning_rate": 4.977358954680202e-05,
      "loss": 0.8116,
      "num_input_tokens_seen": 1723120,
      "step": 210,
      "train_runtime": 774.2541,
      "train_tokens_per_second": 2225.523
    },
    {
      "epoch": 0.13234841489689136,
      "grad_norm": 0.11579591035842896,
      "learning_rate": 4.9762644308189865e-05,
      "loss": 0.8204,
      "num_input_tokens_seen": 1768096,
      "step": 215,
      "train_runtime": 793.0535,
      "train_tokens_per_second": 2229.479
    },
    {
      "epoch": 0.13542628501077256,
      "grad_norm": 0.12654677033424377,
      "learning_rate": 4.9751441978409484e-05,
      "loss": 0.8334,
      "num_input_tokens_seen": 1808176,
      "step": 220,
      "train_runtime": 810.905,
      "train_tokens_per_second": 2229.825
    },
    {
      "epoch": 0.13850415512465375,
      "grad_norm": 0.15607228875160217,
      "learning_rate": 4.97399826737659e-05,
      "loss": 0.8177,
      "num_input_tokens_seen": 1848480,
      "step": 225,
      "train_runtime": 829.3126,
      "train_tokens_per_second": 2228.93
    },
    {
      "epoch": 0.14158202523853494,
      "grad_norm": 0.1535269021987915,
      "learning_rate": 4.972826651323211e-05,
      "loss": 0.807,
      "num_input_tokens_seen": 1889168,
      "step": 230,
      "train_runtime": 847.0449,
      "train_tokens_per_second": 2230.304
    },
    {
      "epoch": 0.14465989535241613,
      "grad_norm": 0.13663041591644287,
      "learning_rate": 4.971629361844785e-05,
      "loss": 0.8557,
      "num_input_tokens_seen": 1930784,
      "step": 235,
      "train_runtime": 865.4457,
      "train_tokens_per_second": 2230.971
    },
    {
      "epoch": 0.14773776546629733,
      "grad_norm": 0.1719619184732437,
      "learning_rate": 4.9704064113718326e-05,
      "loss": 0.8436,
      "num_input_tokens_seen": 1974176,
      "step": 240,
      "train_runtime": 883.9962,
      "train_tokens_per_second": 2233.24
    },
    {
      "epoch": 0.15081563558017852,
      "grad_norm": 0.15830345451831818,
      "learning_rate": 4.9691578126012905e-05,
      "loss": 0.853,
      "num_input_tokens_seen": 2013008,
      "step": 245,
      "train_runtime": 902.1681,
      "train_tokens_per_second": 2231.3
    },
    {
      "epoch": 0.1538935056940597,
      "grad_norm": 0.14472956955432892,
      "learning_rate": 4.967883578496385e-05,
      "loss": 0.874,
      "num_input_tokens_seen": 2053328,
      "step": 250,
      "train_runtime": 919.7822,
      "train_tokens_per_second": 2232.407
    },
    {
      "epoch": 0.1569713758079409,
      "grad_norm": 0.13329900801181793,
      "learning_rate": 4.966583722286491e-05,
      "loss": 0.8333,
      "num_input_tokens_seen": 2094992,
      "step": 255,
      "train_runtime": 937.9537,
      "train_tokens_per_second": 2233.577
    },
    {
      "epoch": 0.1600492459218221,
      "grad_norm": 0.1551639437675476,
      "learning_rate": 4.9652582574669995e-05,
      "loss": 0.864,
      "num_input_tokens_seen": 2135376,
      "step": 260,
      "train_runtime": 955.8955,
      "train_tokens_per_second": 2233.901
    },
    {
      "epoch": 0.1631271160357033,
      "grad_norm": 0.12460612505674362,
      "learning_rate": 4.963907197799174e-05,
      "loss": 0.8321,
      "num_input_tokens_seen": 2176624,
      "step": 265,
      "train_runtime": 974.3283,
      "train_tokens_per_second": 2233.974
    },
    {
      "epoch": 0.16620498614958448,
      "grad_norm": 0.13981439173221588,
      "learning_rate": 4.9625305573100115e-05,
      "loss": 0.8349,
      "num_input_tokens_seen": 2220496,
      "step": 270,
      "train_runtime": 992.5551,
      "train_tokens_per_second": 2237.151
    },
    {
      "epoch": 0.16928285626346568,
      "grad_norm": 0.15424658358097076,
      "learning_rate": 4.961128350292094e-05,
      "loss": 0.8248,
      "num_input_tokens_seen": 2263808,
      "step": 275,
      "train_runtime": 1010.9218,
      "train_tokens_per_second": 2239.35
    },
    {
      "epoch": 0.17236072637734687,
      "grad_norm": 0.14288848638534546,
      "learning_rate": 4.959700591303438e-05,
      "loss": 0.8488,
      "num_input_tokens_seen": 2311696,
      "step": 280,
      "train_runtime": 1030.8143,
      "train_tokens_per_second": 2242.592
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 0.15639737248420715,
      "learning_rate": 4.95824729516735e-05,
      "loss": 0.8602,
      "num_input_tokens_seen": 2353200,
      "step": 285,
      "train_runtime": 1049.2401,
      "train_tokens_per_second": 2242.766
    },
    {
      "epoch": 0.17851646660510925,
      "grad_norm": 0.1268603354692459,
      "learning_rate": 4.9567684769722664e-05,
      "loss": 0.8987,
      "num_input_tokens_seen": 2396000,
      "step": 290,
      "train_runtime": 1067.7324,
      "train_tokens_per_second": 2244.008
    },
    {
      "epoch": 0.18159433671899045,
      "grad_norm": 0.14782211184501648,
      "learning_rate": 4.9552641520716014e-05,
      "loss": 0.7794,
      "num_input_tokens_seen": 2438032,
      "step": 295,
      "train_runtime": 1085.9257,
      "train_tokens_per_second": 2245.119
    },
    {
      "epoch": 0.18467220683287167,
      "grad_norm": 0.15259359776973724,
      "learning_rate": 4.953734336083583e-05,
      "loss": 0.8648,
      "num_input_tokens_seen": 2478048,
      "step": 300,
      "train_runtime": 1103.5538,
      "train_tokens_per_second": 2245.516
    }
  ],
  "logging_steps": 5,
  "max_steps": 4875,
  "num_input_tokens_seen": 2478048,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.765645728959693e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}