| { |
| "best_global_step": 203, |
| "best_metric": 0.1502237617969513, |
| "best_model_checkpoint": "saves_stability/lntuning/llama-3-8b-instruct/train_cb_1757340268/checkpoint-203", |
| "epoch": 10.0, |
| "eval_steps": 29, |
| "global_step": 570, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08771929824561403, |
| "grad_norm": 8.419689178466797, |
| "learning_rate": 3.5087719298245615e-06, |
| "loss": 1.0048, |
| "num_input_tokens_seen": 3648, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.17543859649122806, |
| "grad_norm": 7.77525520324707, |
| "learning_rate": 7.894736842105263e-06, |
| "loss": 1.2235, |
| "num_input_tokens_seen": 6784, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.2631578947368421, |
| "grad_norm": 6.987992286682129, |
| "learning_rate": 1.2280701754385964e-05, |
| "loss": 0.9362, |
| "num_input_tokens_seen": 10496, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.3508771929824561, |
| "grad_norm": 7.276936054229736, |
| "learning_rate": 1.6666666666666667e-05, |
| "loss": 1.1086, |
| "num_input_tokens_seen": 13728, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.43859649122807015, |
| "grad_norm": 7.182842254638672, |
| "learning_rate": 2.105263157894737e-05, |
| "loss": 0.8304, |
| "num_input_tokens_seen": 17632, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.5087719298245614, |
| "eval_loss": 0.8174957633018494, |
| "eval_runtime": 0.6439, |
| "eval_samples_per_second": 38.823, |
| "eval_steps_per_second": 10.871, |
| "num_input_tokens_seen": 19872, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.5263157894736842, |
| "grad_norm": 6.833621025085449, |
| "learning_rate": 2.5438596491228074e-05, |
| "loss": 0.8297, |
| "num_input_tokens_seen": 20576, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.6140350877192983, |
| "grad_norm": 8.28726577758789, |
| "learning_rate": 2.9824561403508772e-05, |
| "loss": 0.8428, |
| "num_input_tokens_seen": 23232, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.7017543859649122, |
| "grad_norm": 3.570448637008667, |
| "learning_rate": 3.421052631578947e-05, |
| "loss": 0.6095, |
| "num_input_tokens_seen": 26016, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7894736842105263, |
| "grad_norm": 3.467130184173584, |
| "learning_rate": 3.859649122807018e-05, |
| "loss": 0.4674, |
| "num_input_tokens_seen": 29344, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8771929824561403, |
| "grad_norm": 4.636632919311523, |
| "learning_rate": 4.298245614035088e-05, |
| "loss": 0.4591, |
| "num_input_tokens_seen": 32512, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9649122807017544, |
| "grad_norm": 3.2921230792999268, |
| "learning_rate": 4.736842105263158e-05, |
| "loss": 0.2874, |
| "num_input_tokens_seen": 35168, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0175438596491229, |
| "eval_loss": 0.30970117449760437, |
| "eval_runtime": 0.6537, |
| "eval_samples_per_second": 38.241, |
| "eval_steps_per_second": 10.708, |
| "num_input_tokens_seen": 36432, |
| "step": 58 |
| }, |
| { |
| "epoch": 1.0526315789473684, |
| "grad_norm": 4.628859519958496, |
| "learning_rate": 4.999812487773597e-05, |
| "loss": 0.4322, |
| "num_input_tokens_seen": 37968, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1403508771929824, |
| "grad_norm": 1.065915584564209, |
| "learning_rate": 4.997703298253406e-05, |
| "loss": 0.25, |
| "num_input_tokens_seen": 41168, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.2280701754385965, |
| "grad_norm": 0.8528752326965332, |
| "learning_rate": 4.993252512887069e-05, |
| "loss": 0.0878, |
| "num_input_tokens_seen": 44240, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3157894736842106, |
| "grad_norm": 0.8932129740715027, |
| "learning_rate": 4.986464304284091e-05, |
| "loss": 0.1826, |
| "num_input_tokens_seen": 47120, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.4035087719298245, |
| "grad_norm": 2.2197389602661133, |
| "learning_rate": 4.977345036387331e-05, |
| "loss": 0.258, |
| "num_input_tokens_seen": 49968, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.4912280701754386, |
| "grad_norm": 2.0195207595825195, |
| "learning_rate": 4.965903258506806e-05, |
| "loss": 0.117, |
| "num_input_tokens_seen": 52496, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.526315789473684, |
| "eval_loss": 0.20078764855861664, |
| "eval_runtime": 0.6576, |
| "eval_samples_per_second": 38.015, |
| "eval_steps_per_second": 10.644, |
| "num_input_tokens_seen": 53680, |
| "step": 87 |
| }, |
| { |
| "epoch": 1.5789473684210527, |
| "grad_norm": 0.4766016900539398, |
| "learning_rate": 4.952149697304716e-05, |
| "loss": 0.2009, |
| "num_input_tokens_seen": 55504, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 5.330173015594482, |
| "learning_rate": 4.9360972467392056e-05, |
| "loss": 0.1262, |
| "num_input_tokens_seen": 59504, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.7543859649122808, |
| "grad_norm": 3.807037591934204, |
| "learning_rate": 4.917760955976277e-05, |
| "loss": 0.1336, |
| "num_input_tokens_seen": 62096, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8421052631578947, |
| "grad_norm": 7.197997093200684, |
| "learning_rate": 4.897158015281209e-05, |
| "loss": 0.1312, |
| "num_input_tokens_seen": 65808, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9298245614035088, |
| "grad_norm": 0.5533033013343811, |
| "learning_rate": 4.874307739902689e-05, |
| "loss": 0.1662, |
| "num_input_tokens_seen": 68848, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.017543859649123, |
| "grad_norm": 4.714714050292969, |
| "learning_rate": 4.849231551964771e-05, |
| "loss": 0.158, |
| "num_input_tokens_seen": 71488, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.0350877192982457, |
| "eval_loss": 0.18163363635540009, |
| "eval_runtime": 0.6528, |
| "eval_samples_per_second": 38.296, |
| "eval_steps_per_second": 10.723, |
| "num_input_tokens_seen": 72160, |
| "step": 116 |
| }, |
| { |
| "epoch": 2.1052631578947367, |
| "grad_norm": 0.2130383849143982, |
| "learning_rate": 4.821952960383649e-05, |
| "loss": 0.1309, |
| "num_input_tokens_seen": 74528, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.192982456140351, |
| "grad_norm": 3.4489381313323975, |
| "learning_rate": 4.7924975388280524e-05, |
| "loss": 0.3158, |
| "num_input_tokens_seen": 78112, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.280701754385965, |
| "grad_norm": 1.2480239868164062, |
| "learning_rate": 4.760892901743944e-05, |
| "loss": 0.0757, |
| "num_input_tokens_seen": 81280, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.3684210526315788, |
| "grad_norm": 0.8971278667449951, |
| "learning_rate": 4.727168678465988e-05, |
| "loss": 0.0189, |
| "num_input_tokens_seen": 84384, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.456140350877193, |
| "grad_norm": 3.3730406761169434, |
| "learning_rate": 4.6913564854400595e-05, |
| "loss": 0.1842, |
| "num_input_tokens_seen": 88448, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "grad_norm": 4.154754161834717, |
| "learning_rate": 4.6534898965828405e-05, |
| "loss": 0.0625, |
| "num_input_tokens_seen": 91904, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "eval_loss": 0.1618446558713913, |
| "eval_runtime": 0.6574, |
| "eval_samples_per_second": 38.03, |
| "eval_steps_per_second": 10.649, |
| "num_input_tokens_seen": 91904, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.6315789473684212, |
| "grad_norm": 0.3468534052371979, |
| "learning_rate": 4.613604411806285e-05, |
| "loss": 0.0286, |
| "num_input_tokens_seen": 94944, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.719298245614035, |
| "grad_norm": 1.233839511871338, |
| "learning_rate": 4.5717374237364665e-05, |
| "loss": 0.1841, |
| "num_input_tokens_seen": 98368, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.807017543859649, |
| "grad_norm": 2.2880122661590576, |
| "learning_rate": 4.5279281826580056e-05, |
| "loss": 0.1765, |
| "num_input_tokens_seen": 100992, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.8947368421052633, |
| "grad_norm": 0.87679123878479, |
| "learning_rate": 4.482217759716946e-05, |
| "loss": 0.0863, |
| "num_input_tokens_seen": 103936, |
| "step": 165 |
| }, |
| { |
| "epoch": 2.982456140350877, |
| "grad_norm": 7.344095706939697, |
| "learning_rate": 4.434649008416565e-05, |
| "loss": 0.362, |
| "num_input_tokens_seen": 107296, |
| "step": 170 |
| }, |
| { |
| "epoch": 3.0526315789473686, |
| "eval_loss": 0.16179159283638, |
| "eval_runtime": 0.6557, |
| "eval_samples_per_second": 38.125, |
| "eval_steps_per_second": 10.675, |
| "num_input_tokens_seen": 108856, |
| "step": 174 |
| }, |
| { |
| "epoch": 3.0701754385964914, |
| "grad_norm": 4.339635372161865, |
| "learning_rate": 4.385266524442241e-05, |
| "loss": 0.121, |
| "num_input_tokens_seen": 109720, |
| "step": 175 |
| }, |
| { |
| "epoch": 3.1578947368421053, |
| "grad_norm": 0.19763872027397156, |
| "learning_rate": 4.334116603853007e-05, |
| "loss": 0.0961, |
| "num_input_tokens_seen": 112984, |
| "step": 180 |
| }, |
| { |
| "epoch": 3.245614035087719, |
| "grad_norm": 1.7084189653396606, |
| "learning_rate": 4.2812471996790206e-05, |
| "loss": 0.0222, |
| "num_input_tokens_seen": 116120, |
| "step": 185 |
| }, |
| { |
| "epoch": 3.3333333333333335, |
| "grad_norm": 3.6600637435913086, |
| "learning_rate": 4.226707876965611e-05, |
| "loss": 0.1205, |
| "num_input_tokens_seen": 119192, |
| "step": 190 |
| }, |
| { |
| "epoch": 3.4210526315789473, |
| "grad_norm": 1.23030686378479, |
| "learning_rate": 4.1705497663060767e-05, |
| "loss": 0.1413, |
| "num_input_tokens_seen": 122840, |
| "step": 195 |
| }, |
| { |
| "epoch": 3.5087719298245617, |
| "grad_norm": 3.6347780227661133, |
| "learning_rate": 4.1128255159067665e-05, |
| "loss": 0.2499, |
| "num_input_tokens_seen": 126328, |
| "step": 200 |
| }, |
| { |
| "epoch": 3.56140350877193, |
| "eval_loss": 0.1502237617969513, |
| "eval_runtime": 0.6543, |
| "eval_samples_per_second": 38.211, |
| "eval_steps_per_second": 10.699, |
| "num_input_tokens_seen": 128056, |
| "step": 203 |
| }, |
| { |
| "epoch": 3.5964912280701755, |
| "grad_norm": 0.6625131964683533, |
| "learning_rate": 4.053589242229412e-05, |
| "loss": 0.138, |
| "num_input_tokens_seen": 129272, |
| "step": 205 |
| }, |
| { |
| "epoch": 3.6842105263157894, |
| "grad_norm": 0.4167878329753876, |
| "learning_rate": 3.9928964792569655e-05, |
| "loss": 0.0504, |
| "num_input_tokens_seen": 131992, |
| "step": 210 |
| }, |
| { |
| "epoch": 3.7719298245614032, |
| "grad_norm": 0.561962902545929, |
| "learning_rate": 3.930804126430513e-05, |
| "loss": 0.0656, |
| "num_input_tokens_seen": 135960, |
| "step": 215 |
| }, |
| { |
| "epoch": 3.8596491228070176, |
| "grad_norm": 0.3668616712093353, |
| "learning_rate": 3.867370395306068e-05, |
| "loss": 0.2001, |
| "num_input_tokens_seen": 139032, |
| "step": 220 |
| }, |
| { |
| "epoch": 3.9473684210526314, |
| "grad_norm": 2.85788631439209, |
| "learning_rate": 3.8026547549812665e-05, |
| "loss": 0.1953, |
| "num_input_tokens_seen": 142328, |
| "step": 225 |
| }, |
| { |
| "epoch": 4.035087719298246, |
| "grad_norm": 0.46621087193489075, |
| "learning_rate": 3.736717876343106e-05, |
| "loss": 0.0416, |
| "num_input_tokens_seen": 145096, |
| "step": 230 |
| }, |
| { |
| "epoch": 4.0701754385964914, |
| "eval_loss": 0.15883901715278625, |
| "eval_runtime": 0.6566, |
| "eval_samples_per_second": 38.075, |
| "eval_steps_per_second": 10.661, |
| "num_input_tokens_seen": 146952, |
| "step": 232 |
| }, |
| { |
| "epoch": 4.12280701754386, |
| "grad_norm": 7.326401233673096, |
| "learning_rate": 3.66962157518902e-05, |
| "loss": 0.2408, |
| "num_input_tokens_seen": 148616, |
| "step": 235 |
| }, |
| { |
| "epoch": 4.2105263157894735, |
| "grad_norm": 2.0576703548431396, |
| "learning_rate": 3.601428754274584e-05, |
| "loss": 0.059, |
| "num_input_tokens_seen": 152008, |
| "step": 240 |
| }, |
| { |
| "epoch": 4.298245614035087, |
| "grad_norm": 0.06171449273824692, |
| "learning_rate": 3.532203344342212e-05, |
| "loss": 0.0619, |
| "num_input_tokens_seen": 155272, |
| "step": 245 |
| }, |
| { |
| "epoch": 4.385964912280702, |
| "grad_norm": 4.354040145874023, |
| "learning_rate": 3.4620102441861143e-05, |
| "loss": 0.1169, |
| "num_input_tokens_seen": 158440, |
| "step": 250 |
| }, |
| { |
| "epoch": 4.473684210526316, |
| "grad_norm": 7.2059855461120605, |
| "learning_rate": 3.390915259809696e-05, |
| "loss": 0.2469, |
| "num_input_tokens_seen": 161192, |
| "step": 255 |
| }, |
| { |
| "epoch": 4.56140350877193, |
| "grad_norm": 0.11705178767442703, |
| "learning_rate": 3.318985042732461e-05, |
| "loss": 0.0798, |
| "num_input_tokens_seen": 164520, |
| "step": 260 |
| }, |
| { |
| "epoch": 4.578947368421053, |
| "eval_loss": 0.17170119285583496, |
| "eval_runtime": 0.6572, |
| "eval_samples_per_second": 38.039, |
| "eval_steps_per_second": 10.651, |
| "num_input_tokens_seen": 165128, |
| "step": 261 |
| }, |
| { |
| "epoch": 4.649122807017544, |
| "grad_norm": 0.23191994428634644, |
| "learning_rate": 3.246287027504237e-05, |
| "loss": 0.0962, |
| "num_input_tokens_seen": 167336, |
| "step": 265 |
| }, |
| { |
| "epoch": 4.7368421052631575, |
| "grad_norm": 2.4364829063415527, |
| "learning_rate": 3.172889368485311e-05, |
| "loss": 0.0787, |
| "num_input_tokens_seen": 170216, |
| "step": 270 |
| }, |
| { |
| "epoch": 4.824561403508772, |
| "grad_norm": 0.5115949511528015, |
| "learning_rate": 3.0988608759517475e-05, |
| "loss": 0.0783, |
| "num_input_tokens_seen": 173192, |
| "step": 275 |
| }, |
| { |
| "epoch": 4.912280701754386, |
| "grad_norm": 1.0266733169555664, |
| "learning_rate": 3.0242709515857758e-05, |
| "loss": 0.0813, |
| "num_input_tokens_seen": 177320, |
| "step": 280 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 0.2932557165622711, |
| "learning_rate": 2.949189523411747e-05, |
| "loss": 0.1067, |
| "num_input_tokens_seen": 180120, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "grad_norm": 3.702965497970581, |
| "learning_rate": 2.8736869802386364e-05, |
| "loss": 0.0694, |
| "num_input_tokens_seen": 183224, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "eval_loss": 0.18254603445529938, |
| "eval_runtime": 0.6644, |
| "eval_samples_per_second": 37.63, |
| "eval_steps_per_second": 10.536, |
| "num_input_tokens_seen": 183224, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.175438596491228, |
| "grad_norm": 0.27445188164711, |
| "learning_rate": 2.797834105670559e-05, |
| "loss": 0.1348, |
| "num_input_tokens_seen": 186264, |
| "step": 295 |
| }, |
| { |
| "epoch": 5.2631578947368425, |
| "grad_norm": 1.4116119146347046, |
| "learning_rate": 2.7217020117471793e-05, |
| "loss": 0.1762, |
| "num_input_tokens_seen": 189912, |
| "step": 300 |
| }, |
| { |
| "epoch": 5.350877192982456, |
| "grad_norm": 1.2603745460510254, |
| "learning_rate": 2.6453620722761896e-05, |
| "loss": 0.0784, |
| "num_input_tokens_seen": 193048, |
| "step": 305 |
| }, |
| { |
| "epoch": 5.43859649122807, |
| "grad_norm": 0.3875841200351715, |
| "learning_rate": 2.5688858559204053e-05, |
| "loss": 0.2787, |
| "num_input_tokens_seen": 196248, |
| "step": 310 |
| }, |
| { |
| "epoch": 5.526315789473684, |
| "grad_norm": 0.2111719846725464, |
| "learning_rate": 2.492345059102164e-05, |
| "loss": 0.009, |
| "num_input_tokens_seen": 200152, |
| "step": 315 |
| }, |
| { |
| "epoch": 5.5964912280701755, |
| "eval_loss": 0.17512007057666779, |
| "eval_runtime": 0.6626, |
| "eval_samples_per_second": 37.732, |
| "eval_steps_per_second": 10.565, |
| "num_input_tokens_seen": 202424, |
| "step": 319 |
| }, |
| { |
| "epoch": 5.614035087719298, |
| "grad_norm": 3.805328607559204, |
| "learning_rate": 2.4158114387879616e-05, |
| "loss": 0.1108, |
| "num_input_tokens_seen": 203096, |
| "step": 320 |
| }, |
| { |
| "epoch": 5.701754385964913, |
| "grad_norm": 3.2345309257507324, |
| "learning_rate": 2.3393567452163252e-05, |
| "loss": 0.0487, |
| "num_input_tokens_seen": 206328, |
| "step": 325 |
| }, |
| { |
| "epoch": 5.7894736842105265, |
| "grad_norm": 2.567685842514038, |
| "learning_rate": 2.2630526546319914e-05, |
| "loss": 0.0622, |
| "num_input_tokens_seen": 209080, |
| "step": 330 |
| }, |
| { |
| "epoch": 5.87719298245614, |
| "grad_norm": 3.76271915435791, |
| "learning_rate": 2.186970702089457e-05, |
| "loss": 0.0812, |
| "num_input_tokens_seen": 212760, |
| "step": 335 |
| }, |
| { |
| "epoch": 5.964912280701754, |
| "grad_norm": 0.10636137425899506, |
| "learning_rate": 2.111182214388893e-05, |
| "loss": 0.0558, |
| "num_input_tokens_seen": 215160, |
| "step": 340 |
| }, |
| { |
| "epoch": 6.052631578947368, |
| "grad_norm": 2.1713950634002686, |
| "learning_rate": 2.0357582432072957e-05, |
| "loss": 0.0798, |
| "num_input_tokens_seen": 218336, |
| "step": 345 |
| }, |
| { |
| "epoch": 6.105263157894737, |
| "eval_loss": 0.18006631731987, |
| "eval_runtime": 0.6561, |
| "eval_samples_per_second": 38.101, |
| "eval_steps_per_second": 10.668, |
| "num_input_tokens_seen": 220000, |
| "step": 348 |
| }, |
| { |
| "epoch": 6.140350877192983, |
| "grad_norm": 0.2751257121562958, |
| "learning_rate": 1.9607694984875754e-05, |
| "loss": 0.0206, |
| "num_input_tokens_seen": 221280, |
| "step": 350 |
| }, |
| { |
| "epoch": 6.228070175438597, |
| "grad_norm": 4.440626621246338, |
| "learning_rate": 1.8862862821480025e-05, |
| "loss": 0.257, |
| "num_input_tokens_seen": 224256, |
| "step": 355 |
| }, |
| { |
| "epoch": 6.315789473684211, |
| "grad_norm": 3.3203134536743164, |
| "learning_rate": 1.8123784221741964e-05, |
| "loss": 0.2628, |
| "num_input_tokens_seen": 227392, |
| "step": 360 |
| }, |
| { |
| "epoch": 6.4035087719298245, |
| "grad_norm": 1.7902897596359253, |
| "learning_rate": 1.73911520715541e-05, |
| "loss": 0.0131, |
| "num_input_tokens_seen": 230432, |
| "step": 365 |
| }, |
| { |
| "epoch": 6.491228070175438, |
| "grad_norm": 0.14445427060127258, |
| "learning_rate": 1.666565321326512e-05, |
| "loss": 0.0575, |
| "num_input_tokens_seen": 233568, |
| "step": 370 |
| }, |
| { |
| "epoch": 6.578947368421053, |
| "grad_norm": 5.153473854064941, |
| "learning_rate": 1.5947967801765345e-05, |
| "loss": 0.1092, |
| "num_input_tokens_seen": 236736, |
| "step": 375 |
| }, |
| { |
| "epoch": 6.614035087719298, |
| "eval_loss": 0.1764644831418991, |
| "eval_runtime": 0.653, |
| "eval_samples_per_second": 38.286, |
| "eval_steps_per_second": 10.72, |
| "num_input_tokens_seen": 238272, |
| "step": 377 |
| }, |
| { |
| "epoch": 6.666666666666667, |
| "grad_norm": 0.7887470722198486, |
| "learning_rate": 1.5238768666841907e-05, |
| "loss": 0.018, |
| "num_input_tokens_seen": 240288, |
| "step": 380 |
| }, |
| { |
| "epoch": 6.754385964912281, |
| "grad_norm": 0.19865164160728455, |
| "learning_rate": 1.4538720682400969e-05, |
| "loss": 0.011, |
| "num_input_tokens_seen": 243392, |
| "step": 385 |
| }, |
| { |
| "epoch": 6.842105263157895, |
| "grad_norm": 6.901030540466309, |
| "learning_rate": 1.3848480143148839e-05, |
| "loss": 0.2135, |
| "num_input_tokens_seen": 246496, |
| "step": 390 |
| }, |
| { |
| "epoch": 6.9298245614035086, |
| "grad_norm": 1.5162415504455566, |
| "learning_rate": 1.3168694149315796e-05, |
| "loss": 0.0369, |
| "num_input_tokens_seen": 250080, |
| "step": 395 |
| }, |
| { |
| "epoch": 7.017543859649122, |
| "grad_norm": 0.20012938976287842, |
| "learning_rate": 1.2500000000000006e-05, |
| "loss": 0.0329, |
| "num_input_tokens_seen": 252592, |
| "step": 400 |
| }, |
| { |
| "epoch": 7.105263157894737, |
| "grad_norm": 4.714991569519043, |
| "learning_rate": 1.1843024595699805e-05, |
| "loss": 0.0968, |
| "num_input_tokens_seen": 255568, |
| "step": 405 |
| }, |
| { |
| "epoch": 7.12280701754386, |
| "eval_loss": 0.1832893192768097, |
| "eval_runtime": 0.6544, |
| "eval_samples_per_second": 38.201, |
| "eval_steps_per_second": 10.696, |
| "num_input_tokens_seen": 255984, |
| "step": 406 |
| }, |
| { |
| "epoch": 7.192982456140351, |
| "grad_norm": 0.15544845163822174, |
| "learning_rate": 1.1198383850594758e-05, |
| "loss": 0.0685, |
| "num_input_tokens_seen": 258608, |
| "step": 410 |
| }, |
| { |
| "epoch": 7.280701754385965, |
| "grad_norm": 0.14175483584403992, |
| "learning_rate": 1.0566682115126344e-05, |
| "loss": 0.0217, |
| "num_input_tokens_seen": 262256, |
| "step": 415 |
| }, |
| { |
| "epoch": 7.368421052631579, |
| "grad_norm": 1.0567383766174316, |
| "learning_rate": 9.948511609419675e-06, |
| "loss": 0.1069, |
| "num_input_tokens_seen": 265488, |
| "step": 420 |
| }, |
| { |
| "epoch": 7.456140350877193, |
| "grad_norm": 0.6136623620986938, |
| "learning_rate": 9.344451868077353e-06, |
| "loss": 0.1459, |
| "num_input_tokens_seen": 268848, |
| "step": 425 |
| }, |
| { |
| "epoch": 7.543859649122807, |
| "grad_norm": 2.0627381801605225, |
| "learning_rate": 8.755069196866014e-06, |
| "loss": 0.0596, |
| "num_input_tokens_seen": 272016, |
| "step": 430 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "grad_norm": 1.0522947311401367, |
| "learning_rate": 8.180916141804906e-06, |
| "loss": 0.0135, |
| "num_input_tokens_seen": 275536, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "eval_loss": 0.1947774887084961, |
| "eval_runtime": 0.6657, |
| "eval_samples_per_second": 37.555, |
| "eval_steps_per_second": 10.515, |
| "num_input_tokens_seen": 275536, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.719298245614035, |
| "grad_norm": 0.5640615820884705, |
| "learning_rate": 7.622530971154199e-06, |
| "loss": 0.0671, |
| "num_input_tokens_seen": 279024, |
| "step": 440 |
| }, |
| { |
| "epoch": 7.807017543859649, |
| "grad_norm": 1.5433474779129028, |
| "learning_rate": 7.080437170788723e-06, |
| "loss": 0.1542, |
| "num_input_tokens_seen": 281776, |
| "step": 445 |
| }, |
| { |
| "epoch": 7.894736842105263, |
| "grad_norm": 0.48249372839927673, |
| "learning_rate": 6.555142953430158e-06, |
| "loss": 0.2425, |
| "num_input_tokens_seen": 284880, |
| "step": 450 |
| }, |
| { |
| "epoch": 7.982456140350877, |
| "grad_norm": 0.408993124961853, |
| "learning_rate": 6.0471407821978135e-06, |
| "loss": 0.0552, |
| "num_input_tokens_seen": 288272, |
| "step": 455 |
| }, |
| { |
| "epoch": 8.070175438596491, |
| "grad_norm": 2.1185598373413086, |
| "learning_rate": 5.556906908924655e-06, |
| "loss": 0.0669, |
| "num_input_tokens_seen": 290800, |
| "step": 460 |
| }, |
| { |
| "epoch": 8.140350877192983, |
| "eval_loss": 0.1933337152004242, |
| "eval_runtime": 0.6572, |
| "eval_samples_per_second": 38.042, |
| "eval_steps_per_second": 10.652, |
| "num_input_tokens_seen": 293296, |
| "step": 464 |
| }, |
| { |
| "epoch": 8.157894736842104, |
| "grad_norm": 1.9985065460205078, |
| "learning_rate": 5.084900927671393e-06, |
| "loss": 0.2398, |
| "num_input_tokens_seen": 293712, |
| "step": 465 |
| }, |
| { |
| "epoch": 8.24561403508772, |
| "grad_norm": 0.08363039046525955, |
| "learning_rate": 4.631565343857239e-06, |
| "loss": 0.0206, |
| "num_input_tokens_seen": 297680, |
| "step": 470 |
| }, |
| { |
| "epoch": 8.333333333333334, |
| "grad_norm": 0.6890261769294739, |
| "learning_rate": 4.19732515941125e-06, |
| "loss": 0.1069, |
| "num_input_tokens_seen": 300880, |
| "step": 475 |
| }, |
| { |
| "epoch": 8.421052631578947, |
| "grad_norm": 3.5768940448760986, |
| "learning_rate": 3.7825874743331907e-06, |
| "loss": 0.0852, |
| "num_input_tokens_seen": 304144, |
| "step": 480 |
| }, |
| { |
| "epoch": 8.508771929824562, |
| "grad_norm": 0.23990033566951752, |
| "learning_rate": 3.3877411050374424e-06, |
| "loss": 0.011, |
| "num_input_tokens_seen": 307408, |
| "step": 485 |
| }, |
| { |
| "epoch": 8.596491228070175, |
| "grad_norm": 0.3000926077365875, |
| "learning_rate": 3.013156219837776e-06, |
| "loss": 0.0877, |
| "num_input_tokens_seen": 310384, |
| "step": 490 |
| }, |
| { |
| "epoch": 8.649122807017545, |
| "eval_loss": 0.18926918506622314, |
| "eval_runtime": 0.6575, |
| "eval_samples_per_second": 38.025, |
| "eval_steps_per_second": 10.647, |
| "num_input_tokens_seen": 312304, |
| "step": 493 |
| }, |
| { |
| "epoch": 8.68421052631579, |
| "grad_norm": 0.137564018368721, |
| "learning_rate": 2.659183991914696e-06, |
| "loss": 0.0644, |
| "num_input_tokens_seen": 313360, |
| "step": 495 |
| }, |
| { |
| "epoch": 8.771929824561404, |
| "grad_norm": 0.5924885272979736, |
| "learning_rate": 2.326156270090735e-06, |
| "loss": 0.0884, |
| "num_input_tokens_seen": 316368, |
| "step": 500 |
| }, |
| { |
| "epoch": 8.859649122807017, |
| "grad_norm": 0.40410029888153076, |
| "learning_rate": 2.0143852677223075e-06, |
| "loss": 0.1376, |
| "num_input_tokens_seen": 319536, |
| "step": 505 |
| }, |
| { |
| "epoch": 8.947368421052632, |
| "grad_norm": 0.06116305664181709, |
| "learning_rate": 1.7241632699998123e-06, |
| "loss": 0.0524, |
| "num_input_tokens_seen": 322928, |
| "step": 510 |
| }, |
| { |
| "epoch": 9.035087719298245, |
| "grad_norm": 0.36351004242897034, |
| "learning_rate": 1.4557623599303903e-06, |
| "loss": 0.0214, |
| "num_input_tokens_seen": 325280, |
| "step": 515 |
| }, |
| { |
| "epoch": 9.12280701754386, |
| "grad_norm": 0.26245543360710144, |
| "learning_rate": 1.2094341632602064e-06, |
| "loss": 0.0715, |
| "num_input_tokens_seen": 327808, |
| "step": 520 |
| }, |
| { |
| "epoch": 9.157894736842104, |
| "eval_loss": 0.1936434656381607, |
| "eval_runtime": 0.6554, |
| "eval_samples_per_second": 38.146, |
| "eval_steps_per_second": 10.681, |
| "num_input_tokens_seen": 329216, |
| "step": 522 |
| }, |
| { |
| "epoch": 9.210526315789474, |
| "grad_norm": 0.2851159870624542, |
| "learning_rate": 9.85409612575411e-07, |
| "loss": 0.1842, |
| "num_input_tokens_seen": 331040, |
| "step": 525 |
| }, |
| { |
| "epoch": 9.298245614035087, |
| "grad_norm": 4.474898338317871, |
| "learning_rate": 7.838987308029427e-07, |
| "loss": 0.1086, |
| "num_input_tokens_seen": 333824, |
| "step": 530 |
| }, |
| { |
| "epoch": 9.385964912280702, |
| "grad_norm": 0.4796180725097656, |
| "learning_rate": 6.050904343141095e-07, |
| "loss": 0.1407, |
| "num_input_tokens_seen": 336192, |
| "step": 535 |
| }, |
| { |
| "epoch": 9.473684210526315, |
| "grad_norm": 0.060535792261362076, |
| "learning_rate": 4.491523558155714e-07, |
| "loss": 0.0321, |
| "num_input_tokens_seen": 340224, |
| "step": 540 |
| }, |
| { |
| "epoch": 9.56140350877193, |
| "grad_norm": 0.09705488383769989, |
| "learning_rate": 3.162306871937387e-07, |
| "loss": 0.0553, |
| "num_input_tokens_seen": 343008, |
| "step": 545 |
| }, |
| { |
| "epoch": 9.649122807017545, |
| "grad_norm": 0.0951201319694519, |
| "learning_rate": 2.064500424599436e-07, |
| "loss": 0.0497, |
| "num_input_tokens_seen": 346240, |
| "step": 550 |
| }, |
| { |
| "epoch": 9.666666666666666, |
| "eval_loss": 0.189773827791214, |
| "eval_runtime": 0.6539, |
| "eval_samples_per_second": 38.231, |
| "eval_steps_per_second": 10.705, |
| "num_input_tokens_seen": 346944, |
| "step": 551 |
| }, |
| { |
| "epoch": 9.736842105263158, |
| "grad_norm": 4.818851947784424, |
| "learning_rate": 1.1991334092484318e-07, |
| "loss": 0.118, |
| "num_input_tokens_seen": 349472, |
| "step": 555 |
| }, |
| { |
| "epoch": 9.824561403508772, |
| "grad_norm": 3.700317621231079, |
| "learning_rate": 5.6701710711626334e-08, |
| "loss": 0.1018, |
| "num_input_tokens_seen": 353088, |
| "step": 560 |
| }, |
| { |
| "epoch": 9.912280701754385, |
| "grad_norm": 2.023979425430298, |
| "learning_rate": 1.6874412698408836e-08, |
| "loss": 0.0546, |
| "num_input_tokens_seen": 357088, |
| "step": 565 |
| }, |
| { |
| "epoch": 10.0, |
| "grad_norm": 0.08079128712415695, |
| "learning_rate": 4.687849611939576e-10, |
| "loss": 0.0584, |
| "num_input_tokens_seen": 359824, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.0, |
| "num_input_tokens_seen": 359824, |
| "step": 570, |
| "total_flos": 1.6203286888316928e+16, |
| "train_loss": 0.1786263900956041, |
| "train_runtime": 154.5799, |
| "train_samples_per_second": 14.556, |
| "train_steps_per_second": 3.687 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 570, |
| "num_input_tokens_seen": 359824, |
| "num_train_epochs": 10, |
| "save_steps": 29, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 1.6203286888316928e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |