| { |
| "best_global_step": 551, |
| "best_metric": 0.15309593081474304, |
| "best_model_checkpoint": "saves_stability/ia3/llama-3-8b-instruct/train_cb_1757340244/checkpoint-551", |
| "epoch": 10.0, |
| "eval_steps": 29, |
| "global_step": 570, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08771929824561403, |
| "grad_norm": 3.394319772720337, |
| "learning_rate": 3.5087719298245615e-06, |
| "loss": 1.2197, |
| "num_input_tokens_seen": 3040, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.17543859649122806, |
| "grad_norm": 3.263885259628296, |
| "learning_rate": 7.894736842105263e-06, |
| "loss": 1.0398, |
| "num_input_tokens_seen": 5984, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.2631578947368421, |
| "grad_norm": 2.7331926822662354, |
| "learning_rate": 1.2280701754385964e-05, |
| "loss": 0.9772, |
| "num_input_tokens_seen": 9952, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.3508771929824561, |
| "grad_norm": 3.308185577392578, |
| "learning_rate": 1.6666666666666667e-05, |
| "loss": 1.3916, |
| "num_input_tokens_seen": 13120, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.43859649122807015, |
| "grad_norm": 3.6212713718414307, |
| "learning_rate": 2.105263157894737e-05, |
| "loss": 1.267, |
| "num_input_tokens_seen": 16128, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.5087719298245614, |
| "eval_loss": 1.2371762990951538, |
| "eval_runtime": 0.7318, |
| "eval_samples_per_second": 34.161, |
| "eval_steps_per_second": 9.565, |
| "num_input_tokens_seen": 18528, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.5263157894736842, |
| "grad_norm": 2.8578758239746094, |
| "learning_rate": 2.5438596491228074e-05, |
| "loss": 1.1807, |
| "num_input_tokens_seen": 18944, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.6140350877192983, |
| "grad_norm": 2.337688446044922, |
| "learning_rate": 2.9824561403508772e-05, |
| "loss": 1.2567, |
| "num_input_tokens_seen": 21984, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.7017543859649122, |
| "grad_norm": 3.031054735183716, |
| "learning_rate": 3.421052631578947e-05, |
| "loss": 1.0979, |
| "num_input_tokens_seen": 25632, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7894736842105263, |
| "grad_norm": 3.210573434829712, |
| "learning_rate": 3.859649122807018e-05, |
| "loss": 1.2998, |
| "num_input_tokens_seen": 28416, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8771929824561403, |
| "grad_norm": 3.8593454360961914, |
| "learning_rate": 4.298245614035088e-05, |
| "loss": 1.4261, |
| "num_input_tokens_seen": 31264, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9649122807017544, |
| "grad_norm": 3.177037477493286, |
| "learning_rate": 4.736842105263158e-05, |
| "loss": 1.094, |
| "num_input_tokens_seen": 34528, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0175438596491229, |
| "eval_loss": 1.2371762990951538, |
| "eval_runtime": 0.7316, |
| "eval_samples_per_second": 34.171, |
| "eval_steps_per_second": 9.568, |
| "num_input_tokens_seen": 35960, |
| "step": 58 |
| }, |
| { |
| "epoch": 1.0526315789473684, |
| "grad_norm": 2.3723669052124023, |
| "learning_rate": 4.999812487773597e-05, |
| "loss": 0.8734, |
| "num_input_tokens_seen": 37464, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1403508771929824, |
| "grad_norm": 2.726174831390381, |
| "learning_rate": 4.997703298253406e-05, |
| "loss": 1.1847, |
| "num_input_tokens_seen": 40568, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.2280701754385965, |
| "grad_norm": 2.8476409912109375, |
| "learning_rate": 4.993252512887069e-05, |
| "loss": 1.3064, |
| "num_input_tokens_seen": 43576, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3157894736842106, |
| "grad_norm": 3.090651035308838, |
| "learning_rate": 4.986464304284091e-05, |
| "loss": 1.1122, |
| "num_input_tokens_seen": 46392, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.4035087719298245, |
| "grad_norm": 2.7106504440307617, |
| "learning_rate": 4.977345036387331e-05, |
| "loss": 0.8953, |
| "num_input_tokens_seen": 49560, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.4912280701754386, |
| "grad_norm": 2.570406913757324, |
| "learning_rate": 4.965903258506806e-05, |
| "loss": 0.8597, |
| "num_input_tokens_seen": 52184, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.526315789473684, |
| "eval_loss": 0.6111657023429871, |
| "eval_runtime": 0.7382, |
| "eval_samples_per_second": 33.866, |
| "eval_steps_per_second": 9.483, |
| "num_input_tokens_seen": 53272, |
| "step": 87 |
| }, |
| { |
| "epoch": 1.5789473684210527, |
| "grad_norm": 2.1131107807159424, |
| "learning_rate": 4.952149697304716e-05, |
| "loss": 0.4943, |
| "num_input_tokens_seen": 55128, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 1.775753378868103, |
| "learning_rate": 4.9360972467392056e-05, |
| "loss": 0.4578, |
| "num_input_tokens_seen": 58456, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.7543859649122808, |
| "grad_norm": 1.2537349462509155, |
| "learning_rate": 4.917760955976277e-05, |
| "loss": 0.2953, |
| "num_input_tokens_seen": 61272, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8421052631578947, |
| "grad_norm": 1.8986181020736694, |
| "learning_rate": 4.897158015281209e-05, |
| "loss": 0.2738, |
| "num_input_tokens_seen": 64952, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9298245614035088, |
| "grad_norm": 0.7511826753616333, |
| "learning_rate": 4.874307739902689e-05, |
| "loss": 0.1952, |
| "num_input_tokens_seen": 67896, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.017543859649123, |
| "grad_norm": 1.0001667737960815, |
| "learning_rate": 4.849231551964771e-05, |
| "loss": 0.1453, |
| "num_input_tokens_seen": 70592, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.0350877192982457, |
| "eval_loss": 0.20721164345741272, |
| "eval_runtime": 0.7372, |
| "eval_samples_per_second": 33.912, |
| "eval_steps_per_second": 9.495, |
| "num_input_tokens_seen": 71200, |
| "step": 116 |
| }, |
| { |
| "epoch": 2.1052631578947367, |
| "grad_norm": 2.6419103145599365, |
| "learning_rate": 4.821952960383649e-05, |
| "loss": 0.1425, |
| "num_input_tokens_seen": 73728, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.192982456140351, |
| "grad_norm": 0.7555724382400513, |
| "learning_rate": 4.7924975388280524e-05, |
| "loss": 0.1835, |
| "num_input_tokens_seen": 76992, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.280701754385965, |
| "grad_norm": 1.0654090642929077, |
| "learning_rate": 4.760892901743944e-05, |
| "loss": 0.1573, |
| "num_input_tokens_seen": 79712, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.3684210526315788, |
| "grad_norm": 1.6974315643310547, |
| "learning_rate": 4.727168678465988e-05, |
| "loss": 0.1462, |
| "num_input_tokens_seen": 82944, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.456140350877193, |
| "grad_norm": 1.7174067497253418, |
| "learning_rate": 4.6913564854400595e-05, |
| "loss": 0.1192, |
| "num_input_tokens_seen": 85568, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "grad_norm": 2.9056649208068848, |
| "learning_rate": 4.6534898965828405e-05, |
| "loss": 0.2769, |
| "num_input_tokens_seen": 89088, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "eval_loss": 0.18597088754177094, |
| "eval_runtime": 0.7351, |
| "eval_samples_per_second": 34.01, |
| "eval_steps_per_second": 9.523, |
| "num_input_tokens_seen": 89088, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.6315789473684212, |
| "grad_norm": 1.8056362867355347, |
| "learning_rate": 4.613604411806285e-05, |
| "loss": 0.3309, |
| "num_input_tokens_seen": 92128, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.719298245614035, |
| "grad_norm": 0.9146686792373657, |
| "learning_rate": 4.5717374237364665e-05, |
| "loss": 0.1127, |
| "num_input_tokens_seen": 95520, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.807017543859649, |
| "grad_norm": 1.6094197034835815, |
| "learning_rate": 4.5279281826580056e-05, |
| "loss": 0.166, |
| "num_input_tokens_seen": 98848, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.8947368421052633, |
| "grad_norm": 1.7702735662460327, |
| "learning_rate": 4.482217759716946e-05, |
| "loss": 0.1407, |
| "num_input_tokens_seen": 101856, |
| "step": 165 |
| }, |
| { |
| "epoch": 2.982456140350877, |
| "grad_norm": 0.5128080248832703, |
| "learning_rate": 4.434649008416565e-05, |
| "loss": 0.1871, |
| "num_input_tokens_seen": 105632, |
| "step": 170 |
| }, |
| { |
| "epoch": 3.0526315789473686, |
| "eval_loss": 0.17730657756328583, |
| "eval_runtime": 0.738, |
| "eval_samples_per_second": 33.874, |
| "eval_steps_per_second": 9.485, |
| "num_input_tokens_seen": 107504, |
| "step": 174 |
| }, |
| { |
| "epoch": 3.0701754385964914, |
| "grad_norm": 1.009621500968933, |
| "learning_rate": 4.385266524442241e-05, |
| "loss": 0.0748, |
| "num_input_tokens_seen": 108208, |
| "step": 175 |
| }, |
| { |
| "epoch": 3.1578947368421053, |
| "grad_norm": 1.7452389001846313, |
| "learning_rate": 4.334116603853007e-05, |
| "loss": 0.1682, |
| "num_input_tokens_seen": 111280, |
| "step": 180 |
| }, |
| { |
| "epoch": 3.245614035087719, |
| "grad_norm": 0.2519210875034332, |
| "learning_rate": 4.2812471996790206e-05, |
| "loss": 0.0678, |
| "num_input_tokens_seen": 114512, |
| "step": 185 |
| }, |
| { |
| "epoch": 3.3333333333333335, |
| "grad_norm": 1.879989504814148, |
| "learning_rate": 4.226707876965611e-05, |
| "loss": 0.0788, |
| "num_input_tokens_seen": 117616, |
| "step": 190 |
| }, |
| { |
| "epoch": 3.4210526315789473, |
| "grad_norm": 0.6779692769050598, |
| "learning_rate": 4.1705497663060767e-05, |
| "loss": 0.1204, |
| "num_input_tokens_seen": 121456, |
| "step": 195 |
| }, |
| { |
| "epoch": 3.5087719298245617, |
| "grad_norm": 0.5581347942352295, |
| "learning_rate": 4.1128255159067665e-05, |
| "loss": 0.1082, |
| "num_input_tokens_seen": 124816, |
| "step": 200 |
| }, |
| { |
| "epoch": 3.56140350877193, |
| "eval_loss": 0.1677931696176529, |
| "eval_runtime": 0.7372, |
| "eval_samples_per_second": 33.914, |
| "eval_steps_per_second": 9.496, |
| "num_input_tokens_seen": 126384, |
| "step": 203 |
| }, |
| { |
| "epoch": 3.5964912280701755, |
| "grad_norm": 0.37570345401763916, |
| "learning_rate": 4.053589242229412e-05, |
| "loss": 0.1023, |
| "num_input_tokens_seen": 128048, |
| "step": 205 |
| }, |
| { |
| "epoch": 3.6842105263157894, |
| "grad_norm": 2.6839077472686768, |
| "learning_rate": 3.9928964792569655e-05, |
| "loss": 0.4408, |
| "num_input_tokens_seen": 131184, |
| "step": 210 |
| }, |
| { |
| "epoch": 3.7719298245614032, |
| "grad_norm": 0.11496905237436295, |
| "learning_rate": 3.930804126430513e-05, |
| "loss": 0.1777, |
| "num_input_tokens_seen": 134480, |
| "step": 215 |
| }, |
| { |
| "epoch": 3.8596491228070176, |
| "grad_norm": 0.05168064311146736, |
| "learning_rate": 3.867370395306068e-05, |
| "loss": 0.0608, |
| "num_input_tokens_seen": 137616, |
| "step": 220 |
| }, |
| { |
| "epoch": 3.9473684210526314, |
| "grad_norm": 2.7214698791503906, |
| "learning_rate": 3.8026547549812665e-05, |
| "loss": 0.3076, |
| "num_input_tokens_seen": 140496, |
| "step": 225 |
| }, |
| { |
| "epoch": 4.035087719298246, |
| "grad_norm": 1.3248543739318848, |
| "learning_rate": 3.736717876343106e-05, |
| "loss": 0.2455, |
| "num_input_tokens_seen": 142832, |
| "step": 230 |
| }, |
| { |
| "epoch": 4.0701754385964914, |
| "eval_loss": 0.16621671617031097, |
| "eval_runtime": 0.7398, |
| "eval_samples_per_second": 33.794, |
| "eval_steps_per_second": 9.462, |
| "num_input_tokens_seen": 143952, |
| "step": 232 |
| }, |
| { |
| "epoch": 4.12280701754386, |
| "grad_norm": 0.202377587556839, |
| "learning_rate": 3.66962157518902e-05, |
| "loss": 0.0524, |
| "num_input_tokens_seen": 145936, |
| "step": 235 |
| }, |
| { |
| "epoch": 4.2105263157894735, |
| "grad_norm": 0.4693661630153656, |
| "learning_rate": 3.601428754274584e-05, |
| "loss": 0.0582, |
| "num_input_tokens_seen": 148496, |
| "step": 240 |
| }, |
| { |
| "epoch": 4.298245614035087, |
| "grad_norm": 0.17534907162189484, |
| "learning_rate": 3.532203344342212e-05, |
| "loss": 0.2415, |
| "num_input_tokens_seen": 151920, |
| "step": 245 |
| }, |
| { |
| "epoch": 4.385964912280702, |
| "grad_norm": 0.35421478748321533, |
| "learning_rate": 3.4620102441861143e-05, |
| "loss": 0.0976, |
| "num_input_tokens_seen": 155760, |
| "step": 250 |
| }, |
| { |
| "epoch": 4.473684210526316, |
| "grad_norm": 0.2788299322128296, |
| "learning_rate": 3.390915259809696e-05, |
| "loss": 0.1553, |
| "num_input_tokens_seen": 158608, |
| "step": 255 |
| }, |
| { |
| "epoch": 4.56140350877193, |
| "grad_norm": 1.8037238121032715, |
| "learning_rate": 3.318985042732461e-05, |
| "loss": 0.1149, |
| "num_input_tokens_seen": 161232, |
| "step": 260 |
| }, |
| { |
| "epoch": 4.578947368421053, |
| "eval_loss": 0.15594005584716797, |
| "eval_runtime": 0.7357, |
| "eval_samples_per_second": 33.981, |
| "eval_steps_per_second": 9.515, |
| "num_input_tokens_seen": 161840, |
| "step": 261 |
| }, |
| { |
| "epoch": 4.649122807017544, |
| "grad_norm": 0.6825562119483948, |
| "learning_rate": 3.246287027504237e-05, |
| "loss": 0.0795, |
| "num_input_tokens_seen": 164336, |
| "step": 265 |
| }, |
| { |
| "epoch": 4.7368421052631575, |
| "grad_norm": 2.3811163902282715, |
| "learning_rate": 3.172889368485311e-05, |
| "loss": 0.1587, |
| "num_input_tokens_seen": 167568, |
| "step": 270 |
| }, |
| { |
| "epoch": 4.824561403508772, |
| "grad_norm": 0.1356663554906845, |
| "learning_rate": 3.0988608759517475e-05, |
| "loss": 0.1847, |
| "num_input_tokens_seen": 170672, |
| "step": 275 |
| }, |
| { |
| "epoch": 4.912280701754386, |
| "grad_norm": 2.9171602725982666, |
| "learning_rate": 3.0242709515857758e-05, |
| "loss": 0.3833, |
| "num_input_tokens_seen": 173648, |
| "step": 280 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 0.017362266778945923, |
| "learning_rate": 2.949189523411747e-05, |
| "loss": 0.1889, |
| "num_input_tokens_seen": 176200, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "grad_norm": 0.6922072172164917, |
| "learning_rate": 2.8736869802386364e-05, |
| "loss": 0.0873, |
| "num_input_tokens_seen": 179816, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "eval_loss": 0.159979447722435, |
| "eval_runtime": 0.7368, |
| "eval_samples_per_second": 33.931, |
| "eval_steps_per_second": 9.501, |
| "num_input_tokens_seen": 179816, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.175438596491228, |
| "grad_norm": 0.5496197938919067, |
| "learning_rate": 2.797834105670559e-05, |
| "loss": 0.0859, |
| "num_input_tokens_seen": 182920, |
| "step": 295 |
| }, |
| { |
| "epoch": 5.2631578947368425, |
| "grad_norm": 0.9509873390197754, |
| "learning_rate": 2.7217020117471793e-05, |
| "loss": 0.2256, |
| "num_input_tokens_seen": 185928, |
| "step": 300 |
| }, |
| { |
| "epoch": 5.350877192982456, |
| "grad_norm": 1.8133375644683838, |
| "learning_rate": 2.6453620722761896e-05, |
| "loss": 0.1903, |
| "num_input_tokens_seen": 188392, |
| "step": 305 |
| }, |
| { |
| "epoch": 5.43859649122807, |
| "grad_norm": 0.5444093942642212, |
| "learning_rate": 2.5688858559204053e-05, |
| "loss": 0.2589, |
| "num_input_tokens_seen": 191624, |
| "step": 310 |
| }, |
| { |
| "epoch": 5.526315789473684, |
| "grad_norm": 0.41618645191192627, |
| "learning_rate": 2.492345059102164e-05, |
| "loss": 0.3196, |
| "num_input_tokens_seen": 194376, |
| "step": 315 |
| }, |
| { |
| "epoch": 5.5964912280701755, |
| "eval_loss": 0.15433594584465027, |
| "eval_runtime": 0.7365, |
| "eval_samples_per_second": 33.945, |
| "eval_steps_per_second": 9.505, |
| "num_input_tokens_seen": 197416, |
| "step": 319 |
| }, |
| { |
| "epoch": 5.614035087719298, |
| "grad_norm": 0.13511960208415985, |
| "learning_rate": 2.4158114387879616e-05, |
| "loss": 0.0743, |
| "num_input_tokens_seen": 198088, |
| "step": 320 |
| }, |
| { |
| "epoch": 5.701754385964913, |
| "grad_norm": 1.0964667797088623, |
| "learning_rate": 2.3393567452163252e-05, |
| "loss": 0.1532, |
| "num_input_tokens_seen": 200680, |
| "step": 325 |
| }, |
| { |
| "epoch": 5.7894736842105265, |
| "grad_norm": 0.22159311175346375, |
| "learning_rate": 2.2630526546319914e-05, |
| "loss": 0.0363, |
| "num_input_tokens_seen": 203784, |
| "step": 330 |
| }, |
| { |
| "epoch": 5.87719298245614, |
| "grad_norm": 1.4879790544509888, |
| "learning_rate": 2.186970702089457e-05, |
| "loss": 0.1023, |
| "num_input_tokens_seen": 207112, |
| "step": 335 |
| }, |
| { |
| "epoch": 5.964912280701754, |
| "grad_norm": 0.7014191150665283, |
| "learning_rate": 2.111182214388893e-05, |
| "loss": 0.0611, |
| "num_input_tokens_seen": 209800, |
| "step": 340 |
| }, |
| { |
| "epoch": 6.052631578947368, |
| "grad_norm": 0.7323847413063049, |
| "learning_rate": 2.0357582432072957e-05, |
| "loss": 0.1822, |
| "num_input_tokens_seen": 212896, |
| "step": 345 |
| }, |
| { |
| "epoch": 6.105263157894737, |
| "eval_loss": 0.1605823040008545, |
| "eval_runtime": 0.7331, |
| "eval_samples_per_second": 34.103, |
| "eval_steps_per_second": 9.549, |
| "num_input_tokens_seen": 214432, |
| "step": 348 |
| }, |
| { |
| "epoch": 6.140350877192983, |
| "grad_norm": 0.7143074870109558, |
| "learning_rate": 1.9607694984875754e-05, |
| "loss": 0.1271, |
| "num_input_tokens_seen": 215456, |
| "step": 350 |
| }, |
| { |
| "epoch": 6.228070175438597, |
| "grad_norm": 0.8039895296096802, |
| "learning_rate": 1.8862862821480025e-05, |
| "loss": 0.1462, |
| "num_input_tokens_seen": 218944, |
| "step": 355 |
| }, |
| { |
| "epoch": 6.315789473684211, |
| "grad_norm": 0.7685566544532776, |
| "learning_rate": 1.8123784221741964e-05, |
| "loss": 0.0618, |
| "num_input_tokens_seen": 221824, |
| "step": 360 |
| }, |
| { |
| "epoch": 6.4035087719298245, |
| "grad_norm": 1.4941657781600952, |
| "learning_rate": 1.73911520715541e-05, |
| "loss": 0.1965, |
| "num_input_tokens_seen": 225568, |
| "step": 365 |
| }, |
| { |
| "epoch": 6.491228070175438, |
| "grad_norm": 0.2337864488363266, |
| "learning_rate": 1.666565321326512e-05, |
| "loss": 0.0557, |
| "num_input_tokens_seen": 228960, |
| "step": 370 |
| }, |
| { |
| "epoch": 6.578947368421053, |
| "grad_norm": 2.8704099655151367, |
| "learning_rate": 1.5947967801765345e-05, |
| "loss": 0.2386, |
| "num_input_tokens_seen": 231936, |
| "step": 375 |
| }, |
| { |
| "epoch": 6.614035087719298, |
| "eval_loss": 0.15757624804973602, |
| "eval_runtime": 0.7336, |
| "eval_samples_per_second": 34.079, |
| "eval_steps_per_second": 9.542, |
| "num_input_tokens_seen": 233280, |
| "step": 377 |
| }, |
| { |
| "epoch": 6.666666666666667, |
| "grad_norm": 0.3365307152271271, |
| "learning_rate": 1.5238768666841907e-05, |
| "loss": 0.1238, |
| "num_input_tokens_seen": 234944, |
| "step": 380 |
| }, |
| { |
| "epoch": 6.754385964912281, |
| "grad_norm": 1.4165607690811157, |
| "learning_rate": 1.4538720682400969e-05, |
| "loss": 0.1057, |
| "num_input_tokens_seen": 238080, |
| "step": 385 |
| }, |
| { |
| "epoch": 6.842105263157895, |
| "grad_norm": 0.3722606301307678, |
| "learning_rate": 1.3848480143148839e-05, |
| "loss": 0.2568, |
| "num_input_tokens_seen": 241120, |
| "step": 390 |
| }, |
| { |
| "epoch": 6.9298245614035086, |
| "grad_norm": 0.2593587338924408, |
| "learning_rate": 1.3168694149315796e-05, |
| "loss": 0.056, |
| "num_input_tokens_seen": 244800, |
| "step": 395 |
| }, |
| { |
| "epoch": 7.017543859649122, |
| "grad_norm": 0.8891854882240295, |
| "learning_rate": 1.2500000000000006e-05, |
| "loss": 0.157, |
| "num_input_tokens_seen": 247408, |
| "step": 400 |
| }, |
| { |
| "epoch": 7.105263157894737, |
| "grad_norm": 0.7847668528556824, |
| "learning_rate": 1.1843024595699805e-05, |
| "loss": 0.214, |
| "num_input_tokens_seen": 250512, |
| "step": 405 |
| }, |
| { |
| "epoch": 7.12280701754386, |
| "eval_loss": 0.1556614488363266, |
| "eval_runtime": 0.7367, |
| "eval_samples_per_second": 33.935, |
| "eval_steps_per_second": 9.502, |
| "num_input_tokens_seen": 251120, |
| "step": 406 |
| }, |
| { |
| "epoch": 7.192982456140351, |
| "grad_norm": 0.5270536541938782, |
| "learning_rate": 1.1198383850594758e-05, |
| "loss": 0.0793, |
| "num_input_tokens_seen": 254352, |
| "step": 410 |
| }, |
| { |
| "epoch": 7.280701754385965, |
| "grad_norm": 1.1878958940505981, |
| "learning_rate": 1.0566682115126344e-05, |
| "loss": 0.3267, |
| "num_input_tokens_seen": 257264, |
| "step": 415 |
| }, |
| { |
| "epoch": 7.368421052631579, |
| "grad_norm": 0.22754035890102386, |
| "learning_rate": 9.948511609419675e-06, |
| "loss": 0.0604, |
| "num_input_tokens_seen": 260048, |
| "step": 420 |
| }, |
| { |
| "epoch": 7.456140350877193, |
| "grad_norm": 0.3346598446369171, |
| "learning_rate": 9.344451868077353e-06, |
| "loss": 0.0854, |
| "num_input_tokens_seen": 263568, |
| "step": 425 |
| }, |
| { |
| "epoch": 7.543859649122807, |
| "grad_norm": 0.10311973094940186, |
| "learning_rate": 8.755069196866014e-06, |
| "loss": 0.1736, |
| "num_input_tokens_seen": 266640, |
| "step": 430 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "grad_norm": 1.4336066246032715, |
| "learning_rate": 8.180916141804906e-06, |
| "loss": 0.1322, |
| "num_input_tokens_seen": 270128, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "eval_loss": 0.1574099361896515, |
| "eval_runtime": 0.7367, |
| "eval_samples_per_second": 33.936, |
| "eval_steps_per_second": 9.502, |
| "num_input_tokens_seen": 270128, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.719298245614035, |
| "grad_norm": 0.7538064122200012, |
| "learning_rate": 7.622530971154199e-06, |
| "loss": 0.163, |
| "num_input_tokens_seen": 273136, |
| "step": 440 |
| }, |
| { |
| "epoch": 7.807017543859649, |
| "grad_norm": 0.5714812278747559, |
| "learning_rate": 7.080437170788723e-06, |
| "loss": 0.1428, |
| "num_input_tokens_seen": 276464, |
| "step": 445 |
| }, |
| { |
| "epoch": 7.894736842105263, |
| "grad_norm": 0.392402321100235, |
| "learning_rate": 6.555142953430158e-06, |
| "loss": 0.0635, |
| "num_input_tokens_seen": 278960, |
| "step": 450 |
| }, |
| { |
| "epoch": 7.982456140350877, |
| "grad_norm": 1.288957118988037, |
| "learning_rate": 6.0471407821978135e-06, |
| "loss": 0.0987, |
| "num_input_tokens_seen": 282512, |
| "step": 455 |
| }, |
| { |
| "epoch": 8.070175438596491, |
| "grad_norm": 1.1208373308181763, |
| "learning_rate": 5.556906908924655e-06, |
| "loss": 0.1589, |
| "num_input_tokens_seen": 285496, |
| "step": 460 |
| }, |
| { |
| "epoch": 8.140350877192983, |
| "eval_loss": 0.15771926939487457, |
| "eval_runtime": 0.738, |
| "eval_samples_per_second": 33.875, |
| "eval_steps_per_second": 9.485, |
| "num_input_tokens_seen": 288216, |
| "step": 464 |
| }, |
| { |
| "epoch": 8.157894736842104, |
| "grad_norm": 0.34755808115005493, |
| "learning_rate": 5.084900927671393e-06, |
| "loss": 0.0444, |
| "num_input_tokens_seen": 289080, |
| "step": 465 |
| }, |
| { |
| "epoch": 8.24561403508772, |
| "grad_norm": 0.5992547273635864, |
| "learning_rate": 4.631565343857239e-06, |
| "loss": 0.0801, |
| "num_input_tokens_seen": 292120, |
| "step": 470 |
| }, |
| { |
| "epoch": 8.333333333333334, |
| "grad_norm": 1.0459365844726562, |
| "learning_rate": 4.19732515941125e-06, |
| "loss": 0.2671, |
| "num_input_tokens_seen": 295384, |
| "step": 475 |
| }, |
| { |
| "epoch": 8.421052631578947, |
| "grad_norm": 1.2793313264846802, |
| "learning_rate": 3.7825874743331907e-06, |
| "loss": 0.1749, |
| "num_input_tokens_seen": 298712, |
| "step": 480 |
| }, |
| { |
| "epoch": 8.508771929824562, |
| "grad_norm": 1.0729869604110718, |
| "learning_rate": 3.3877411050374424e-06, |
| "loss": 0.1823, |
| "num_input_tokens_seen": 301688, |
| "step": 485 |
| }, |
| { |
| "epoch": 8.596491228070175, |
| "grad_norm": 2.855426073074341, |
| "learning_rate": 3.013156219837776e-06, |
| "loss": 0.2013, |
| "num_input_tokens_seen": 304504, |
| "step": 490 |
| }, |
| { |
| "epoch": 8.649122807017545, |
| "eval_loss": 0.15657605230808258, |
| "eval_runtime": 0.7386, |
| "eval_samples_per_second": 33.848, |
| "eval_steps_per_second": 9.477, |
| "num_input_tokens_seen": 306648, |
| "step": 493 |
| }, |
| { |
| "epoch": 8.68421052631579, |
| "grad_norm": 0.09677641093730927, |
| "learning_rate": 2.659183991914696e-06, |
| "loss": 0.0597, |
| "num_input_tokens_seen": 308056, |
| "step": 495 |
| }, |
| { |
| "epoch": 8.771929824561404, |
| "grad_norm": 0.3569319546222687, |
| "learning_rate": 2.326156270090735e-06, |
| "loss": 0.0724, |
| "num_input_tokens_seen": 310584, |
| "step": 500 |
| }, |
| { |
| "epoch": 8.859649122807017, |
| "grad_norm": 0.8980002999305725, |
| "learning_rate": 2.0143852677223075e-06, |
| "loss": 0.1204, |
| "num_input_tokens_seen": 313528, |
| "step": 505 |
| }, |
| { |
| "epoch": 8.947368421052632, |
| "grad_norm": 0.42282891273498535, |
| "learning_rate": 1.7241632699998123e-06, |
| "loss": 0.2146, |
| "num_input_tokens_seen": 316472, |
| "step": 510 |
| }, |
| { |
| "epoch": 9.035087719298245, |
| "grad_norm": 1.1091337203979492, |
| "learning_rate": 1.4557623599303903e-06, |
| "loss": 0.0786, |
| "num_input_tokens_seen": 318912, |
| "step": 515 |
| }, |
| { |
| "epoch": 9.12280701754386, |
| "grad_norm": 0.2835777997970581, |
| "learning_rate": 1.2094341632602064e-06, |
| "loss": 0.1012, |
| "num_input_tokens_seen": 321888, |
| "step": 520 |
| }, |
| { |
| "epoch": 9.157894736842104, |
| "eval_loss": 0.15575329959392548, |
| "eval_runtime": 0.7381, |
| "eval_samples_per_second": 33.871, |
| "eval_steps_per_second": 9.484, |
| "num_input_tokens_seen": 323296, |
| "step": 522 |
| }, |
| { |
| "epoch": 9.210526315789474, |
| "grad_norm": 1.0463840961456299, |
| "learning_rate": 9.85409612575411e-07, |
| "loss": 0.1006, |
| "num_input_tokens_seen": 325248, |
| "step": 525 |
| }, |
| { |
| "epoch": 9.298245614035087, |
| "grad_norm": 1.839460849761963, |
| "learning_rate": 7.838987308029427e-07, |
| "loss": 0.0767, |
| "num_input_tokens_seen": 328224, |
| "step": 530 |
| }, |
| { |
| "epoch": 9.385964912280702, |
| "grad_norm": 0.2222977578639984, |
| "learning_rate": 6.050904343141095e-07, |
| "loss": 0.059, |
| "num_input_tokens_seen": 330880, |
| "step": 535 |
| }, |
| { |
| "epoch": 9.473684210526315, |
| "grad_norm": 0.17684368789196014, |
| "learning_rate": 4.491523558155714e-07, |
| "loss": 0.28, |
| "num_input_tokens_seen": 334176, |
| "step": 540 |
| }, |
| { |
| "epoch": 9.56140350877193, |
| "grad_norm": 1.298222303390503, |
| "learning_rate": 3.162306871937387e-07, |
| "loss": 0.092, |
| "num_input_tokens_seen": 337280, |
| "step": 545 |
| }, |
| { |
| "epoch": 9.649122807017545, |
| "grad_norm": 0.5709711909294128, |
| "learning_rate": 2.064500424599436e-07, |
| "loss": 0.108, |
| "num_input_tokens_seen": 340480, |
| "step": 550 |
| }, |
| { |
| "epoch": 9.666666666666666, |
| "eval_loss": 0.15309593081474304, |
| "eval_runtime": 0.741, |
| "eval_samples_per_second": 33.74, |
| "eval_steps_per_second": 9.447, |
| "num_input_tokens_seen": 340960, |
| "step": 551 |
| }, |
| { |
| "epoch": 9.736842105263158, |
| "grad_norm": 2.52823805809021, |
| "learning_rate": 1.1991334092484318e-07, |
| "loss": 0.208, |
| "num_input_tokens_seen": 343328, |
| "step": 555 |
| }, |
| { |
| "epoch": 9.824561403508772, |
| "grad_norm": 0.4858960211277008, |
| "learning_rate": 5.6701710711626334e-08, |
| "loss": 0.2923, |
| "num_input_tokens_seen": 345984, |
| "step": 560 |
| }, |
| { |
| "epoch": 9.912280701754385, |
| "grad_norm": 0.5549158453941345, |
| "learning_rate": 1.6874412698408836e-08, |
| "loss": 0.1776, |
| "num_input_tokens_seen": 349280, |
| "step": 565 |
| }, |
| { |
| "epoch": 10.0, |
| "grad_norm": 1.092591404914856, |
| "learning_rate": 4.687849611939576e-10, |
| "loss": 0.0937, |
| "num_input_tokens_seen": 352296, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.0, |
| "num_input_tokens_seen": 352296, |
| "step": 570, |
| "total_flos": 1.586414523875328e+16, |
| "train_loss": 0.3075454626951301, |
| "train_runtime": 153.4793, |
| "train_samples_per_second": 14.66, |
| "train_steps_per_second": 3.714 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 570, |
| "num_input_tokens_seen": 352296, |
| "num_train_epochs": 10, |
| "save_steps": 29, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 1.586414523875328e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |