| { |
| "best_global_step": 232, |
| "best_metric": 0.036776382476091385, |
| "best_model_checkpoint": "saves_stability/p-tuning/llama-3-8b-instruct/train_cb_1757340168/checkpoint-232", |
| "epoch": 10.0, |
| "eval_steps": 29, |
| "global_step": 570, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.08771929824561403, |
| "grad_norm": 246.364501953125, |
| "learning_rate": 3.5087719298245615e-06, |
| "loss": 8.0729, |
| "num_input_tokens_seen": 3200, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.17543859649122806, |
| "grad_norm": 49.566925048828125, |
| "learning_rate": 7.894736842105263e-06, |
| "loss": 1.9463, |
| "num_input_tokens_seen": 6400, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.2631578947368421, |
| "grad_norm": 10.775127410888672, |
| "learning_rate": 1.2280701754385964e-05, |
| "loss": 0.6031, |
| "num_input_tokens_seen": 9312, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.3508771929824561, |
| "grad_norm": 9.173545837402344, |
| "learning_rate": 1.6666666666666667e-05, |
| "loss": 0.2836, |
| "num_input_tokens_seen": 12384, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.43859649122807015, |
| "grad_norm": 36.368595123291016, |
| "learning_rate": 2.105263157894737e-05, |
| "loss": 0.2601, |
| "num_input_tokens_seen": 15360, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.5087719298245614, |
| "eval_loss": 0.23696745932102203, |
| "eval_runtime": 0.9004, |
| "eval_samples_per_second": 27.766, |
| "eval_steps_per_second": 7.774, |
| "num_input_tokens_seen": 18048, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.5263157894736842, |
| "grad_norm": 22.77605628967285, |
| "learning_rate": 2.5438596491228074e-05, |
| "loss": 0.3487, |
| "num_input_tokens_seen": 18496, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.6140350877192983, |
| "grad_norm": 6.796410083770752, |
| "learning_rate": 2.9824561403508772e-05, |
| "loss": 0.1889, |
| "num_input_tokens_seen": 23008, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.7017543859649122, |
| "grad_norm": 60.28683853149414, |
| "learning_rate": 3.421052631578947e-05, |
| "loss": 0.5959, |
| "num_input_tokens_seen": 26336, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7894736842105263, |
| "grad_norm": 45.21583938598633, |
| "learning_rate": 3.859649122807018e-05, |
| "loss": 0.5759, |
| "num_input_tokens_seen": 29568, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8771929824561403, |
| "grad_norm": 11.374096870422363, |
| "learning_rate": 4.298245614035088e-05, |
| "loss": 0.2359, |
| "num_input_tokens_seen": 32864, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9649122807017544, |
| "grad_norm": 10.98805046081543, |
| "learning_rate": 4.736842105263158e-05, |
| "loss": 0.3128, |
| "num_input_tokens_seen": 35712, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0175438596491229, |
| "eval_loss": 0.4423607885837555, |
| "eval_runtime": 0.9038, |
| "eval_samples_per_second": 27.66, |
| "eval_steps_per_second": 7.745, |
| "num_input_tokens_seen": 36928, |
| "step": 58 |
| }, |
| { |
| "epoch": 1.0526315789473684, |
| "grad_norm": 12.87694263458252, |
| "learning_rate": 4.999812487773597e-05, |
| "loss": 0.2068, |
| "num_input_tokens_seen": 37824, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1403508771929824, |
| "grad_norm": 6.1870036125183105, |
| "learning_rate": 4.997703298253406e-05, |
| "loss": 0.247, |
| "num_input_tokens_seen": 40576, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.2280701754385965, |
| "grad_norm": 5.793450355529785, |
| "learning_rate": 4.993252512887069e-05, |
| "loss": 0.2448, |
| "num_input_tokens_seen": 43360, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3157894736842106, |
| "grad_norm": 8.36977767944336, |
| "learning_rate": 4.986464304284091e-05, |
| "loss": 0.2155, |
| "num_input_tokens_seen": 45824, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.4035087719298245, |
| "grad_norm": 10.006280899047852, |
| "learning_rate": 4.977345036387331e-05, |
| "loss": 0.1287, |
| "num_input_tokens_seen": 49312, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.4912280701754386, |
| "grad_norm": 133.27513122558594, |
| "learning_rate": 4.965903258506806e-05, |
| "loss": 1.7629, |
| "num_input_tokens_seen": 52704, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.526315789473684, |
| "eval_loss": 0.5561293363571167, |
| "eval_runtime": 0.9082, |
| "eval_samples_per_second": 27.526, |
| "eval_steps_per_second": 7.707, |
| "num_input_tokens_seen": 54176, |
| "step": 87 |
| }, |
| { |
| "epoch": 1.5789473684210527, |
| "grad_norm": 14.203841209411621, |
| "learning_rate": 4.952149697304716e-05, |
| "loss": 0.5176, |
| "num_input_tokens_seen": 56352, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.6666666666666665, |
| "grad_norm": 141.86170959472656, |
| "learning_rate": 4.9360972467392056e-05, |
| "loss": 0.3207, |
| "num_input_tokens_seen": 58944, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.7543859649122808, |
| "grad_norm": 14.194103240966797, |
| "learning_rate": 4.917760955976277e-05, |
| "loss": 0.5147, |
| "num_input_tokens_seen": 62176, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8421052631578947, |
| "grad_norm": 11.15134334564209, |
| "learning_rate": 4.897158015281209e-05, |
| "loss": 0.3268, |
| "num_input_tokens_seen": 65504, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9298245614035088, |
| "grad_norm": 16.564889907836914, |
| "learning_rate": 4.874307739902689e-05, |
| "loss": 0.2571, |
| "num_input_tokens_seen": 69312, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.017543859649123, |
| "grad_norm": 4.9545817375183105, |
| "learning_rate": 4.849231551964771e-05, |
| "loss": 0.3378, |
| "num_input_tokens_seen": 72464, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.0350877192982457, |
| "eval_loss": 0.20199881494045258, |
| "eval_runtime": 0.9071, |
| "eval_samples_per_second": 27.561, |
| "eval_steps_per_second": 7.717, |
| "num_input_tokens_seen": 73136, |
| "step": 116 |
| }, |
| { |
| "epoch": 2.1052631578947367, |
| "grad_norm": 8.720367431640625, |
| "learning_rate": 4.821952960383649e-05, |
| "loss": 0.1158, |
| "num_input_tokens_seen": 75472, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.192982456140351, |
| "grad_norm": 5.152599334716797, |
| "learning_rate": 4.7924975388280524e-05, |
| "loss": 0.1046, |
| "num_input_tokens_seen": 78480, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.280701754385965, |
| "grad_norm": 0.22107002139091492, |
| "learning_rate": 4.760892901743944e-05, |
| "loss": 0.1558, |
| "num_input_tokens_seen": 81840, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.3684210526315788, |
| "grad_norm": 13.14340591430664, |
| "learning_rate": 4.727168678465988e-05, |
| "loss": 0.1763, |
| "num_input_tokens_seen": 85456, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.456140350877193, |
| "grad_norm": 0.03462052717804909, |
| "learning_rate": 4.6913564854400595e-05, |
| "loss": 0.3756, |
| "num_input_tokens_seen": 88304, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "grad_norm": 10.07657527923584, |
| "learning_rate": 4.6534898965828405e-05, |
| "loss": 0.3303, |
| "num_input_tokens_seen": 91216, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.543859649122807, |
| "eval_loss": 0.16141179203987122, |
| "eval_runtime": 0.9129, |
| "eval_samples_per_second": 27.385, |
| "eval_steps_per_second": 7.668, |
| "num_input_tokens_seen": 91216, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.6315789473684212, |
| "grad_norm": 14.401127815246582, |
| "learning_rate": 4.613604411806285e-05, |
| "loss": 0.1186, |
| "num_input_tokens_seen": 94512, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.719298245614035, |
| "grad_norm": 2.1707918643951416, |
| "learning_rate": 4.5717374237364665e-05, |
| "loss": 0.053, |
| "num_input_tokens_seen": 98000, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.807017543859649, |
| "grad_norm": 0.5812787413597107, |
| "learning_rate": 4.5279281826580056e-05, |
| "loss": 0.2145, |
| "num_input_tokens_seen": 101232, |
| "step": 160 |
| }, |
| { |
| "epoch": 2.8947368421052633, |
| "grad_norm": 0.14216437935829163, |
| "learning_rate": 4.482217759716946e-05, |
| "loss": 0.0657, |
| "num_input_tokens_seen": 104752, |
| "step": 165 |
| }, |
| { |
| "epoch": 2.982456140350877, |
| "grad_norm": 1.3072863817214966, |
| "learning_rate": 4.434649008416565e-05, |
| "loss": 0.0453, |
| "num_input_tokens_seen": 108080, |
| "step": 170 |
| }, |
| { |
| "epoch": 3.0526315789473686, |
| "eval_loss": 0.2379472255706787, |
| "eval_runtime": 0.9072, |
| "eval_samples_per_second": 27.559, |
| "eval_steps_per_second": 7.716, |
| "num_input_tokens_seen": 110696, |
| "step": 174 |
| }, |
| { |
| "epoch": 3.0701754385964914, |
| "grad_norm": 0.8564387559890747, |
| "learning_rate": 4.385266524442241e-05, |
| "loss": 0.544, |
| "num_input_tokens_seen": 111464, |
| "step": 175 |
| }, |
| { |
| "epoch": 3.1578947368421053, |
| "grad_norm": 0.11828005313873291, |
| "learning_rate": 4.334116603853007e-05, |
| "loss": 0.3419, |
| "num_input_tokens_seen": 114056, |
| "step": 180 |
| }, |
| { |
| "epoch": 3.245614035087719, |
| "grad_norm": 11.512298583984375, |
| "learning_rate": 4.2812471996790206e-05, |
| "loss": 0.2339, |
| "num_input_tokens_seen": 116872, |
| "step": 185 |
| }, |
| { |
| "epoch": 3.3333333333333335, |
| "grad_norm": 1.017046570777893, |
| "learning_rate": 4.226707876965611e-05, |
| "loss": 0.0893, |
| "num_input_tokens_seen": 120424, |
| "step": 190 |
| }, |
| { |
| "epoch": 3.4210526315789473, |
| "grad_norm": 10.220879554748535, |
| "learning_rate": 4.1705497663060767e-05, |
| "loss": 0.1081, |
| "num_input_tokens_seen": 124008, |
| "step": 195 |
| }, |
| { |
| "epoch": 3.5087719298245617, |
| "grad_norm": 0.5506229400634766, |
| "learning_rate": 4.1128255159067665e-05, |
| "loss": 0.116, |
| "num_input_tokens_seen": 127496, |
| "step": 200 |
| }, |
| { |
| "epoch": 3.56140350877193, |
| "eval_loss": 0.10014169663190842, |
| "eval_runtime": 0.9092, |
| "eval_samples_per_second": 27.497, |
| "eval_steps_per_second": 7.699, |
| "num_input_tokens_seen": 129448, |
| "step": 203 |
| }, |
| { |
| "epoch": 3.5964912280701755, |
| "grad_norm": 4.8370184898376465, |
| "learning_rate": 4.053589242229412e-05, |
| "loss": 0.05, |
| "num_input_tokens_seen": 130888, |
| "step": 205 |
| }, |
| { |
| "epoch": 3.6842105263157894, |
| "grad_norm": 20.419565200805664, |
| "learning_rate": 3.9928964792569655e-05, |
| "loss": 0.3039, |
| "num_input_tokens_seen": 133896, |
| "step": 210 |
| }, |
| { |
| "epoch": 3.7719298245614032, |
| "grad_norm": 0.8667486310005188, |
| "learning_rate": 3.930804126430513e-05, |
| "loss": 0.1435, |
| "num_input_tokens_seen": 137160, |
| "step": 215 |
| }, |
| { |
| "epoch": 3.8596491228070176, |
| "grad_norm": 0.30487579107284546, |
| "learning_rate": 3.867370395306068e-05, |
| "loss": 0.0368, |
| "num_input_tokens_seen": 140328, |
| "step": 220 |
| }, |
| { |
| "epoch": 3.9473684210526314, |
| "grad_norm": 1.794018030166626, |
| "learning_rate": 3.8026547549812665e-05, |
| "loss": 0.0058, |
| "num_input_tokens_seen": 143368, |
| "step": 225 |
| }, |
| { |
| "epoch": 4.035087719298246, |
| "grad_norm": 8.562941551208496, |
| "learning_rate": 3.736717876343106e-05, |
| "loss": 0.1798, |
| "num_input_tokens_seen": 145832, |
| "step": 230 |
| }, |
| { |
| "epoch": 4.0701754385964914, |
| "eval_loss": 0.036776382476091385, |
| "eval_runtime": 0.9089, |
| "eval_samples_per_second": 27.506, |
| "eval_steps_per_second": 7.702, |
| "num_input_tokens_seen": 147176, |
| "step": 232 |
| }, |
| { |
| "epoch": 4.12280701754386, |
| "grad_norm": 4.4760518074035645, |
| "learning_rate": 3.66962157518902e-05, |
| "loss": 0.0416, |
| "num_input_tokens_seen": 148712, |
| "step": 235 |
| }, |
| { |
| "epoch": 4.2105263157894735, |
| "grad_norm": 1.3935602903366089, |
| "learning_rate": 3.601428754274584e-05, |
| "loss": 0.0841, |
| "num_input_tokens_seen": 151752, |
| "step": 240 |
| }, |
| { |
| "epoch": 4.298245614035087, |
| "grad_norm": 0.30636581778526306, |
| "learning_rate": 3.532203344342212e-05, |
| "loss": 0.1876, |
| "num_input_tokens_seen": 154760, |
| "step": 245 |
| }, |
| { |
| "epoch": 4.385964912280702, |
| "grad_norm": 3.755957841873169, |
| "learning_rate": 3.4620102441861143e-05, |
| "loss": 0.1858, |
| "num_input_tokens_seen": 157896, |
| "step": 250 |
| }, |
| { |
| "epoch": 4.473684210526316, |
| "grad_norm": 0.09167452156543732, |
| "learning_rate": 3.390915259809696e-05, |
| "loss": 0.0475, |
| "num_input_tokens_seen": 160520, |
| "step": 255 |
| }, |
| { |
| "epoch": 4.56140350877193, |
| "grad_norm": 0.08018083870410919, |
| "learning_rate": 3.318985042732461e-05, |
| "loss": 0.2801, |
| "num_input_tokens_seen": 163880, |
| "step": 260 |
| }, |
| { |
| "epoch": 4.578947368421053, |
| "eval_loss": 0.11944518983364105, |
| "eval_runtime": 0.9101, |
| "eval_samples_per_second": 27.47, |
| "eval_steps_per_second": 7.691, |
| "num_input_tokens_seen": 164424, |
| "step": 261 |
| }, |
| { |
| "epoch": 4.649122807017544, |
| "grad_norm": 0.07898657023906708, |
| "learning_rate": 3.246287027504237e-05, |
| "loss": 0.1213, |
| "num_input_tokens_seen": 167464, |
| "step": 265 |
| }, |
| { |
| "epoch": 4.7368421052631575, |
| "grad_norm": 0.8377333879470825, |
| "learning_rate": 3.172889368485311e-05, |
| "loss": 0.0034, |
| "num_input_tokens_seen": 170792, |
| "step": 270 |
| }, |
| { |
| "epoch": 4.824561403508772, |
| "grad_norm": 4.984710216522217, |
| "learning_rate": 3.0988608759517475e-05, |
| "loss": 0.0195, |
| "num_input_tokens_seen": 173480, |
| "step": 275 |
| }, |
| { |
| "epoch": 4.912280701754386, |
| "grad_norm": 0.026097286492586136, |
| "learning_rate": 3.0242709515857758e-05, |
| "loss": 0.0018, |
| "num_input_tokens_seen": 177544, |
| "step": 280 |
| }, |
| { |
| "epoch": 5.0, |
| "grad_norm": 0.031160056591033936, |
| "learning_rate": 2.949189523411747e-05, |
| "loss": 0.0738, |
| "num_input_tokens_seen": 180504, |
| "step": 285 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "grad_norm": 0.029113443568348885, |
| "learning_rate": 2.8736869802386364e-05, |
| "loss": 0.0358, |
| "num_input_tokens_seen": 183416, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.087719298245614, |
| "eval_loss": 0.07741459459066391, |
| "eval_runtime": 0.9076, |
| "eval_samples_per_second": 27.545, |
| "eval_steps_per_second": 7.712, |
| "num_input_tokens_seen": 183416, |
| "step": 290 |
| }, |
| { |
| "epoch": 5.175438596491228, |
| "grad_norm": 0.16369421780109406, |
| "learning_rate": 2.797834105670559e-05, |
| "loss": 0.013, |
| "num_input_tokens_seen": 186712, |
| "step": 295 |
| }, |
| { |
| "epoch": 5.2631578947368425, |
| "grad_norm": 0.2860761284828186, |
| "learning_rate": 2.7217020117471793e-05, |
| "loss": 0.0459, |
| "num_input_tokens_seen": 189912, |
| "step": 300 |
| }, |
| { |
| "epoch": 5.350877192982456, |
| "grad_norm": 0.5779882073402405, |
| "learning_rate": 2.6453620722761896e-05, |
| "loss": 0.054, |
| "num_input_tokens_seen": 193624, |
| "step": 305 |
| }, |
| { |
| "epoch": 5.43859649122807, |
| "grad_norm": 0.03386834263801575, |
| "learning_rate": 2.5688858559204053e-05, |
| "loss": 0.017, |
| "num_input_tokens_seen": 197720, |
| "step": 310 |
| }, |
| { |
| "epoch": 5.526315789473684, |
| "grad_norm": 0.3514273166656494, |
| "learning_rate": 2.492345059102164e-05, |
| "loss": 0.2572, |
| "num_input_tokens_seen": 200856, |
| "step": 315 |
| }, |
| { |
| "epoch": 5.5964912280701755, |
| "eval_loss": 0.0888652354478836, |
| "eval_runtime": 0.909, |
| "eval_samples_per_second": 27.502, |
| "eval_steps_per_second": 7.701, |
| "num_input_tokens_seen": 203256, |
| "step": 319 |
| }, |
| { |
| "epoch": 5.614035087719298, |
| "grad_norm": 3.447037696838379, |
| "learning_rate": 2.4158114387879616e-05, |
| "loss": 0.1036, |
| "num_input_tokens_seen": 203800, |
| "step": 320 |
| }, |
| { |
| "epoch": 5.701754385964913, |
| "grad_norm": 0.24111364781856537, |
| "learning_rate": 2.3393567452163252e-05, |
| "loss": 0.1138, |
| "num_input_tokens_seen": 207064, |
| "step": 325 |
| }, |
| { |
| "epoch": 5.7894736842105265, |
| "grad_norm": 2.426321029663086, |
| "learning_rate": 2.2630526546319914e-05, |
| "loss": 0.0398, |
| "num_input_tokens_seen": 210168, |
| "step": 330 |
| }, |
| { |
| "epoch": 5.87719298245614, |
| "grad_norm": 0.14726001024246216, |
| "learning_rate": 2.186970702089457e-05, |
| "loss": 0.0054, |
| "num_input_tokens_seen": 213080, |
| "step": 335 |
| }, |
| { |
| "epoch": 5.964912280701754, |
| "grad_norm": 0.016804184764623642, |
| "learning_rate": 2.111182214388893e-05, |
| "loss": 0.0109, |
| "num_input_tokens_seen": 216152, |
| "step": 340 |
| }, |
| { |
| "epoch": 6.052631578947368, |
| "grad_norm": 0.14003737270832062, |
| "learning_rate": 2.0357582432072957e-05, |
| "loss": 0.0011, |
| "num_input_tokens_seen": 218736, |
| "step": 345 |
| }, |
| { |
| "epoch": 6.105263157894737, |
| "eval_loss": 0.110874705016613, |
| "eval_runtime": 0.9068, |
| "eval_samples_per_second": 27.569, |
| "eval_steps_per_second": 7.719, |
| "num_input_tokens_seen": 220912, |
| "step": 348 |
| }, |
| { |
| "epoch": 6.140350877192983, |
| "grad_norm": 0.7981622219085693, |
| "learning_rate": 1.9607694984875754e-05, |
| "loss": 0.1037, |
| "num_input_tokens_seen": 222160, |
| "step": 350 |
| }, |
| { |
| "epoch": 6.228070175438597, |
| "grad_norm": 0.006725190207362175, |
| "learning_rate": 1.8862862821480025e-05, |
| "loss": 0.0163, |
| "num_input_tokens_seen": 225904, |
| "step": 355 |
| }, |
| { |
| "epoch": 6.315789473684211, |
| "grad_norm": 0.01605769619345665, |
| "learning_rate": 1.8123784221741964e-05, |
| "loss": 0.0047, |
| "num_input_tokens_seen": 228912, |
| "step": 360 |
| }, |
| { |
| "epoch": 6.4035087719298245, |
| "grad_norm": 8.218822479248047, |
| "learning_rate": 1.73911520715541e-05, |
| "loss": 0.0418, |
| "num_input_tokens_seen": 232496, |
| "step": 365 |
| }, |
| { |
| "epoch": 6.491228070175438, |
| "grad_norm": 0.012366142123937607, |
| "learning_rate": 1.666565321326512e-05, |
| "loss": 0.0109, |
| "num_input_tokens_seen": 235792, |
| "step": 370 |
| }, |
| { |
| "epoch": 6.578947368421053, |
| "grad_norm": 0.011026023887097836, |
| "learning_rate": 1.5947967801765345e-05, |
| "loss": 0.0696, |
| "num_input_tokens_seen": 238960, |
| "step": 375 |
| }, |
| { |
| "epoch": 6.614035087719298, |
| "eval_loss": 0.09308334439992905, |
| "eval_runtime": 0.9106, |
| "eval_samples_per_second": 27.453, |
| "eval_steps_per_second": 7.687, |
| "num_input_tokens_seen": 240336, |
| "step": 377 |
| }, |
| { |
| "epoch": 6.666666666666667, |
| "grad_norm": 1.693655014038086, |
| "learning_rate": 1.5238768666841907e-05, |
| "loss": 0.0098, |
| "num_input_tokens_seen": 242064, |
| "step": 380 |
| }, |
| { |
| "epoch": 6.754385964912281, |
| "grad_norm": 0.3411250412464142, |
| "learning_rate": 1.4538720682400969e-05, |
| "loss": 0.0396, |
| "num_input_tokens_seen": 245328, |
| "step": 385 |
| }, |
| { |
| "epoch": 6.842105263157895, |
| "grad_norm": 0.03175486996769905, |
| "learning_rate": 1.3848480143148839e-05, |
| "loss": 0.0549, |
| "num_input_tokens_seen": 248144, |
| "step": 390 |
| }, |
| { |
| "epoch": 6.9298245614035086, |
| "grad_norm": 0.30593249201774597, |
| "learning_rate": 1.3168694149315796e-05, |
| "loss": 0.0017, |
| "num_input_tokens_seen": 250928, |
| "step": 395 |
| }, |
| { |
| "epoch": 7.017543859649122, |
| "grad_norm": 0.05055861920118332, |
| "learning_rate": 1.2500000000000006e-05, |
| "loss": 0.002, |
| "num_input_tokens_seen": 254168, |
| "step": 400 |
| }, |
| { |
| "epoch": 7.105263157894737, |
| "grad_norm": 0.06664076447486877, |
| "learning_rate": 1.1843024595699805e-05, |
| "loss": 0.0251, |
| "num_input_tokens_seen": 257080, |
| "step": 405 |
| }, |
| { |
| "epoch": 7.12280701754386, |
| "eval_loss": 0.08810269832611084, |
| "eval_runtime": 0.9051, |
| "eval_samples_per_second": 27.622, |
| "eval_steps_per_second": 7.734, |
| "num_input_tokens_seen": 257848, |
| "step": 406 |
| }, |
| { |
| "epoch": 7.192982456140351, |
| "grad_norm": 0.023075159639120102, |
| "learning_rate": 1.1198383850594758e-05, |
| "loss": 0.0009, |
| "num_input_tokens_seen": 260696, |
| "step": 410 |
| }, |
| { |
| "epoch": 7.280701754385965, |
| "grad_norm": 0.02422865480184555, |
| "learning_rate": 1.0566682115126344e-05, |
| "loss": 0.0012, |
| "num_input_tokens_seen": 263672, |
| "step": 415 |
| }, |
| { |
| "epoch": 7.368421052631579, |
| "grad_norm": 0.0777999609708786, |
| "learning_rate": 9.948511609419675e-06, |
| "loss": 0.0183, |
| "num_input_tokens_seen": 266296, |
| "step": 420 |
| }, |
| { |
| "epoch": 7.456140350877193, |
| "grad_norm": 0.02744252234697342, |
| "learning_rate": 9.344451868077353e-06, |
| "loss": 0.0006, |
| "num_input_tokens_seen": 270200, |
| "step": 425 |
| }, |
| { |
| "epoch": 7.543859649122807, |
| "grad_norm": 0.004234077874571085, |
| "learning_rate": 8.755069196866014e-06, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 273624, |
| "step": 430 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "grad_norm": 0.016323991119861603, |
| "learning_rate": 8.180916141804906e-06, |
| "loss": 0.006, |
| "num_input_tokens_seen": 276824, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.631578947368421, |
| "eval_loss": 0.08048205077648163, |
| "eval_runtime": 0.906, |
| "eval_samples_per_second": 27.594, |
| "eval_steps_per_second": 7.726, |
| "num_input_tokens_seen": 276824, |
| "step": 435 |
| }, |
| { |
| "epoch": 7.719298245614035, |
| "grad_norm": 0.007384572643786669, |
| "learning_rate": 7.622530971154199e-06, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 279896, |
| "step": 440 |
| }, |
| { |
| "epoch": 7.807017543859649, |
| "grad_norm": 0.039172444492578506, |
| "learning_rate": 7.080437170788723e-06, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 282872, |
| "step": 445 |
| }, |
| { |
| "epoch": 7.894736842105263, |
| "grad_norm": 1.4211935997009277, |
| "learning_rate": 6.555142953430158e-06, |
| "loss": 0.0038, |
| "num_input_tokens_seen": 286456, |
| "step": 450 |
| }, |
| { |
| "epoch": 7.982456140350877, |
| "grad_norm": 1.4576354026794434, |
| "learning_rate": 6.0471407821978135e-06, |
| "loss": 0.0071, |
| "num_input_tokens_seen": 289208, |
| "step": 455 |
| }, |
| { |
| "epoch": 8.070175438596491, |
| "grad_norm": 0.01859288290143013, |
| "learning_rate": 5.556906908924655e-06, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 291944, |
| "step": 460 |
| }, |
| { |
| "epoch": 8.140350877192983, |
| "eval_loss": 0.10053229331970215, |
| "eval_runtime": 0.9071, |
| "eval_samples_per_second": 27.559, |
| "eval_steps_per_second": 7.717, |
| "num_input_tokens_seen": 294504, |
| "step": 464 |
| }, |
| { |
| "epoch": 8.157894736842104, |
| "grad_norm": 0.29108142852783203, |
| "learning_rate": 5.084900927671393e-06, |
| "loss": 0.0014, |
| "num_input_tokens_seen": 295400, |
| "step": 465 |
| }, |
| { |
| "epoch": 8.24561403508772, |
| "grad_norm": 0.5007472634315491, |
| "learning_rate": 4.631565343857239e-06, |
| "loss": 0.0025, |
| "num_input_tokens_seen": 298792, |
| "step": 470 |
| }, |
| { |
| "epoch": 8.333333333333334, |
| "grad_norm": 0.12529756128787994, |
| "learning_rate": 4.19732515941125e-06, |
| "loss": 0.0011, |
| "num_input_tokens_seen": 302024, |
| "step": 475 |
| }, |
| { |
| "epoch": 8.421052631578947, |
| "grad_norm": 0.061395179480314255, |
| "learning_rate": 3.7825874743331907e-06, |
| "loss": 0.0008, |
| "num_input_tokens_seen": 305352, |
| "step": 480 |
| }, |
| { |
| "epoch": 8.508771929824562, |
| "grad_norm": 0.019067196175456047, |
| "learning_rate": 3.3877411050374424e-06, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 308072, |
| "step": 485 |
| }, |
| { |
| "epoch": 8.596491228070175, |
| "grad_norm": 0.03797854110598564, |
| "learning_rate": 3.013156219837776e-06, |
| "loss": 0.0008, |
| "num_input_tokens_seen": 311144, |
| "step": 490 |
| }, |
| { |
| "epoch": 8.649122807017545, |
| "eval_loss": 0.0856977105140686, |
| "eval_runtime": 0.9056, |
| "eval_samples_per_second": 27.606, |
| "eval_steps_per_second": 7.73, |
| "num_input_tokens_seen": 313576, |
| "step": 493 |
| }, |
| { |
| "epoch": 8.68421052631579, |
| "grad_norm": 0.06051577627658844, |
| "learning_rate": 2.659183991914696e-06, |
| "loss": 0.0083, |
| "num_input_tokens_seen": 314504, |
| "step": 495 |
| }, |
| { |
| "epoch": 8.771929824561404, |
| "grad_norm": 0.004321712534874678, |
| "learning_rate": 2.326156270090735e-06, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 317736, |
| "step": 500 |
| }, |
| { |
| "epoch": 8.859649122807017, |
| "grad_norm": 0.036203958094120026, |
| "learning_rate": 2.0143852677223075e-06, |
| "loss": 0.0029, |
| "num_input_tokens_seen": 321640, |
| "step": 505 |
| }, |
| { |
| "epoch": 8.947368421052632, |
| "grad_norm": 0.00752919539809227, |
| "learning_rate": 1.7241632699998123e-06, |
| "loss": 0.0008, |
| "num_input_tokens_seen": 324552, |
| "step": 510 |
| }, |
| { |
| "epoch": 9.035087719298245, |
| "grad_norm": 0.03985239192843437, |
| "learning_rate": 1.4557623599303903e-06, |
| "loss": 0.001, |
| "num_input_tokens_seen": 327328, |
| "step": 515 |
| }, |
| { |
| "epoch": 9.12280701754386, |
| "grad_norm": 0.019234662875533104, |
| "learning_rate": 1.2094341632602064e-06, |
| "loss": 0.0015, |
| "num_input_tokens_seen": 330592, |
| "step": 520 |
| }, |
| { |
| "epoch": 9.157894736842104, |
| "eval_loss": 0.09537296742200851, |
| "eval_runtime": 0.9091, |
| "eval_samples_per_second": 27.499, |
| "eval_steps_per_second": 7.7, |
| "num_input_tokens_seen": 332256, |
| "step": 522 |
| }, |
| { |
| "epoch": 9.210526315789474, |
| "grad_norm": 0.18579529225826263, |
| "learning_rate": 9.85409612575411e-07, |
| "loss": 0.0011, |
| "num_input_tokens_seen": 334080, |
| "step": 525 |
| }, |
| { |
| "epoch": 9.298245614035087, |
| "grad_norm": 0.019816020503640175, |
| "learning_rate": 7.838987308029427e-07, |
| "loss": 0.001, |
| "num_input_tokens_seen": 336928, |
| "step": 530 |
| }, |
| { |
| "epoch": 9.385964912280702, |
| "grad_norm": 0.008153663016855717, |
| "learning_rate": 6.050904343141095e-07, |
| "loss": 0.0003, |
| "num_input_tokens_seen": 340160, |
| "step": 535 |
| }, |
| { |
| "epoch": 9.473684210526315, |
| "grad_norm": 0.7912395596504211, |
| "learning_rate": 4.491523558155714e-07, |
| "loss": 0.0026, |
| "num_input_tokens_seen": 343424, |
| "step": 540 |
| }, |
| { |
| "epoch": 9.56140350877193, |
| "grad_norm": 0.0477537140250206, |
| "learning_rate": 3.162306871937387e-07, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 346816, |
| "step": 545 |
| }, |
| { |
| "epoch": 9.649122807017545, |
| "grad_norm": 0.07864265143871307, |
| "learning_rate": 2.064500424599436e-07, |
| "loss": 0.0009, |
| "num_input_tokens_seen": 349376, |
| "step": 550 |
| }, |
| { |
| "epoch": 9.666666666666666, |
| "eval_loss": 0.09962736070156097, |
| "eval_runtime": 0.9076, |
| "eval_samples_per_second": 27.546, |
| "eval_steps_per_second": 7.713, |
| "num_input_tokens_seen": 350336, |
| "step": 551 |
| }, |
| { |
| "epoch": 9.736842105263158, |
| "grad_norm": 0.016740398481488228, |
| "learning_rate": 1.1991334092484318e-07, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 353152, |
| "step": 555 |
| }, |
| { |
| "epoch": 9.824561403508772, |
| "grad_norm": 0.009569455869495869, |
| "learning_rate": 5.6701710711626334e-08, |
| "loss": 0.0004, |
| "num_input_tokens_seen": 355904, |
| "step": 560 |
| }, |
| { |
| "epoch": 9.912280701754385, |
| "grad_norm": 0.017078828066587448, |
| "learning_rate": 1.6874412698408836e-08, |
| "loss": 0.0009, |
| "num_input_tokens_seen": 358912, |
| "step": 565 |
| }, |
| { |
| "epoch": 10.0, |
| "grad_norm": 0.033932194113731384, |
| "learning_rate": 4.687849611939576e-10, |
| "loss": 0.0016, |
| "num_input_tokens_seen": 361992, |
| "step": 570 |
| }, |
| { |
| "epoch": 10.0, |
| "num_input_tokens_seen": 361992, |
| "step": 570, |
| "total_flos": 1.6300336151199744e+16, |
| "train_loss": 0.21586008377173066, |
| "train_runtime": 185.0165, |
| "train_samples_per_second": 12.161, |
| "train_steps_per_second": 3.081 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 570, |
| "num_input_tokens_seen": 361992, |
| "num_train_epochs": 10, |
| "save_steps": 29, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 1.6300336151199744e+16, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |