{
  "best_global_step": 377,
  "best_metric": 0.15596222877502441,
  "best_model_checkpoint": "saves/p-tuning/llama-3-8b-instruct/train_cb_1754652158/checkpoint-377",
  "epoch": 10.0,
  "eval_steps": 29,
  "global_step": 570,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08771929824561403,
      "grad_norm": 59.72950744628906,
      "learning_rate": 3.5087719298245615e-06,
      "loss": 3.7311,
      "num_input_tokens_seen": 3552,
      "step": 5
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 85.96363067626953,
      "learning_rate": 7.894736842105263e-06,
      "loss": 3.2007,
      "num_input_tokens_seen": 7264,
      "step": 10
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 155.43385314941406,
      "learning_rate": 1.2280701754385964e-05,
      "loss": 2.1438,
      "num_input_tokens_seen": 10528,
      "step": 15
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 61.96742630004883,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.8229,
      "num_input_tokens_seen": 14720,
      "step": 20
    },
    {
      "epoch": 0.43859649122807015,
      "grad_norm": 62.340431213378906,
      "learning_rate": 2.105263157894737e-05,
      "loss": 0.7984,
      "num_input_tokens_seen": 18016,
      "step": 25
    },
    {
      "epoch": 0.5087719298245614,
      "eval_loss": 0.7324875593185425,
      "eval_runtime": 0.8441,
      "eval_samples_per_second": 29.617,
      "eval_steps_per_second": 8.293,
      "num_input_tokens_seen": 20064,
      "step": 29
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 128.35720825195312,
      "learning_rate": 2.5438596491228074e-05,
      "loss": 1.0221,
      "num_input_tokens_seen": 20640,
      "step": 30
    },
    {
      "epoch": 0.6140350877192983,
      "grad_norm": 25.795623779296875,
      "learning_rate": 2.9824561403508772e-05,
      "loss": 0.4402,
      "num_input_tokens_seen": 24800,
      "step": 35
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 11.380049705505371,
      "learning_rate": 3.421052631578947e-05,
      "loss": 0.3711,
      "num_input_tokens_seen": 28064,
      "step": 40
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 31.992555618286133,
      "learning_rate": 3.859649122807018e-05,
      "loss": 0.3899,
      "num_input_tokens_seen": 30944,
      "step": 45
    },
    {
      "epoch": 0.8771929824561403,
      "grad_norm": 26.026948928833008,
      "learning_rate": 4.298245614035088e-05,
      "loss": 0.5316,
      "num_input_tokens_seen": 33664,
      "step": 50
    },
    {
      "epoch": 0.9649122807017544,
      "grad_norm": 322.9638977050781,
      "learning_rate": 4.736842105263158e-05,
      "loss": 1.5006,
      "num_input_tokens_seen": 36320,
      "step": 55
    },
    {
      "epoch": 1.0175438596491229,
      "eval_loss": 0.5227728486061096,
      "eval_runtime": 0.8526,
      "eval_samples_per_second": 29.324,
      "eval_steps_per_second": 8.211,
      "num_input_tokens_seen": 37832,
      "step": 58
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 67.29098510742188,
      "learning_rate": 4.999812487773597e-05,
      "loss": 0.349,
      "num_input_tokens_seen": 39080,
      "step": 60
    },
    {
      "epoch": 1.1403508771929824,
      "grad_norm": 40.01487731933594,
      "learning_rate": 4.997703298253406e-05,
      "loss": 0.9668,
      "num_input_tokens_seen": 42536,
      "step": 65
    },
    {
      "epoch": 1.2280701754385965,
      "grad_norm": 18.30699920654297,
      "learning_rate": 4.993252512887069e-05,
      "loss": 0.7583,
      "num_input_tokens_seen": 45608,
      "step": 70
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 163.50320434570312,
      "learning_rate": 4.986464304284091e-05,
      "loss": 1.4897,
      "num_input_tokens_seen": 49352,
      "step": 75
    },
    {
      "epoch": 1.4035087719298245,
      "grad_norm": 44.9167594909668,
      "learning_rate": 4.977345036387331e-05,
      "loss": 0.4684,
      "num_input_tokens_seen": 52328,
      "step": 80
    },
    {
      "epoch": 1.4912280701754386,
      "grad_norm": 3.2939820289611816,
      "learning_rate": 4.965903258506806e-05,
      "loss": 0.3837,
      "num_input_tokens_seen": 56328,
      "step": 85
    },
    {
      "epoch": 1.526315789473684,
      "eval_loss": 0.5208507776260376,
      "eval_runtime": 0.853,
      "eval_samples_per_second": 29.308,
      "eval_steps_per_second": 8.206,
      "num_input_tokens_seen": 57288,
      "step": 87
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 15.485786437988281,
      "learning_rate": 4.952149697304716e-05,
      "loss": 0.634,
      "num_input_tokens_seen": 59048,
      "step": 90
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 4.415175437927246,
      "learning_rate": 4.9360972467392056e-05,
      "loss": 0.2586,
      "num_input_tokens_seen": 62504,
      "step": 95
    },
    {
      "epoch": 1.7543859649122808,
      "grad_norm": 12.429476737976074,
      "learning_rate": 4.917760955976277e-05,
      "loss": 0.2382,
      "num_input_tokens_seen": 65832,
      "step": 100
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 9.031868934631348,
      "learning_rate": 4.897158015281209e-05,
      "loss": 0.2652,
      "num_input_tokens_seen": 68808,
      "step": 105
    },
    {
      "epoch": 1.9298245614035088,
      "grad_norm": 6.544041633605957,
      "learning_rate": 4.874307739902689e-05,
      "loss": 0.233,
      "num_input_tokens_seen": 71848,
      "step": 110
    },
    {
      "epoch": 2.017543859649123,
      "grad_norm": 2.9446492195129395,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.2138,
      "num_input_tokens_seen": 74040,
      "step": 115
    },
    {
      "epoch": 2.0350877192982457,
      "eval_loss": 0.23195452988147736,
      "eval_runtime": 0.8523,
      "eval_samples_per_second": 29.332,
      "eval_steps_per_second": 8.213,
      "num_input_tokens_seen": 74520,
      "step": 116
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 20.17977523803711,
      "learning_rate": 4.821952960383649e-05,
      "loss": 0.1899,
      "num_input_tokens_seen": 77272,
      "step": 120
    },
    {
      "epoch": 2.192982456140351,
      "grad_norm": 9.810575485229492,
      "learning_rate": 4.7924975388280524e-05,
      "loss": 0.2362,
      "num_input_tokens_seen": 80280,
      "step": 125
    },
    {
      "epoch": 2.280701754385965,
      "grad_norm": 12.129171371459961,
      "learning_rate": 4.760892901743944e-05,
      "loss": 0.3055,
      "num_input_tokens_seen": 83480,
      "step": 130
    },
    {
      "epoch": 2.3684210526315788,
      "grad_norm": 9.584630966186523,
      "learning_rate": 4.727168678465988e-05,
      "loss": 0.4557,
      "num_input_tokens_seen": 86232,
      "step": 135
    },
    {
      "epoch": 2.456140350877193,
      "grad_norm": 5.297253131866455,
      "learning_rate": 4.6913564854400595e-05,
      "loss": 0.2885,
      "num_input_tokens_seen": 89656,
      "step": 140
    },
    {
      "epoch": 2.543859649122807,
      "grad_norm": 5.190830707550049,
      "learning_rate": 4.6534898965828405e-05,
      "loss": 0.342,
      "num_input_tokens_seen": 93080,
      "step": 145
    },
    {
      "epoch": 2.543859649122807,
      "eval_loss": 0.24903666973114014,
      "eval_runtime": 0.8518,
      "eval_samples_per_second": 29.349,
      "eval_steps_per_second": 8.218,
      "num_input_tokens_seen": 93080,
      "step": 145
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 6.145389080047607,
      "learning_rate": 4.613604411806285e-05,
      "loss": 0.2368,
      "num_input_tokens_seen": 96440,
      "step": 150
    },
    {
      "epoch": 2.719298245614035,
      "grad_norm": 14.407633781433105,
      "learning_rate": 4.5717374237364665e-05,
      "loss": 0.2185,
      "num_input_tokens_seen": 100280,
      "step": 155
    },
    {
      "epoch": 2.807017543859649,
      "grad_norm": 13.796713829040527,
      "learning_rate": 4.5279281826580056e-05,
      "loss": 0.4247,
      "num_input_tokens_seen": 103512,
      "step": 160
    },
    {
      "epoch": 2.8947368421052633,
      "grad_norm": 5.334498405456543,
      "learning_rate": 4.482217759716946e-05,
      "loss": 0.3187,
      "num_input_tokens_seen": 106168,
      "step": 165
    },
    {
      "epoch": 2.982456140350877,
      "grad_norm": 2.5159807205200195,
      "learning_rate": 4.434649008416565e-05,
      "loss": 0.231,
      "num_input_tokens_seen": 109624,
      "step": 170
    },
    {
      "epoch": 3.0526315789473686,
      "eval_loss": 0.16980019211769104,
      "eval_runtime": 0.8481,
      "eval_samples_per_second": 29.479,
      "eval_steps_per_second": 8.254,
      "num_input_tokens_seen": 111928,
      "step": 174
    },
    {
      "epoch": 3.0701754385964914,
      "grad_norm": 9.546319007873535,
      "learning_rate": 4.385266524442241e-05,
      "loss": 0.1977,
      "num_input_tokens_seen": 112472,
      "step": 175
    },
    {
      "epoch": 3.1578947368421053,
      "grad_norm": 6.8020195960998535,
      "learning_rate": 4.334116603853007e-05,
      "loss": 0.31,
      "num_input_tokens_seen": 115576,
      "step": 180
    },
    {
      "epoch": 3.245614035087719,
      "grad_norm": 167.5514678955078,
      "learning_rate": 4.2812471996790206e-05,
      "loss": 0.3518,
      "num_input_tokens_seen": 119000,
      "step": 185
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 6.39103364944458,
      "learning_rate": 4.226707876965611e-05,
      "loss": 0.2729,
      "num_input_tokens_seen": 122360,
      "step": 190
    },
    {
      "epoch": 3.4210526315789473,
      "grad_norm": 7.074615955352783,
      "learning_rate": 4.1705497663060767e-05,
      "loss": 0.2196,
      "num_input_tokens_seen": 125496,
      "step": 195
    },
    {
      "epoch": 3.5087719298245617,
      "grad_norm": 5.8528361320495605,
      "learning_rate": 4.1128255159067665e-05,
      "loss": 0.3391,
      "num_input_tokens_seen": 128760,
      "step": 200
    },
    {
      "epoch": 3.56140350877193,
      "eval_loss": 0.2744066119194031,
      "eval_runtime": 0.8498,
      "eval_samples_per_second": 29.417,
      "eval_steps_per_second": 8.237,
      "num_input_tokens_seen": 131160,
      "step": 203
    },
    {
      "epoch": 3.5964912280701755,
      "grad_norm": 15.246909141540527,
      "learning_rate": 4.053589242229412e-05,
      "loss": 0.2077,
      "num_input_tokens_seen": 132248,
      "step": 205
    },
    {
      "epoch": 3.6842105263157894,
      "grad_norm": 3.061079263687134,
      "learning_rate": 3.9928964792569655e-05,
      "loss": 0.2695,
      "num_input_tokens_seen": 135160,
      "step": 210
    },
    {
      "epoch": 3.7719298245614032,
      "grad_norm": 3.2342686653137207,
      "learning_rate": 3.930804126430513e-05,
      "loss": 0.2965,
      "num_input_tokens_seen": 138200,
      "step": 215
    },
    {
      "epoch": 3.8596491228070176,
      "grad_norm": 5.967337608337402,
      "learning_rate": 3.867370395306068e-05,
      "loss": 0.1714,
      "num_input_tokens_seen": 141752,
      "step": 220
    },
    {
      "epoch": 3.9473684210526314,
      "grad_norm": 11.130901336669922,
      "learning_rate": 3.8026547549812665e-05,
      "loss": 0.3448,
      "num_input_tokens_seen": 146008,
      "step": 225
    },
    {
      "epoch": 4.035087719298246,
      "grad_norm": 3.796250104904175,
      "learning_rate": 3.736717876343106e-05,
      "loss": 0.2601,
      "num_input_tokens_seen": 148648,
      "step": 230
    },
    {
      "epoch": 4.0701754385964914,
      "eval_loss": 0.16180922091007233,
      "eval_runtime": 0.8511,
      "eval_samples_per_second": 29.374,
      "eval_steps_per_second": 8.225,
      "num_input_tokens_seen": 150056,
      "step": 232
    },
    {
      "epoch": 4.12280701754386,
      "grad_norm": 5.916079521179199,
      "learning_rate": 3.66962157518902e-05,
      "loss": 0.2069,
      "num_input_tokens_seen": 151656,
      "step": 235
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 3.373704671859741,
      "learning_rate": 3.601428754274584e-05,
      "loss": 0.1738,
      "num_input_tokens_seen": 154280,
      "step": 240
    },
    {
      "epoch": 4.298245614035087,
      "grad_norm": 7.352924346923828,
      "learning_rate": 3.532203344342212e-05,
      "loss": 0.2579,
      "num_input_tokens_seen": 157160,
      "step": 245
    },
    {
      "epoch": 4.385964912280702,
      "grad_norm": 5.744953632354736,
      "learning_rate": 3.4620102441861143e-05,
      "loss": 0.227,
      "num_input_tokens_seen": 160072,
      "step": 250
    },
    {
      "epoch": 4.473684210526316,
      "grad_norm": 8.015254020690918,
      "learning_rate": 3.390915259809696e-05,
      "loss": 0.1874,
      "num_input_tokens_seen": 163752,
      "step": 255
    },
    {
      "epoch": 4.56140350877193,
      "grad_norm": 7.88545560836792,
      "learning_rate": 3.318985042732461e-05,
      "loss": 0.1828,
      "num_input_tokens_seen": 166600,
      "step": 260
    },
    {
      "epoch": 4.578947368421053,
      "eval_loss": 0.25386178493499756,
      "eval_runtime": 0.8502,
      "eval_samples_per_second": 29.403,
      "eval_steps_per_second": 8.233,
      "num_input_tokens_seen": 167208,
      "step": 261
    },
    {
      "epoch": 4.649122807017544,
      "grad_norm": 4.994500160217285,
      "learning_rate": 3.246287027504237e-05,
      "loss": 0.2042,
      "num_input_tokens_seen": 169704,
      "step": 265
    },
    {
      "epoch": 4.7368421052631575,
      "grad_norm": 5.937520503997803,
      "learning_rate": 3.172889368485311e-05,
      "loss": 0.1841,
      "num_input_tokens_seen": 173000,
      "step": 270
    },
    {
      "epoch": 4.824561403508772,
      "grad_norm": 10.352025032043457,
      "learning_rate": 3.0988608759517475e-05,
      "loss": 0.2559,
      "num_input_tokens_seen": 177128,
      "step": 275
    },
    {
      "epoch": 4.912280701754386,
      "grad_norm": 16.774810791015625,
      "learning_rate": 3.0242709515857758e-05,
      "loss": 0.2189,
      "num_input_tokens_seen": 180680,
      "step": 280
    },
    {
      "epoch": 5.0,
      "grad_norm": 19.19439697265625,
      "learning_rate": 2.949189523411747e-05,
      "loss": 0.1958,
      "num_input_tokens_seen": 183280,
      "step": 285
    },
    {
      "epoch": 5.087719298245614,
      "grad_norm": 9.190163612365723,
      "learning_rate": 2.8736869802386364e-05,
      "loss": 0.3148,
      "num_input_tokens_seen": 186160,
      "step": 290
    },
    {
      "epoch": 5.087719298245614,
      "eval_loss": 0.25446251034736633,
      "eval_runtime": 0.8549,
      "eval_samples_per_second": 29.243,
      "eval_steps_per_second": 8.188,
      "num_input_tokens_seen": 186160,
      "step": 290
    },
    {
      "epoch": 5.175438596491228,
      "grad_norm": 6.987154960632324,
      "learning_rate": 2.797834105670559e-05,
      "loss": 0.203,
      "num_input_tokens_seen": 189584,
      "step": 295
    },
    {
      "epoch": 5.2631578947368425,
      "grad_norm": 7.0422539710998535,
      "learning_rate": 2.7217020117471793e-05,
      "loss": 0.2338,
      "num_input_tokens_seen": 193296,
      "step": 300
    },
    {
      "epoch": 5.350877192982456,
      "grad_norm": 6.827476501464844,
      "learning_rate": 2.6453620722761896e-05,
      "loss": 0.2074,
      "num_input_tokens_seen": 197328,
      "step": 305
    },
    {
      "epoch": 5.43859649122807,
      "grad_norm": 2.4718358516693115,
      "learning_rate": 2.5688858559204053e-05,
      "loss": 0.1303,
      "num_input_tokens_seen": 200560,
      "step": 310
    },
    {
      "epoch": 5.526315789473684,
      "grad_norm": 12.62780475616455,
      "learning_rate": 2.492345059102164e-05,
      "loss": 0.1062,
      "num_input_tokens_seen": 203632,
      "step": 315
    },
    {
      "epoch": 5.5964912280701755,
      "eval_loss": 0.21558518707752228,
      "eval_runtime": 0.8533,
      "eval_samples_per_second": 29.299,
      "eval_steps_per_second": 8.204,
      "num_input_tokens_seen": 206000,
      "step": 319
    },
    {
      "epoch": 5.614035087719298,
      "grad_norm": 5.2489752769470215,
      "learning_rate": 2.4158114387879616e-05,
      "loss": 0.1738,
      "num_input_tokens_seen": 206480,
      "step": 320
    },
    {
      "epoch": 5.701754385964913,
      "grad_norm": 9.622069358825684,
      "learning_rate": 2.3393567452163252e-05,
      "loss": 0.2403,
      "num_input_tokens_seen": 209232,
      "step": 325
    },
    {
      "epoch": 5.7894736842105265,
      "grad_norm": 2.9533114433288574,
      "learning_rate": 2.2630526546319914e-05,
      "loss": 0.3923,
      "num_input_tokens_seen": 213168,
      "step": 330
    },
    {
      "epoch": 5.87719298245614,
      "grad_norm": 6.1617937088012695,
      "learning_rate": 2.186970702089457e-05,
      "loss": 0.159,
      "num_input_tokens_seen": 216752,
      "step": 335
    },
    {
      "epoch": 5.964912280701754,
      "grad_norm": 6.799952507019043,
      "learning_rate": 2.111182214388893e-05,
      "loss": 0.1847,
      "num_input_tokens_seen": 219536,
      "step": 340
    },
    {
      "epoch": 6.052631578947368,
      "grad_norm": 6.728596210479736,
      "learning_rate": 2.0357582432072957e-05,
      "loss": 0.0974,
      "num_input_tokens_seen": 222272,
      "step": 345
    },
    {
      "epoch": 6.105263157894737,
      "eval_loss": 0.2205836921930313,
      "eval_runtime": 0.8503,
      "eval_samples_per_second": 29.402,
      "eval_steps_per_second": 8.233,
      "num_input_tokens_seen": 224064,
      "step": 348
    },
    {
      "epoch": 6.140350877192983,
      "grad_norm": 3.4464352130889893,
      "learning_rate": 1.9607694984875754e-05,
      "loss": 0.3393,
      "num_input_tokens_seen": 225664,
      "step": 350
    },
    {
      "epoch": 6.228070175438597,
      "grad_norm": 4.825738906860352,
      "learning_rate": 1.8862862821480025e-05,
      "loss": 0.1941,
      "num_input_tokens_seen": 229408,
      "step": 355
    },
    {
      "epoch": 6.315789473684211,
      "grad_norm": 4.794721603393555,
      "learning_rate": 1.8123784221741964e-05,
      "loss": 0.3113,
      "num_input_tokens_seen": 232832,
      "step": 360
    },
    {
      "epoch": 6.4035087719298245,
      "grad_norm": 2.929058313369751,
      "learning_rate": 1.73911520715541e-05,
      "loss": 0.0671,
      "num_input_tokens_seen": 236288,
      "step": 365
    },
    {
      "epoch": 6.491228070175438,
      "grad_norm": 0.9531017541885376,
      "learning_rate": 1.666565321326512e-05,
      "loss": 0.1522,
      "num_input_tokens_seen": 239296,
      "step": 370
    },
    {
      "epoch": 6.578947368421053,
      "grad_norm": 10.87993049621582,
      "learning_rate": 1.5947967801765345e-05,
      "loss": 0.1758,
      "num_input_tokens_seen": 242848,
      "step": 375
    },
    {
      "epoch": 6.614035087719298,
      "eval_loss": 0.15596222877502441,
      "eval_runtime": 0.8511,
      "eval_samples_per_second": 29.374,
      "eval_steps_per_second": 8.225,
      "num_input_tokens_seen": 243840,
      "step": 377
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 6.758040904998779,
      "learning_rate": 1.5238768666841907e-05,
      "loss": 0.1513,
      "num_input_tokens_seen": 245344,
      "step": 380
    },
    {
      "epoch": 6.754385964912281,
      "grad_norm": 4.473215103149414,
      "learning_rate": 1.4538720682400969e-05,
      "loss": 0.2004,
      "num_input_tokens_seen": 248256,
      "step": 385
    },
    {
      "epoch": 6.842105263157895,
      "grad_norm": 5.833415985107422,
      "learning_rate": 1.3848480143148839e-05,
      "loss": 0.2411,
      "num_input_tokens_seen": 250976,
      "step": 390
    },
    {
      "epoch": 6.9298245614035086,
      "grad_norm": 4.30411958694458,
      "learning_rate": 1.3168694149315796e-05,
      "loss": 0.075,
      "num_input_tokens_seen": 254944,
      "step": 395
    },
    {
      "epoch": 7.017543859649122,
      "grad_norm": 2.6023566722869873,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.0674,
      "num_input_tokens_seen": 257888,
      "step": 400
    },
    {
      "epoch": 7.105263157894737,
      "grad_norm": 1.8268622159957886,
      "learning_rate": 1.1843024595699805e-05,
      "loss": 0.0921,
      "num_input_tokens_seen": 261088,
      "step": 405
    },
    {
      "epoch": 7.12280701754386,
      "eval_loss": 0.19613634049892426,
      "eval_runtime": 0.849,
      "eval_samples_per_second": 29.446,
      "eval_steps_per_second": 8.245,
      "num_input_tokens_seen": 261504,
      "step": 406
    },
    {
      "epoch": 7.192982456140351,
      "grad_norm": 10.325926780700684,
      "learning_rate": 1.1198383850594758e-05,
      "loss": 0.0531,
      "num_input_tokens_seen": 263840,
      "step": 410
    },
    {
      "epoch": 7.280701754385965,
      "grad_norm": 12.008359909057617,
      "learning_rate": 1.0566682115126344e-05,
      "loss": 0.2284,
      "num_input_tokens_seen": 267424,
      "step": 415
    },
    {
      "epoch": 7.368421052631579,
      "grad_norm": 9.924817085266113,
      "learning_rate": 9.948511609419675e-06,
      "loss": 0.3437,
      "num_input_tokens_seen": 270720,
      "step": 420
    },
    {
      "epoch": 7.456140350877193,
      "grad_norm": 2.3891053199768066,
      "learning_rate": 9.344451868077353e-06,
      "loss": 0.0494,
      "num_input_tokens_seen": 273504,
      "step": 425
    },
    {
      "epoch": 7.543859649122807,
      "grad_norm": 6.745932102203369,
      "learning_rate": 8.755069196866014e-06,
      "loss": 0.1256,
      "num_input_tokens_seen": 276608,
      "step": 430
    },
    {
      "epoch": 7.631578947368421,
      "grad_norm": 11.279396057128906,
      "learning_rate": 8.180916141804906e-06,
      "loss": 0.2705,
      "num_input_tokens_seen": 280352,
      "step": 435
    },
    {
      "epoch": 7.631578947368421,
      "eval_loss": 0.23887619376182556,
      "eval_runtime": 0.8494,
      "eval_samples_per_second": 29.432,
      "eval_steps_per_second": 8.241,
      "num_input_tokens_seen": 280352,
      "step": 435
    },
    {
      "epoch": 7.719298245614035,
      "grad_norm": 3.9152004718780518,
      "learning_rate": 7.622530971154199e-06,
      "loss": 0.2732,
      "num_input_tokens_seen": 283808,
      "step": 440
    },
    {
      "epoch": 7.807017543859649,
      "grad_norm": 8.49035930633545,
      "learning_rate": 7.080437170788723e-06,
      "loss": 0.1001,
      "num_input_tokens_seen": 286688,
      "step": 445
    },
    {
      "epoch": 7.894736842105263,
      "grad_norm": 5.865164279937744,
      "learning_rate": 6.555142953430158e-06,
      "loss": 0.1136,
      "num_input_tokens_seen": 290240,
      "step": 450
    },
    {
      "epoch": 7.982456140350877,
      "grad_norm": 0.9978966116905212,
      "learning_rate": 6.0471407821978135e-06,
      "loss": 0.1173,
      "num_input_tokens_seen": 293568,
      "step": 455
    },
    {
      "epoch": 8.070175438596491,
      "grad_norm": 5.051326751708984,
      "learning_rate": 5.556906908924655e-06,
      "loss": 0.0502,
      "num_input_tokens_seen": 296720,
      "step": 460
    },
    {
      "epoch": 8.140350877192983,
      "eval_loss": 0.16461919248104095,
      "eval_runtime": 0.8517,
      "eval_samples_per_second": 29.353,
      "eval_steps_per_second": 8.219,
      "num_input_tokens_seen": 299344,
      "step": 464
    },
    {
      "epoch": 8.157894736842104,
      "grad_norm": 1.7852911949157715,
      "learning_rate": 5.084900927671393e-06,
      "loss": 0.3081,
      "num_input_tokens_seen": 300112,
      "step": 465
    },
    {
      "epoch": 8.24561403508772,
      "grad_norm": 1.0807344913482666,
      "learning_rate": 4.631565343857239e-06,
      "loss": 0.0428,
      "num_input_tokens_seen": 303664,
      "step": 470
    },
    {
      "epoch": 8.333333333333334,
      "grad_norm": 5.201423645019531,
      "learning_rate": 4.19732515941125e-06,
      "loss": 0.0843,
      "num_input_tokens_seen": 306480,
      "step": 475
    },
    {
      "epoch": 8.421052631578947,
      "grad_norm": 19.143348693847656,
      "learning_rate": 3.7825874743331907e-06,
      "loss": 0.1734,
      "num_input_tokens_seen": 309744,
      "step": 480
    },
    {
      "epoch": 8.508771929824562,
      "grad_norm": 1.025292992591858,
      "learning_rate": 3.3877411050374424e-06,
      "loss": 0.1019,
      "num_input_tokens_seen": 313360,
      "step": 485
    },
    {
      "epoch": 8.596491228070175,
      "grad_norm": 5.185237884521484,
      "learning_rate": 3.013156219837776e-06,
      "loss": 0.1646,
      "num_input_tokens_seen": 316784,
      "step": 490
    },
    {
      "epoch": 8.649122807017545,
      "eval_loss": 0.2208017259836197,
      "eval_runtime": 0.8544,
      "eval_samples_per_second": 29.261,
      "eval_steps_per_second": 8.193,
      "num_input_tokens_seen": 318672,
      "step": 493
    },
    {
      "epoch": 8.68421052631579,
      "grad_norm": 0.6018063426017761,
      "learning_rate": 2.659183991914696e-06,
      "loss": 0.0134,
      "num_input_tokens_seen": 319760,
      "step": 495
    },
    {
      "epoch": 8.771929824561404,
      "grad_norm": 1.9732301235198975,
      "learning_rate": 2.326156270090735e-06,
      "loss": 0.0183,
      "num_input_tokens_seen": 322512,
      "step": 500
    },
    {
      "epoch": 8.859649122807017,
      "grad_norm": 0.3454095423221588,
      "learning_rate": 2.0143852677223075e-06,
      "loss": 0.163,
      "num_input_tokens_seen": 325936,
      "step": 505
    },
    {
      "epoch": 8.947368421052632,
      "grad_norm": 6.823419570922852,
      "learning_rate": 1.7241632699998123e-06,
      "loss": 0.234,
      "num_input_tokens_seen": 329360,
      "step": 510
    },
    {
      "epoch": 9.035087719298245,
      "grad_norm": 0.5328010320663452,
      "learning_rate": 1.4557623599303903e-06,
      "loss": 0.1047,
      "num_input_tokens_seen": 332168,
      "step": 515
    },
    {
      "epoch": 9.12280701754386,
      "grad_norm": 8.00861644744873,
      "learning_rate": 1.2094341632602064e-06,
      "loss": 0.1549,
      "num_input_tokens_seen": 336296,
      "step": 520
    },
    {
      "epoch": 9.157894736842104,
      "eval_loss": 0.19831164181232452,
      "eval_runtime": 0.8581,
      "eval_samples_per_second": 29.136,
      "eval_steps_per_second": 8.158,
      "num_input_tokens_seen": 337480,
      "step": 522
    },
    {
      "epoch": 9.210526315789474,
      "grad_norm": 10.439408302307129,
      "learning_rate": 9.85409612575411e-07,
      "loss": 0.1365,
      "num_input_tokens_seen": 339080,
      "step": 525
    },
    {
      "epoch": 9.298245614035087,
      "grad_norm": 7.713301181793213,
      "learning_rate": 7.838987308029427e-07,
      "loss": 0.0859,
      "num_input_tokens_seen": 342568,
      "step": 530
    },
    {
      "epoch": 9.385964912280702,
      "grad_norm": 0.42152801156044006,
      "learning_rate": 6.050904343141095e-07,
      "loss": 0.067,
      "num_input_tokens_seen": 345576,
      "step": 535
    },
    {
      "epoch": 9.473684210526315,
      "grad_norm": 18.922243118286133,
      "learning_rate": 4.491523558155714e-07,
      "loss": 0.1141,
      "num_input_tokens_seen": 348392,
      "step": 540
    },
    {
      "epoch": 9.56140350877193,
      "grad_norm": 6.063596725463867,
      "learning_rate": 3.162306871937387e-07,
      "loss": 0.0688,
      "num_input_tokens_seen": 351912,
      "step": 545
    },
    {
      "epoch": 9.649122807017545,
      "grad_norm": 0.5159357786178589,
      "learning_rate": 2.064500424599436e-07,
      "loss": 0.072,
      "num_input_tokens_seen": 355432,
      "step": 550
    },
    {
      "epoch": 9.666666666666666,
      "eval_loss": 0.2247486412525177,
      "eval_runtime": 0.8497,
      "eval_samples_per_second": 29.422,
      "eval_steps_per_second": 8.238,
      "num_input_tokens_seen": 356456,
      "step": 551
    },
    {
      "epoch": 9.736842105263158,
      "grad_norm": 1.148868203163147,
      "learning_rate": 1.1991334092484318e-07,
      "loss": 0.0824,
      "num_input_tokens_seen": 358760,
      "step": 555
    },
    {
      "epoch": 9.824561403508772,
      "grad_norm": 1.0751643180847168,
      "learning_rate": 5.6701710711626334e-08,
      "loss": 0.141,
      "num_input_tokens_seen": 362088,
      "step": 560
    },
    {
      "epoch": 9.912280701754385,
      "grad_norm": 4.292356491088867,
      "learning_rate": 1.6874412698408836e-08,
      "loss": 0.0604,
      "num_input_tokens_seen": 365544,
      "step": 565
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.1350361406803131,
      "learning_rate": 4.687849611939576e-10,
      "loss": 0.3128,
      "num_input_tokens_seen": 367864,
      "step": 570
    },
    {
      "epoch": 10.0,
      "num_input_tokens_seen": 367864,
      "step": 570,
      "total_flos": 1.6564749657243648e+16,
      "train_loss": 0.34152856785477254,
      "train_runtime": 183.4948,
      "train_samples_per_second": 12.262,
      "train_steps_per_second": 3.106
    }
  ],
  "logging_steps": 5,
  "max_steps": 570,
  "num_input_tokens_seen": 367864,
  "num_train_epochs": 10,
  "save_steps": 29,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6564749657243648e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}