{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "learning_rate": 0.0002954545454545454,
      "loss": 0.71,
      "step": 250
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00028787878787878786,
      "loss": 0.5325,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.5033329725265503,
      "eval_runtime": 9.0003,
      "eval_samples_per_second": 888.861,
      "eval_steps_per_second": 27.777,
      "step": 500
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.0002803030303030303,
      "loss": 0.4322,
      "step": 750
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0002727272727272727,
      "loss": 0.441,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.4973089098930359,
      "eval_runtime": 9.0771,
      "eval_samples_per_second": 881.341,
      "eval_steps_per_second": 27.542,
      "step": 1000
    },
    {
      "epoch": 2.5,
      "learning_rate": 0.0002651515151515151,
      "loss": 0.352,
      "step": 1250
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.00025757575757575756,
      "loss": 0.372,
      "step": 1500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.5114700198173523,
      "eval_runtime": 9.3118,
      "eval_samples_per_second": 859.12,
      "eval_steps_per_second": 26.848,
      "step": 1500
    },
    {
      "epoch": 3.5,
      "learning_rate": 0.00025,
      "loss": 0.2921,
      "step": 1750
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.0002424242424242424,
      "loss": 0.3107,
      "step": 2000
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.5385135412216187,
      "eval_runtime": 9.7573,
      "eval_samples_per_second": 819.896,
      "eval_steps_per_second": 25.622,
      "step": 2000
    },
    {
      "epoch": 4.5,
      "learning_rate": 0.00023484848484848483,
      "loss": 0.2439,
      "step": 2250
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.00022727272727272725,
      "loss": 0.2619,
      "step": 2500
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.5607308149337769,
      "eval_runtime": 9.0142,
      "eval_samples_per_second": 887.492,
      "eval_steps_per_second": 27.734,
      "step": 2500
    },
    {
      "epoch": 5.5,
      "learning_rate": 0.00021969696969696969,
      "loss": 0.2105,
      "step": 2750
    },
    {
      "epoch": 6.0,
      "learning_rate": 0.0002121212121212121,
      "loss": 0.2257,
      "step": 3000
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.5820582509040833,
      "eval_runtime": 22.0785,
      "eval_samples_per_second": 362.343,
      "eval_steps_per_second": 11.323,
      "step": 3000
    },
    {
      "epoch": 6.5,
      "learning_rate": 0.0002045454545454545,
      "loss": 0.1812,
      "step": 3250
    },
    {
      "epoch": 7.0,
      "learning_rate": 0.00019696969696969695,
      "loss": 0.1947,
      "step": 3500
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.6063645482063293,
      "eval_runtime": 22.1706,
      "eval_samples_per_second": 360.838,
      "eval_steps_per_second": 11.276,
      "step": 3500
    },
    {
      "epoch": 7.5,
      "learning_rate": 0.00018939393939393937,
      "loss": 0.1597,
      "step": 3750
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.0001818181818181818,
      "loss": 0.1715,
      "step": 4000
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.6192964911460876,
      "eval_runtime": 22.493,
      "eval_samples_per_second": 355.666,
      "eval_steps_per_second": 11.115,
      "step": 4000
    },
    {
      "epoch": 8.5,
      "learning_rate": 0.00017424242424242422,
      "loss": 0.1439,
      "step": 4250
    },
    {
      "epoch": 9.0,
      "learning_rate": 0.00016666666666666666,
      "loss": 0.1535,
      "step": 4500
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.6339118480682373,
      "eval_runtime": 17.7948,
      "eval_samples_per_second": 449.57,
      "eval_steps_per_second": 14.049,
      "step": 4500
    },
    {
      "epoch": 9.5,
      "learning_rate": 0.00015909090909090907,
      "loss": 0.1322,
      "step": 4750
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.00015151515151515152,
      "loss": 0.1403,
      "step": 5000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.648122251033783,
      "eval_runtime": 22.0949,
      "eval_samples_per_second": 362.074,
      "eval_steps_per_second": 11.315,
      "step": 5000
    },
    {
      "epoch": 10.5,
      "learning_rate": 0.00014393939393939393,
      "loss": 0.1229,
      "step": 5250
    },
    {
      "epoch": 11.0,
      "learning_rate": 0.00013636363636363634,
      "loss": 0.1297,
      "step": 5500
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.6578399538993835,
      "eval_runtime": 22.4161,
      "eval_samples_per_second": 356.887,
      "eval_steps_per_second": 11.153,
      "step": 5500
    },
    {
      "epoch": 11.5,
      "learning_rate": 0.00012878787878787878,
      "loss": 0.1158,
      "step": 5750
    },
    {
      "epoch": 12.0,
      "learning_rate": 0.0001212121212121212,
      "loss": 0.1217,
      "step": 6000
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.6666226983070374,
      "eval_runtime": 17.8264,
      "eval_samples_per_second": 448.773,
      "eval_steps_per_second": 14.024,
      "step": 6000
    },
    {
      "epoch": 12.5,
      "learning_rate": 0.00011363636363636362,
      "loss": 0.1106,
      "step": 6250
    },
    {
      "epoch": 13.0,
      "learning_rate": 0.00010606060606060605,
      "loss": 0.1152,
      "step": 6500
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.6757141351699829,
      "eval_runtime": 15.3201,
      "eval_samples_per_second": 522.191,
      "eval_steps_per_second": 16.318,
      "step": 6500
    },
    {
      "epoch": 13.5,
      "learning_rate": 9.848484848484848e-05,
      "loss": 0.1061,
      "step": 6750
    },
    {
      "epoch": 14.0,
      "learning_rate": 9.09090909090909e-05,
      "loss": 0.1101,
      "step": 7000
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.687734842300415,
      "eval_runtime": 21.8339,
      "eval_samples_per_second": 366.402,
      "eval_steps_per_second": 11.45,
      "step": 7000
    },
    {
      "epoch": 14.5,
      "learning_rate": 8.333333333333333e-05,
      "loss": 0.1024,
      "step": 7250
    },
    {
      "epoch": 15.0,
      "learning_rate": 7.575757575757576e-05,
      "loss": 0.1059,
      "step": 7500
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.6898844242095947,
      "eval_runtime": 22.4671,
      "eval_samples_per_second": 356.076,
      "eval_steps_per_second": 11.127,
      "step": 7500
    },
    {
      "epoch": 15.5,
      "learning_rate": 6.818181818181817e-05,
      "loss": 0.0991,
      "step": 7750
    },
    {
      "epoch": 16.0,
      "learning_rate": 6.06060606060606e-05,
      "loss": 0.1022,
      "step": 8000
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.6962941884994507,
      "eval_runtime": 17.8764,
      "eval_samples_per_second": 447.518,
      "eval_steps_per_second": 13.985,
      "step": 8000
    },
    {
      "epoch": 16.5,
      "learning_rate": 5.3030303030303025e-05,
      "loss": 0.0963,
      "step": 8250
    },
    {
      "epoch": 17.0,
      "learning_rate": 4.545454545454545e-05,
      "loss": 0.0991,
      "step": 8500
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.702376127243042,
      "eval_runtime": 21.7707,
      "eval_samples_per_second": 367.466,
      "eval_steps_per_second": 11.483,
      "step": 8500
    },
    {
      "epoch": 17.5,
      "learning_rate": 3.790909090909091e-05,
      "loss": 0.0939,
      "step": 8750
    },
    {
      "epoch": 18.0,
      "learning_rate": 3.033333333333333e-05,
      "loss": 0.0961,
      "step": 9000
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.7053780555725098,
      "eval_runtime": 22.1712,
      "eval_samples_per_second": 360.829,
      "eval_steps_per_second": 11.276,
      "step": 9000
    },
    {
      "epoch": 18.5,
      "learning_rate": 2.2757575757575757e-05,
      "loss": 0.0914,
      "step": 9250
    },
    {
      "epoch": 19.0,
      "learning_rate": 1.518181818181818e-05,
      "loss": 0.0933,
      "step": 9500
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.7104560732841492,
      "eval_runtime": 22.4548,
      "eval_samples_per_second": 356.271,
      "eval_steps_per_second": 11.133,
      "step": 9500
    },
    {
      "epoch": 19.5,
      "learning_rate": 7.6060606060606056e-06,
      "loss": 0.0893,
      "step": 9750
    },
    {
      "epoch": 20.0,
      "learning_rate": 3.03030303030303e-08,
      "loss": 0.0901,
      "step": 10000
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.7170030474662781,
      "eval_runtime": 17.8822,
      "eval_samples_per_second": 447.372,
      "eval_steps_per_second": 13.98,
      "step": 10000
    }
  ],
  "max_steps": 10000,
  "num_train_epochs": 20,
  "total_flos": 4.180672512e+16,
  "trial_name": null,
  "trial_params": null
}