{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 16599,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 2.9096331104283392e-05,
      "loss": 1.8932,
      "step": 500
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.819266220856678e-05,
      "loss": 1.2013,
      "step": 1000
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.7288993312850172e-05,
      "loss": 1.1212,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.6385324417133564e-05,
      "loss": 1.0282,
      "step": 2000
    },
    {
      "epoch": 0.45,
      "learning_rate": 2.5481655521416952e-05,
      "loss": 0.9943,
      "step": 2500
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.4577986625700344e-05,
      "loss": 0.9387,
      "step": 3000
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.3674317729983735e-05,
      "loss": 0.8737,
      "step": 3500
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.2770648834267124e-05,
      "loss": 0.9147,
      "step": 4000
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.1866979938550515e-05,
      "loss": 0.9023,
      "step": 4500
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.0963311042833907e-05,
      "loss": 0.8754,
      "step": 5000
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.0059642147117295e-05,
      "loss": 0.8672,
      "step": 5500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.9155973251400687e-05,
      "loss": 0.6576,
      "step": 6000
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.825230435568408e-05,
      "loss": 0.6301,
      "step": 6500
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.7348635459967467e-05,
      "loss": 0.6397,
      "step": 7000
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.644496656425086e-05,
      "loss": 0.6494,
      "step": 7500
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.554129766853425e-05,
      "loss": 0.6597,
      "step": 8000
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.463762877281764e-05,
      "loss": 0.6255,
      "step": 8500
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.373395987710103e-05,
      "loss": 0.6429,
      "step": 9000
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.283029098138442e-05,
      "loss": 0.6227,
      "step": 9500
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.1926622085667812e-05,
      "loss": 0.6501,
      "step": 10000
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.1022953189951202e-05,
      "loss": 0.622,
      "step": 10500
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.0119284294234592e-05,
      "loss": 0.6297,
      "step": 11000
    },
    {
      "epoch": 2.08,
      "learning_rate": 9.215615398517983e-06,
      "loss": 0.4721,
      "step": 11500
    },
    {
      "epoch": 2.17,
      "learning_rate": 8.311946502801373e-06,
      "loss": 0.4574,
      "step": 12000
    },
    {
      "epoch": 2.26,
      "learning_rate": 7.408277607084765e-06,
      "loss": 0.4401,
      "step": 12500
    },
    {
      "epoch": 2.35,
      "learning_rate": 6.504608711368155e-06,
      "loss": 0.443,
      "step": 13000
    },
    {
      "epoch": 2.44,
      "learning_rate": 5.6009398156515455e-06,
      "loss": 0.4516,
      "step": 13500
    },
    {
      "epoch": 2.53,
      "learning_rate": 4.697270919934936e-06,
      "loss": 0.4649,
      "step": 14000
    },
    {
      "epoch": 2.62,
      "learning_rate": 3.7936020242183263e-06,
      "loss": 0.4485,
      "step": 14500
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.889933128501717e-06,
      "loss": 0.4442,
      "step": 15000
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.9862642327851074e-06,
      "loss": 0.4249,
      "step": 15500
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.0825953370684982e-06,
      "loss": 0.4623,
      "step": 16000
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.7892644135188868e-07,
      "loss": 0.4516,
      "step": 16500
    },
    {
      "epoch": 3.0,
      "step": 16599,
      "total_flos": 5.204482670991974e+16,
      "train_loss": 0.7134747434232642,
      "train_runtime": 6555.6242,
      "train_samples_per_second": 40.511,
      "train_steps_per_second": 2.532
    }
  ],
  "max_steps": 16599,
  "num_train_epochs": 3,
  "total_flos": 5.204482670991974e+16,
  "trial_name": null,
  "trial_params": null
}