{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 16599,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 2.9096331104283392e-05,
      "loss": 3.2794,
      "step": 500
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.819266220856678e-05,
      "loss": 2.2324,
      "step": 1000
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.7288993312850172e-05,
      "loss": 1.9171,
      "step": 1500
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.6385324417133564e-05,
      "loss": 1.6619,
      "step": 2000
    },
    {
      "epoch": 0.45,
      "learning_rate": 2.5481655521416952e-05,
      "loss": 1.5888,
      "step": 2500
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.4577986625700344e-05,
      "loss": 1.4678,
      "step": 3000
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.3674317729983735e-05,
      "loss": 1.3724,
      "step": 3500
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.2770648834267124e-05,
      "loss": 1.4076,
      "step": 4000
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.1866979938550515e-05,
      "loss": 1.3559,
      "step": 4500
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.0963311042833907e-05,
      "loss": 1.2931,
      "step": 5000
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.0059642147117295e-05,
      "loss": 1.3135,
      "step": 5500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.9155973251400687e-05,
      "loss": 1.1764,
      "step": 6000
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.825230435568408e-05,
      "loss": 1.1188,
      "step": 6500
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.7348635459967467e-05,
      "loss": 1.1272,
      "step": 7000
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.644496656425086e-05,
      "loss": 1.1424,
      "step": 7500
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.554129766853425e-05,
      "loss": 1.1509,
      "step": 8000
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.463762877281764e-05,
      "loss": 1.0994,
      "step": 8500
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.373395987710103e-05,
      "loss": 1.1085,
      "step": 9000
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.283029098138442e-05,
      "loss": 1.0516,
      "step": 9500
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.1926622085667812e-05,
      "loss": 1.1125,
      "step": 10000
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.1022953189951202e-05,
      "loss": 1.0775,
      "step": 10500
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.0119284294234592e-05,
      "loss": 1.0903,
      "step": 11000
    },
    {
      "epoch": 2.08,
      "learning_rate": 9.215615398517983e-06,
      "loss": 0.9665,
      "step": 11500
    },
    {
      "epoch": 2.17,
      "learning_rate": 8.311946502801373e-06,
      "loss": 0.958,
      "step": 12000
    },
    {
      "epoch": 2.26,
      "learning_rate": 7.408277607084765e-06,
      "loss": 0.9539,
      "step": 12500
    },
    {
      "epoch": 2.35,
      "learning_rate": 6.504608711368155e-06,
      "loss": 0.9495,
      "step": 13000
    },
    {
      "epoch": 2.44,
      "learning_rate": 5.6009398156515455e-06,
      "loss": 0.9543,
      "step": 13500
    },
    {
      "epoch": 2.53,
      "learning_rate": 4.697270919934936e-06,
      "loss": 0.9697,
      "step": 14000
    },
    {
      "epoch": 2.62,
      "learning_rate": 3.7936020242183263e-06,
      "loss": 0.973,
      "step": 14500
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.889933128501717e-06,
      "loss": 0.9563,
      "step": 15000
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.9862642327851074e-06,
      "loss": 0.9312,
      "step": 15500
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.0825953370684982e-06,
      "loss": 0.9779,
      "step": 16000
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.7892644135188868e-07,
      "loss": 0.966,
      "step": 16500
    },
    {
      "epoch": 3.0,
      "step": 16599,
      "total_flos": 5819517971269632.0,
      "train_loss": 1.262162847499961,
      "train_runtime": 2635.5654,
      "train_samples_per_second": 100.765,
      "train_steps_per_second": 6.298
    }
  ],
  "max_steps": 16599,
  "num_train_epochs": 3,
  "total_flos": 5819517971269632.0,
  "trial_name": null,
  "trial_params": null
}