{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 17046,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 4.8533380265164854e-05,
      "loss": 1.6132,
      "step": 500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.70667605303297e-05,
      "loss": 1.2696,
      "step": 1000
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.560014079549454e-05,
      "loss": 1.1996,
      "step": 1500
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.4133521060659395e-05,
      "loss": 1.1562,
      "step": 2000
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.2666901325824246e-05,
      "loss": 1.1215,
      "step": 2500
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.120028159098909e-05,
      "loss": 1.1017,
      "step": 3000
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.9733661856153936e-05,
      "loss": 1.0863,
      "step": 3500
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.826704212131879e-05,
      "loss": 1.0715,
      "step": 4000
    },
    {
      "epoch": 0.79,
      "learning_rate": 3.680042238648363e-05,
      "loss": 1.0573,
      "step": 4500
    },
    {
      "epoch": 0.88,
      "learning_rate": 3.533380265164848e-05,
      "loss": 1.0442,
      "step": 5000
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.386718291681333e-05,
      "loss": 1.038,
      "step": 5500
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.240056318197818e-05,
      "loss": 0.9828,
      "step": 6000
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.0933943447143024e-05,
      "loss": 0.9562,
      "step": 6500
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.9467323712307876e-05,
      "loss": 0.9515,
      "step": 7000
    },
    {
      "epoch": 1.32,
      "learning_rate": 2.800070397747272e-05,
      "loss": 0.9498,
      "step": 7500
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.653408424263757e-05,
      "loss": 0.9454,
      "step": 8000
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.506746450780242e-05,
      "loss": 0.9432,
      "step": 8500
    },
    {
      "epoch": 1.58,
      "learning_rate": 2.3600844772967265e-05,
      "loss": 0.941,
      "step": 9000
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.2134225038132113e-05,
      "loss": 0.935,
      "step": 9500
    },
    {
      "epoch": 1.76,
      "learning_rate": 2.0667605303296964e-05,
      "loss": 0.931,
      "step": 10000
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.920098556846181e-05,
      "loss": 0.9285,
      "step": 10500
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.773436583362666e-05,
      "loss": 0.9286,
      "step": 11000
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.6267746098791505e-05,
      "loss": 0.905,
      "step": 11500
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.4801126363956355e-05,
      "loss": 0.8564,
      "step": 12000
    },
    {
      "epoch": 2.2,
      "learning_rate": 1.3334506629121201e-05,
      "loss": 0.8565,
      "step": 12500
    },
    {
      "epoch": 2.29,
      "learning_rate": 1.186788689428605e-05,
      "loss": 0.8558,
      "step": 13000
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.0401267159450897e-05,
      "loss": 0.8552,
      "step": 13500
    },
    {
      "epoch": 2.46,
      "learning_rate": 8.934647424615746e-06,
      "loss": 0.8532,
      "step": 14000
    },
    {
      "epoch": 2.55,
      "learning_rate": 7.468027689780594e-06,
      "loss": 0.8504,
      "step": 14500
    },
    {
      "epoch": 2.64,
      "learning_rate": 6.001407954945442e-06,
      "loss": 0.8485,
      "step": 15000
    },
    {
      "epoch": 2.73,
      "learning_rate": 4.53478822011029e-06,
      "loss": 0.8468,
      "step": 15500
    },
    {
      "epoch": 2.82,
      "learning_rate": 3.068168485275138e-06,
      "loss": 0.8472,
      "step": 16000
    },
    {
      "epoch": 2.9,
      "learning_rate": 1.601548750439986e-06,
      "loss": 0.8467,
      "step": 16500
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.34929015604834e-07,
      "loss": 0.8469,
      "step": 17000
    },
    {
      "epoch": 3.0,
      "step": 17046,
      "total_flos": 1.6212930654633984e+17,
      "train_loss": 0.9826037574650801,
      "train_runtime": 24274.8201,
      "train_samples_per_second": 1.404,
      "train_steps_per_second": 0.702
    }
  ],
  "max_steps": 17046,
  "num_train_epochs": 3,
  "total_flos": 1.6212930654633984e+17,
  "trial_name": null,
  "trial_params": null
}