{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "global_step": 11250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 4.7777777777777784e-05,
      "loss": 0.7338,
      "step": 500
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.555555555555556e-05,
      "loss": 0.1426,
      "step": 1000
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.0418,
      "step": 1500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.111111111111111e-05,
      "loss": 0.0146,
      "step": 2000
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.0263,
      "step": 2500
    },
    {
      "epoch": 0.27,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.0448,
      "step": 3000
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.444444444444445e-05,
      "loss": 0.0153,
      "step": 3500
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.222222222222223e-05,
      "loss": 0.0115,
      "step": 4000
    },
    {
      "epoch": 0.4,
      "learning_rate": 3e-05,
      "loss": 0.0194,
      "step": 4500
    },
    {
      "epoch": 0.44,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.0112,
      "step": 5000
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.5555555555555554e-05,
      "loss": 0.0423,
      "step": 5500
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.3333333333333336e-05,
      "loss": 0.0059,
      "step": 6000
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.111111111111111e-05,
      "loss": 0.0053,
      "step": 6500
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.888888888888889e-05,
      "loss": 0.0029,
      "step": 7000
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.0145,
      "step": 7500
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.4444444444444444e-05,
      "loss": 0.0017,
      "step": 8000
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.2222222222222222e-05,
      "loss": 0.0021,
      "step": 8500
    },
    {
      "epoch": 0.8,
      "learning_rate": 1e-05,
      "loss": 0.0042,
      "step": 9000
    },
    {
      "epoch": 0.84,
      "learning_rate": 7.777777777777777e-06,
      "loss": 0.0049,
      "step": 9500
    },
    {
      "epoch": 0.89,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.002,
      "step": 10000
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.0058,
      "step": 10500
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.1111111111111112e-06,
      "loss": 0.0035,
      "step": 11000
    },
    {
      "epoch": 1.0,
      "step": 11250,
      "total_flos": 5.753298461184e+16,
      "train_runtime": 4566.3645,
      "train_samples_per_second": 2.464
    }
  ],
  "max_steps": 11250,
  "num_train_epochs": 1,
  "total_flos": 5.753298461184e+16,
  "trial_name": null,
  "trial_params": null
}