{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 8304,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "learning_rate": 2.8193641618497108e-05,
      "loss": 2.0414,
      "step": 500
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.6387283236994222e-05,
      "loss": 1.3717,
      "step": 1000
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.458092485549133e-05,
      "loss": 1.2339,
      "step": 1500
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.277456647398844e-05,
      "loss": 1.1651,
      "step": 2000
    },
    {
      "epoch": 0.9,
      "learning_rate": 2.0968208092485548e-05,
      "loss": 1.1147,
      "step": 2500
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.9161849710982662e-05,
      "loss": 1.0155,
      "step": 3000
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.735549132947977e-05,
      "loss": 0.9151,
      "step": 3500
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.5549132947976876e-05,
      "loss": 0.9212,
      "step": 4000
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.3742774566473989e-05,
      "loss": 0.8868,
      "step": 4500
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.19364161849711e-05,
      "loss": 0.8968,
      "step": 5000
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.0130057803468209e-05,
      "loss": 0.8776,
      "step": 5500
    },
    {
      "epoch": 2.17,
      "learning_rate": 8.323699421965318e-06,
      "loss": 0.747,
      "step": 6000
    },
    {
      "epoch": 2.35,
      "learning_rate": 6.517341040462428e-06,
      "loss": 0.7403,
      "step": 6500
    },
    {
      "epoch": 2.53,
      "learning_rate": 4.710982658959537e-06,
      "loss": 0.741,
      "step": 7000
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.9046242774566473e-06,
      "loss": 0.7416,
      "step": 7500
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.0982658959537572e-06,
      "loss": 0.7207,
      "step": 8000
    },
    {
      "epoch": 3.0,
      "step": 8304,
      "total_flos": 2.6036288468176896e+16,
      "train_loss": 0.9979496314576136,
      "train_runtime": 2629.392,
      "train_samples_per_second": 101.051,
      "train_steps_per_second": 3.158
    }
  ],
  "max_steps": 8304,
  "num_train_epochs": 3,
  "total_flos": 2.6036288468176896e+16,
  "trial_name": null,
  "trial_params": null
}