{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 27.020833333333332,
  "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 4.9999999999999996e-06,
      "loss": 10.9507,
      "step": 1
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00025,
      "loss": 8.5124,
      "step": 50
    },
    {
      "epoch": 3.02,
      "learning_rate": 0.0005,
      "loss": 7.4735,
      "step": 100
    },
    {
      "epoch": 5.02,
      "learning_rate": 0.0005833333333333333,
      "loss": 7.3737,
      "step": 150
    },
    {
      "epoch": 7.02,
      "learning_rate": 0.0005555555555555556,
      "loss": 7.1503,
      "step": 200
    },
    {
      "epoch": 9.02,
      "learning_rate": 0.0005277777777777777,
      "loss": 6.9697,
      "step": 250
    },
    {
      "epoch": 11.02,
      "learning_rate": 0.0005,
      "loss": 6.6856,
      "step": 300
    },
    {
      "epoch": 13.02,
      "learning_rate": 0.00047222222222222224,
      "loss": 6.3809,
      "step": 350
    },
    {
      "epoch": 15.02,
      "learning_rate": 0.00044444444444444436,
      "loss": 6.0381,
      "step": 400
    },
    {
      "epoch": 17.02,
      "learning_rate": 0.00041666666666666664,
      "loss": 5.6691,
      "step": 450
    },
    {
      "epoch": 19.02,
      "learning_rate": 0.00038888888888888887,
      "loss": 5.3832,
      "step": 500
    },
    {
      "epoch": 21.02,
      "learning_rate": 0.0003611111111111111,
      "loss": 5.1423,
      "step": 550
    },
    {
      "epoch": 23.02,
      "learning_rate": 0.0003333333333333333,
      "loss": 4.9255,
      "step": 600
    },
    {
      "epoch": 25.02,
      "learning_rate": 0.00030555555555555555,
      "loss": 4.7399,
      "step": 650
    },
    {
      "epoch": 27.02,
      "learning_rate": 0.0002777777777777778,
      "loss": 4.582,
      "step": 700
    }
  ],
  "max_steps": 1200,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 1.8997498478592e+17,
  "trial_name": null,
  "trial_params": null
}