{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "global_step": 618,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 0.00013225806451612905,
      "loss": 1.6666,
      "step": 41
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00019280575539568347,
      "loss": 1.3369,
      "step": 82
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00017805755395683455,
      "loss": 1.2132,
      "step": 123
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00016330935251798563,
      "loss": 1.18,
      "step": 164
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0001485611510791367,
      "loss": 1.1685,
      "step": 205
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00013381294964028776,
      "loss": 1.1396,
      "step": 246
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00011906474820143884,
      "loss": 1.1346,
      "step": 287
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.00010431654676258992,
      "loss": 1.112,
      "step": 328
    },
    {
      "epoch": 1.79,
      "learning_rate": 8.9568345323741e-05,
      "loss": 1.1125,
      "step": 369
    },
    {
      "epoch": 1.99,
      "learning_rate": 7.48201438848921e-05,
      "loss": 1.0992,
      "step": 410
    },
    {
      "epoch": 2.19,
      "learning_rate": 6.007194244604317e-05,
      "loss": 1.0891,
      "step": 451
    },
    {
      "epoch": 2.39,
      "learning_rate": 4.532374100719425e-05,
      "loss": 1.082,
      "step": 492
    },
    {
      "epoch": 2.59,
      "learning_rate": 3.0575539568345324e-05,
      "loss": 1.0769,
      "step": 533
    },
    {
      "epoch": 2.79,
      "learning_rate": 1.5827338129496403e-05,
      "loss": 1.0697,
      "step": 574
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.0791366906474822e-06,
      "loss": 1.0784,
      "step": 615
    }
  ],
  "max_steps": 618,
  "num_train_epochs": 3,
  "total_flos": 5.117633148380774e+16,
  "trial_name": null,
  "trial_params": null
}