{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 22.0,
  "global_step": 215380,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 4.9500000000000004e-05,
      "loss": 5.4101,
      "step": 3896
    },
    {
      "epoch": 2.0,
      "learning_rate": 4.9e-05,
      "loss": 3.7774,
      "step": 7792
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.85e-05,
      "loss": 3.4166,
      "step": 11688
    },
    {
      "epoch": 4.0,
      "learning_rate": 4.8e-05,
      "loss": 3.2281,
      "step": 15584
    },
    {
      "epoch": 5.0,
      "learning_rate": 4.75e-05,
      "loss": 3.105,
      "step": 19480
    },
    {
      "epoch": 6.0,
      "learning_rate": 4.7e-05,
      "loss": 3.0041,
      "step": 23376
    },
    {
      "epoch": 7.0,
      "learning_rate": 4.6500000000000005e-05,
      "loss": 2.9418,
      "step": 27272
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.600000000000001e-05,
      "loss": 2.8758,
      "step": 31168
    },
    {
      "epoch": 9.0,
      "learning_rate": 4.55e-05,
      "loss": 2.8427,
      "step": 35064
    },
    {
      "epoch": 10.0,
      "learning_rate": 4.5e-05,
      "loss": 2.791,
      "step": 38960
    },
    {
      "epoch": 11.0,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 2.7628,
      "step": 42856
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 2.7277,
      "step": 46752
    },
    {
      "epoch": 13.0,
      "learning_rate": 4.35e-05,
      "loss": 2.7199,
      "step": 50648
    },
    {
      "epoch": 14.0,
      "learning_rate": 4.3e-05,
      "loss": 2.6665,
      "step": 54544
    },
    {
      "epoch": 15.0,
      "learning_rate": 4.25e-05,
      "loss": 2.6518,
      "step": 58440
    },
    {
      "epoch": 16.0,
      "learning_rate": 4.2e-05,
      "loss": 2.6451,
      "step": 62336
    },
    {
      "epoch": 17.0,
      "learning_rate": 4.15e-05,
      "loss": 2.6108,
      "step": 66232
    },
    {
      "epoch": 18.0,
      "learning_rate": 4.1e-05,
      "loss": 2.5972,
      "step": 70128
    },
    {
      "epoch": 19.0,
      "learning_rate": 4.05e-05,
      "loss": 2.5859,
      "step": 74024
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.600000000000001e-05,
      "loss": 2.9427,
      "step": 78320
    },
    {
      "epoch": 9.0,
      "learning_rate": 4.55e-05,
      "loss": 2.9183,
      "step": 88110
    },
    {
      "epoch": 10.0,
      "learning_rate": 4.5e-05,
      "loss": 2.8809,
      "step": 97900
    },
    {
      "epoch": 11.0,
      "learning_rate": 4.4500000000000004e-05,
      "loss": 2.8521,
      "step": 107690
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 2.8272,
      "step": 117480
    },
    {
      "epoch": 13.0,
      "learning_rate": 4.35e-05,
      "loss": 2.8104,
      "step": 127270
    },
    {
      "epoch": 14.0,
      "learning_rate": 4.3e-05,
      "loss": 2.7856,
      "step": 137060
    },
    {
      "epoch": 15.0,
      "learning_rate": 4.25e-05,
      "loss": 2.7625,
      "step": 146850
    },
    {
      "epoch": 16.0,
      "learning_rate": 4.2e-05,
      "loss": 2.7459,
      "step": 156640
    },
    {
      "epoch": 17.0,
      "learning_rate": 4.15e-05,
      "loss": 2.734,
      "step": 166430
    },
    {
      "epoch": 18.0,
      "learning_rate": 4.1e-05,
      "loss": 2.7209,
      "step": 176220
    },
    {
      "epoch": 19.0,
      "learning_rate": 4.05e-05,
      "loss": 2.7129,
      "step": 186010
    },
    {
      "epoch": 20.0,
      "learning_rate": 4e-05,
      "loss": 2.6932,
      "step": 195800
    },
    {
      "epoch": 21.0,
      "learning_rate": 3.9500000000000005e-05,
      "loss": 2.6885,
      "step": 205590
    },
    {
      "epoch": 22.0,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 2.679,
      "step": 215380
    }
  ],
  "max_steps": 979000,
  "num_train_epochs": 100,
  "total_flos": 5.872346596487864e+23,
  "trial_name": null,
  "trial_params": null
}