{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 3,
  "global_step": 9000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17,
      "learning_rate": 4.7248471372984996e-05,
      "loss": 3.6424,
      "step": 500
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.4469149527515285e-05,
      "loss": 3.4737,
      "step": 1000
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.168982768204558e-05,
      "loss": 3.4762,
      "step": 1500
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.891050583657588e-05,
      "loss": 3.3944,
      "step": 2000
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.613118399110617e-05,
      "loss": 3.3814,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.3351862145636464e-05,
      "loss": 3.3406,
      "step": 3000
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.057254030016676e-05,
      "loss": 2.7112,
      "step": 3500
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.7793218454697057e-05,
      "loss": 2.7357,
      "step": 4000
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.501389660922735e-05,
      "loss": 2.7325,
      "step": 4500
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.2234574763757643e-05,
      "loss": 2.6802,
      "step": 5000
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.945525291828794e-05,
      "loss": 2.7204,
      "step": 5500
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.6675931072818232e-05,
      "loss": 2.6908,
      "step": 6000
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.3896609227348528e-05,
      "loss": 2.259,
      "step": 6500
    },
    {
      "epoch": 2.33,
      "learning_rate": 1.1117287381878821e-05,
      "loss": 2.2829,
      "step": 7000
    },
    {
      "epoch": 2.5,
      "learning_rate": 8.337965536409116e-06,
      "loss": 2.2751,
      "step": 7500
    },
    {
      "epoch": 2.67,
      "learning_rate": 5.558643690939411e-06,
      "loss": 2.2862,
      "step": 8000
    },
    {
      "epoch": 2.83,
      "learning_rate": 2.7793218454697053e-06,
      "loss": 2.2833,
      "step": 8500
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0,
      "loss": 2.2729,
      "step": 9000
    },
    {
      "epoch": 3.0,
      "step": 9000,
      "total_flos": 2160942262272000.0,
      "train_loss": 2.813265679253472,
      "train_runtime": 715.4534,
      "train_samples_per_second": 50.318,
      "train_steps_per_second": 12.579
    }
  ],
  "logging_steps": 500,
  "max_steps": 9000,
  "num_train_epochs": 3,
  "save_steps": 5000,
  "total_flos": 2160942262272000.0,
  "trial_name": null,
  "trial_params": null
}