{
  "best_metric": 0.8615721366809934,
  "best_model_checkpoint": "./output/relation_extraction_bert_base_uncased/checkpoint-963",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 1070,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 4.5e-05,
      "loss": 1.2511,
      "step": 107
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.7907449232122132,
      "eval_loss": 0.6098188161849976,
      "eval_runtime": 2.957,
      "eval_samples_per_second": 405.813,
      "eval_steps_per_second": 6.425,
      "step": 107
    },
    {
      "epoch": 2.0,
      "learning_rate": 4e-05,
      "loss": 0.437,
      "step": 214
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.8452978511771352,
      "eval_loss": 0.5166792869567871,
      "eval_runtime": 2.8484,
      "eval_samples_per_second": 421.296,
      "eval_steps_per_second": 6.671,
      "step": 214
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.5e-05,
      "loss": 0.1921,
      "step": 321
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.8552983432267793,
      "eval_loss": 0.5445510745048523,
      "eval_runtime": 2.9775,
      "eval_samples_per_second": 403.017,
      "eval_steps_per_second": 6.381,
      "step": 321
    },
    {
      "epoch": 4.0,
      "learning_rate": 3e-05,
      "loss": 0.0795,
      "step": 428
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.8526607872188615,
      "eval_loss": 0.7041279077529907,
      "eval_runtime": 2.8817,
      "eval_samples_per_second": 416.422,
      "eval_steps_per_second": 6.593,
      "step": 428
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.5e-05,
      "loss": 0.0334,
      "step": 535
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.8567909830709438,
      "eval_loss": 0.7774203419685364,
      "eval_runtime": 2.9813,
      "eval_samples_per_second": 402.511,
      "eval_steps_per_second": 6.373,
      "step": 535
    },
    {
      "epoch": 6.0,
      "learning_rate": 2e-05,
      "loss": 0.0124,
      "step": 642
    },
    {
      "epoch": 6.0,
      "eval_f1": 0.8490240342835185,
      "eval_loss": 0.808628261089325,
      "eval_runtime": 2.6913,
      "eval_samples_per_second": 445.878,
      "eval_steps_per_second": 7.06,
      "step": 642
    },
    {
      "epoch": 7.0,
      "learning_rate": 1.5e-05,
      "loss": 0.0056,
      "step": 749
    },
    {
      "epoch": 7.0,
      "eval_f1": 0.860117487560933,
      "eval_loss": 0.8614501953125,
      "eval_runtime": 2.7072,
      "eval_samples_per_second": 443.256,
      "eval_steps_per_second": 7.018,
      "step": 749
    },
    {
      "epoch": 8.0,
      "learning_rate": 1e-05,
      "loss": 0.003,
      "step": 856
    },
    {
      "epoch": 8.0,
      "eval_f1": 0.8613618380674043,
      "eval_loss": 0.9245680570602417,
      "eval_runtime": 2.7083,
      "eval_samples_per_second": 443.078,
      "eval_steps_per_second": 7.015,
      "step": 856
    },
    {
      "epoch": 9.0,
      "learning_rate": 5e-06,
      "loss": 0.0014,
      "step": 963
    },
    {
      "epoch": 9.0,
      "eval_f1": 0.8615721366809934,
      "eval_loss": 0.9300327301025391,
      "eval_runtime": 2.9509,
      "eval_samples_per_second": 406.653,
      "eval_steps_per_second": 6.439,
      "step": 963
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.0,
      "loss": 0.0011,
      "step": 1070
    },
    {
      "epoch": 10.0,
      "eval_f1": 0.8613345463465772,
      "eval_loss": 0.9421382546424866,
      "eval_runtime": 2.9527,
      "eval_samples_per_second": 406.409,
      "eval_steps_per_second": 6.435,
      "step": 1070
    },
    {
      "epoch": 10.0,
      "step": 1070,
      "total_flos": 4443650073600000.0,
      "train_loss": 0.2016566672197012,
      "train_runtime": 574.5493,
      "train_samples_per_second": 118.354,
      "train_steps_per_second": 1.862
    }
  ],
  "logging_steps": 500,
  "max_steps": 1070,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 4443650073600000.0,
  "trial_name": null,
  "trial_params": null
}