{
  "best_metric": 0.4291906813901389,
  "best_model_checkpoint": "./output/relation_extraction_roberta_base/checkpoint-963",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 1070,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 4.5e-05,
      "loss": 2.8498,
      "step": 107
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.053951570618237285,
      "eval_loss": 2.6697919368743896,
      "eval_runtime": 2.5903,
      "eval_samples_per_second": 463.265,
      "eval_steps_per_second": 7.335,
      "step": 107
    },
    {
      "epoch": 2.0,
      "learning_rate": 4e-05,
      "loss": 2.4514,
      "step": 214
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.15570882245647932,
      "eval_loss": 2.2383241653442383,
      "eval_runtime": 2.5762,
      "eval_samples_per_second": 465.804,
      "eval_steps_per_second": 7.375,
      "step": 214
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.5e-05,
      "loss": 2.0199,
      "step": 321
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.2755954145408674,
      "eval_loss": 1.9595829248428345,
      "eval_runtime": 2.5961,
      "eval_samples_per_second": 462.236,
      "eval_steps_per_second": 7.319,
      "step": 321
    },
    {
      "epoch": 4.0,
      "learning_rate": 3e-05,
      "loss": 1.6452,
      "step": 428
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.3250642674784511,
      "eval_loss": 1.9245597124099731,
      "eval_runtime": 2.6965,
      "eval_samples_per_second": 445.017,
      "eval_steps_per_second": 7.046,
      "step": 428
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.5e-05,
      "loss": 1.2583,
      "step": 535
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.37965408693840236,
      "eval_loss": 1.9094653129577637,
      "eval_runtime": 2.7978,
      "eval_samples_per_second": 428.907,
      "eval_steps_per_second": 6.791,
      "step": 535
    },
    {
      "epoch": 6.0,
      "learning_rate": 2e-05,
      "loss": 0.9493,
      "step": 642
    },
    {
      "epoch": 6.0,
      "eval_f1": 0.3963663601536282,
      "eval_loss": 2.2247843742370605,
      "eval_runtime": 2.7577,
      "eval_samples_per_second": 435.149,
      "eval_steps_per_second": 6.89,
      "step": 642
    },
    {
      "epoch": 7.0,
      "learning_rate": 1.5e-05,
      "loss": 0.6915,
      "step": 749
    },
    {
      "epoch": 7.0,
      "eval_f1": 0.40079193118108436,
      "eval_loss": 2.1882429122924805,
      "eval_runtime": 2.7475,
      "eval_samples_per_second": 436.766,
      "eval_steps_per_second": 6.915,
      "step": 749
    },
    {
      "epoch": 8.0,
      "learning_rate": 1e-05,
      "loss": 0.4993,
      "step": 856
    },
    {
      "epoch": 8.0,
      "eval_f1": 0.4258100227415372,
      "eval_loss": 2.3671979904174805,
      "eval_runtime": 2.5859,
      "eval_samples_per_second": 464.054,
      "eval_steps_per_second": 7.348,
      "step": 856
    },
    {
      "epoch": 9.0,
      "learning_rate": 5e-06,
      "loss": 0.354,
      "step": 963
    },
    {
      "epoch": 9.0,
      "eval_f1": 0.4291906813901389,
      "eval_loss": 2.5120413303375244,
      "eval_runtime": 2.7759,
      "eval_samples_per_second": 432.299,
      "eval_steps_per_second": 6.845,
      "step": 963
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.0,
      "loss": 0.262,
      "step": 1070
    },
    {
      "epoch": 10.0,
      "eval_f1": 0.42896169266509715,
      "eval_loss": 2.5574376583099365,
      "eval_runtime": 2.8234,
      "eval_samples_per_second": 425.014,
      "eval_steps_per_second": 6.729,
      "step": 1070
    },
    {
      "epoch": 10.0,
      "step": 1070,
      "total_flos": 4443650073600000.0,
      "train_loss": 1.298073563620309,
      "train_runtime": 598.9546,
      "train_samples_per_second": 113.531,
      "train_steps_per_second": 1.786
    }
  ],
  "logging_steps": 500,
  "max_steps": 1070,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 4443650073600000.0,
  "trial_name": null,
  "trial_params": null
}