{
  "best_metric": 0.6281057228463983,
  "best_model_checkpoint": "results/MELD_IEMOCAP/roberta-large/SEEDS/2022-03-14-19-06-40-speaker_mode-None-num_past_utterances-0-num_future_utterances-0-batch_size-16-seed-42/checkpoint-7086",
  "epoch": 6.0,
  "global_step": 7086,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 1.1135900346584852e-06,
      "loss": 1.6941,
      "step": 1181
    },
    {
      "epoch": 1.0,
      "eval_f1_macro": 0.07791319292694036,
      "eval_f1_micro": 0.35635123614663256,
      "eval_f1_weighted": 0.19072939518738496,
      "eval_loss": 1.5153617858886719,
      "eval_runtime": 3.1794,
      "eval_samples_per_second": 737.872,
      "eval_steps_per_second": 23.275,
      "step": 1181
    },
    {
      "epoch": 2.0,
      "learning_rate": 2.2338198736122084e-06,
      "loss": 1.298,
      "step": 2362
    },
    {
      "epoch": 2.0,
      "eval_f1_macro": 0.39258179738738447,
      "eval_f1_micro": 0.5882352941176471,
      "eval_f1_weighted": 0.570696903628535,
      "eval_loss": 1.1774364709854126,
      "eval_runtime": 3.1793,
      "eval_samples_per_second": 737.903,
      "eval_steps_per_second": 23.276,
      "step": 2362
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.3540497125659314e-06,
      "loss": 1.0976,
      "step": 3543
    },
    {
      "epoch": 3.0,
      "eval_f1_macro": 0.4195774350293237,
      "eval_f1_micro": 0.6052855924978687,
      "eval_f1_weighted": 0.5932857871930991,
      "eval_loss": 1.0991432666778564,
      "eval_runtime": 3.1785,
      "eval_samples_per_second": 738.086,
      "eval_steps_per_second": 23.281,
      "step": 3543
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.0822920081965484e-06,
      "loss": 0.9869,
      "step": 4724
    },
    {
      "epoch": 4.0,
      "eval_f1_macro": 0.4634417760566037,
      "eval_f1_micro": 0.6287297527706734,
      "eval_f1_weighted": 0.6211850773676282,
      "eval_loss": 1.0642313957214355,
      "eval_runtime": 3.1809,
      "eval_samples_per_second": 737.537,
      "eval_steps_per_second": 23.264,
      "step": 4724
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.8022345484581173e-06,
      "loss": 0.8638,
      "step": 5905
    },
    {
      "epoch": 5.0,
      "eval_f1_macro": 0.4789393979707725,
      "eval_f1_micro": 0.6172208013640239,
      "eval_f1_weighted": 0.6157912564989868,
      "eval_loss": 1.0899823904037476,
      "eval_runtime": 3.182,
      "eval_samples_per_second": 737.269,
      "eval_steps_per_second": 23.256,
      "step": 5905
    },
    {
      "epoch": 6.0,
      "learning_rate": 2.522414224587374e-06,
      "loss": 0.772,
      "step": 7086
    },
    {
      "epoch": 6.0,
      "eval_f1_macro": 0.4957254043928597,
      "eval_f1_micro": 0.6295822676896846,
      "eval_f1_weighted": 0.6281057228463983,
      "eval_loss": 1.120431661605835,
      "eval_runtime": 3.1786,
      "eval_samples_per_second": 738.072,
      "eval_steps_per_second": 23.281,
      "step": 7086
    }
  ],
  "max_steps": 17715,
  "num_train_epochs": 15,
  "total_flos": 8224038236774502.0,
  "trial_name": null,
  "trial_params": null
}