{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9549795361527967,
  "global_step": 7000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 2.795361527967258e-05,
      "loss": 3.4231,
      "step": 500
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.5907230559345156e-05,
      "loss": 3.423,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "eval_stsb_spearman": 0.4829721958511661,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.386084583901774e-05,
      "loss": 3.4229,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.1814461118690315e-05,
      "loss": 3.4229,
      "step": 2000
    },
    {
      "epoch": 0.27,
      "eval_stsb_spearman": 0.5840087248870882,
      "step": 2000
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.9768076398362894e-05,
      "loss": 3.4229,
      "step": 2500
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.772169167803547e-05,
      "loss": 3.4229,
      "step": 3000
    },
    {
      "epoch": 0.41,
      "eval_stsb_spearman": 0.6250461828473922,
      "step": 3000
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.567530695770805e-05,
      "loss": 3.4229,
      "step": 3500
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.3628922237380627e-05,
      "loss": 3.4229,
      "step": 4000
    },
    {
      "epoch": 0.55,
      "eval_stsb_spearman": 0.656661075878988,
      "step": 4000
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.1582537517053206e-05,
      "loss": 3.4229,
      "step": 4500
    },
    {
      "epoch": 0.68,
      "learning_rate": 9.536152796725784e-06,
      "loss": 3.4228,
      "step": 5000
    },
    {
      "epoch": 0.68,
      "eval_stsb_spearman": 0.6699952089868008,
      "step": 5000
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.489768076398363e-06,
      "loss": 3.4229,
      "step": 5500
    },
    {
      "epoch": 0.82,
      "learning_rate": 5.443383356070941e-06,
      "loss": 3.4228,
      "step": 6000
    },
    {
      "epoch": 0.82,
      "eval_stsb_spearman": 0.6815928788452451,
      "step": 6000
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.39699863574352e-06,
      "loss": 3.4228,
      "step": 6500
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3506139154160984e-06,
      "loss": 3.4229,
      "step": 7000
    },
    {
      "epoch": 0.95,
      "eval_stsb_spearman": 0.6869918326406103,
      "step": 7000
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 500
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "eval_stsb_spearman": 0.6832701515366976,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 2000
    },
    {
      "epoch": 0.27,
      "eval_stsb_spearman": 0.6832701515366976,
      "step": 2000
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 2500
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 3000
    },
    {
      "epoch": 0.41,
      "eval_stsb_spearman": 0.6832701515366976,
      "step": 3000
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 3500
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 4000
    },
    {
      "epoch": 0.55,
      "eval_stsb_spearman": 0.6832701515366976,
      "step": 4000
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 4500
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 5000
    },
    {
      "epoch": 0.68,
      "eval_stsb_spearman": 0.6832701515366976,
      "step": 5000
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 5500
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 6000
    },
    {
      "epoch": 0.82,
      "eval_stsb_spearman": 0.6832701515366976,
      "step": 6000
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 6500
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0,
      "loss": 0.0002,
      "step": 7000
    },
    {
      "epoch": 0.95,
      "eval_stsb_spearman": 0.6832701515366976,
      "step": 7000
    },
    {
      "epoch": 1.0,
      "step": 7330,
      "total_flos": 0,
      "train_runtime": 3008.4826,
      "train_samples_per_second": 2.436
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.795361527967258e-05,
      "loss": 0.0002,
      "step": 500
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.5907230559345156e-05,
      "loss": 0.0002,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "eval_stsb_spearman": 0.6647397049127042,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.386084583901774e-05,
      "loss": 0.0002,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.1814461118690315e-05,
      "loss": 0.0002,
      "step": 2000
    },
    {
      "epoch": 0.27,
      "eval_stsb_spearman": 0.6705695143163508,
      "step": 2000
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.9768076398362894e-05,
      "loss": 0.0002,
      "step": 2500
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.772169167803547e-05,
      "loss": 0.0002,
      "step": 3000
    },
    {
      "epoch": 0.41,
      "eval_stsb_spearman": 0.6834165899504511,
      "step": 3000
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.567530695770805e-05,
      "loss": 0.0002,
      "step": 3500
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.3628922237380627e-05,
      "loss": 0.0002,
      "step": 4000
    },
    {
      "epoch": 0.55,
      "eval_stsb_spearman": 0.6889523167904467,
      "step": 4000
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.1582537517053206e-05,
      "loss": 0.0002,
      "step": 4500
    },
    {
      "epoch": 0.68,
      "learning_rate": 9.536152796725784e-06,
      "loss": 0.0002,
      "step": 5000
    },
    {
      "epoch": 0.68,
      "eval_stsb_spearman": 0.6907657748709582,
      "step": 5000
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.489768076398363e-06,
      "loss": 0.0002,
      "step": 5500
    },
    {
      "epoch": 0.82,
      "learning_rate": 5.443383356070941e-06,
      "loss": 0.0002,
      "step": 6000
    },
    {
      "epoch": 0.82,
      "eval_stsb_spearman": 0.6844441359759208,
      "step": 6000
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.39699863574352e-06,
      "loss": 0.0002,
      "step": 6500
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3506139154160984e-06,
      "loss": 0.0002,
      "step": 7000
    },
    {
      "epoch": 0.95,
      "eval_stsb_spearman": 0.6838872101024212,
      "step": 7000
    },
    {
      "epoch": 1.0,
      "step": 7330,
      "total_flos": 0,
      "train_runtime": 3006.5028,
      "train_samples_per_second": 2.438
    },
    {
      "epoch": 0.07,
      "learning_rate": 2.795361527967258e-05,
      "loss": 3.4229,
      "step": 500
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.5907230559345156e-05,
      "loss": 3.4229,
      "step": 1000
    },
    {
      "epoch": 0.14,
      "eval_stsb_spearman": 0.6971006163293475,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.386084583901774e-05,
      "loss": 3.4228,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.1814461118690315e-05,
      "loss": 3.4229,
      "step": 2000
    },
    {
      "epoch": 0.27,
      "eval_stsb_spearman": 0.7098151136325446,
      "step": 2000
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.9768076398362894e-05,
      "loss": 3.4229,
      "step": 2500
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.772169167803547e-05,
      "loss": 3.4228,
      "step": 3000
    },
    {
      "epoch": 0.41,
      "eval_stsb_spearman": 0.7175803233471086,
      "step": 3000
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.567530695770805e-05,
      "loss": 3.4228,
      "step": 3500
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.3628922237380627e-05,
      "loss": 3.4229,
      "step": 4000
    },
    {
      "epoch": 0.55,
      "eval_stsb_spearman": 0.7188510448496929,
      "step": 4000
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.1582537517053206e-05,
      "loss": 3.4229,
      "step": 4500
    },
    {
      "epoch": 0.68,
      "learning_rate": 9.536152796725784e-06,
      "loss": 3.4228,
      "step": 5000
    },
    {
      "epoch": 0.68,
      "eval_stsb_spearman": 0.71954530730918,
      "step": 5000
    },
    {
      "epoch": 0.75,
      "learning_rate": 7.489768076398363e-06,
      "loss": 3.4229,
      "step": 5500
    },
    {
      "epoch": 0.82,
      "learning_rate": 5.443383356070941e-06,
      "loss": 3.4228,
      "step": 6000
    },
    {
      "epoch": 0.82,
      "eval_stsb_spearman": 0.715340103967707,
      "step": 6000
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.39699863574352e-06,
      "loss": 3.4228,
      "step": 6500
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.3506139154160984e-06,
      "loss": 3.4229,
      "step": 7000
    },
    {
      "epoch": 0.95,
      "eval_stsb_spearman": 0.7149024415170762,
      "step": 7000
    }
  ],
  "max_steps": 7330,
  "num_train_epochs": 1,
  "total_flos": 0,
  "trial_name": null,
  "trial_params": null
}