{
  "best_global_step": 115,
  "best_metric": 2.282820701599121,
  "best_model_checkpoint": "tiny_bert_rand_50_v2_stsb/checkpoint-115",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 230,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 4.757383823394775,
      "learning_rate": 4.9e-05,
      "loss": 3.577,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.11416083891115508,
      "eval_loss": 2.3197340965270996,
      "eval_pearson": 0.12061995262322296,
      "eval_runtime": 0.4408,
      "eval_samples_per_second": 3403.103,
      "eval_spearmanr": 0.10770172519908719,
      "eval_steps_per_second": 13.612,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 10.133877754211426,
      "learning_rate": 4.8e-05,
      "loss": 2.0557,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.12699515116533755,
      "eval_loss": 2.4031293392181396,
      "eval_pearson": 0.12910611175823888,
      "eval_runtime": 0.5154,
      "eval_samples_per_second": 2910.631,
      "eval_spearmanr": 0.1248841905724362,
      "eval_steps_per_second": 11.643,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 8.422947883605957,
      "learning_rate": 4.7e-05,
      "loss": 1.8854,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.2013417461648624,
      "eval_loss": 2.3712551593780518,
      "eval_pearson": 0.203885354732845,
      "eval_runtime": 0.4923,
      "eval_samples_per_second": 3047.172,
      "eval_spearmanr": 0.19879813759687975,
      "eval_steps_per_second": 12.189,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 11.295201301574707,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.7118,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.2468785884457752,
      "eval_loss": 2.3257670402526855,
      "eval_pearson": 0.24741486493265863,
      "eval_runtime": 0.4527,
      "eval_samples_per_second": 3313.331,
      "eval_spearmanr": 0.24634231195889175,
      "eval_steps_per_second": 13.253,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 6.806567668914795,
      "learning_rate": 4.5e-05,
      "loss": 1.4486,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.26071507127769444,
      "eval_loss": 2.282820701599121,
      "eval_pearson": 0.263404431762189,
      "eval_runtime": 0.4792,
      "eval_samples_per_second": 3130.148,
      "eval_spearmanr": 0.25802571079319986,
      "eval_steps_per_second": 12.521,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 8.495306968688965,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.2898,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.26830817042733024,
      "eval_loss": 2.7079834938049316,
      "eval_pearson": 0.2622377772014732,
      "eval_runtime": 0.4751,
      "eval_samples_per_second": 3157.388,
      "eval_spearmanr": 0.27437856365318725,
      "eval_steps_per_second": 12.63,
      "step": 138
    },
    {
      "epoch": 7.0,
      "grad_norm": 13.502508163452148,
      "learning_rate": 4.3e-05,
      "loss": 1.0578,
      "step": 161
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.28573879609613473,
      "eval_loss": 2.650726318359375,
      "eval_pearson": 0.28150603591850215,
      "eval_runtime": 0.4542,
      "eval_samples_per_second": 3302.865,
      "eval_spearmanr": 0.2899715562737673,
      "eval_steps_per_second": 13.211,
      "step": 161
    },
    {
      "epoch": 8.0,
      "grad_norm": 7.592569351196289,
      "learning_rate": 4.2e-05,
      "loss": 0.8953,
      "step": 184
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.2609240583203716,
      "eval_loss": 2.863307237625122,
      "eval_pearson": 0.25851307223546727,
      "eval_runtime": 0.5148,
      "eval_samples_per_second": 2913.731,
      "eval_spearmanr": 0.2633350444052759,
      "eval_steps_per_second": 11.655,
      "step": 184
    },
    {
      "epoch": 9.0,
      "grad_norm": 29.740528106689453,
      "learning_rate": 4.1e-05,
      "loss": 0.7584,
      "step": 207
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.2446564587157261,
      "eval_loss": 3.175959587097168,
      "eval_pearson": 0.24206257082189755,
      "eval_runtime": 0.4631,
      "eval_samples_per_second": 3238.968,
      "eval_spearmanr": 0.24725034660955467,
      "eval_steps_per_second": 12.956,
      "step": 207
    },
    {
      "epoch": 10.0,
      "grad_norm": 10.117051124572754,
      "learning_rate": 4e-05,
      "loss": 0.6589,
      "step": 230
    },
    {
      "epoch": 10.0,
      "eval_combined_score": 0.26549244920567694,
      "eval_loss": 3.0019023418426514,
      "eval_pearson": 0.26127781004194356,
      "eval_runtime": 0.4522,
      "eval_samples_per_second": 3316.9,
      "eval_spearmanr": 0.2697070883694104,
      "eval_steps_per_second": 13.268,
      "step": 230
    },
    {
      "epoch": 10.0,
      "step": 230,
      "total_flos": 1507548205378560.0,
      "train_loss": 1.5338545426078465,
      "train_runtime": 43.6565,
      "train_samples_per_second": 6584.362,
      "train_steps_per_second": 26.342
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1507548205378560.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}