{
  "best_global_step": 92,
  "best_metric": 2.248051643371582,
  "best_model_checkpoint": "distilbert_rand_50_v1_stsb/checkpoint-92",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 207,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 3.3921196460723877,
      "learning_rate": 4.9e-05,
      "loss": 2.9399,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.11153304402568132,
      "eval_loss": 2.4181017875671387,
      "eval_pearson": 0.1210825020444981,
      "eval_runtime": 0.6182,
      "eval_samples_per_second": 2426.376,
      "eval_spearmanr": 0.10198358600686455,
      "eval_steps_per_second": 9.706,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 13.365636825561523,
      "learning_rate": 4.8e-05,
      "loss": 1.9354,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.17215708664479992,
      "eval_loss": 2.312326192855835,
      "eval_pearson": 0.18364645975727972,
      "eval_runtime": 0.6339,
      "eval_samples_per_second": 2366.459,
      "eval_spearmanr": 0.16066771353232012,
      "eval_steps_per_second": 9.466,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 8.605367660522461,
      "learning_rate": 4.7e-05,
      "loss": 1.6144,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.24239520408024304,
      "eval_loss": 2.4430179595947266,
      "eval_pearson": 0.24859132705429451,
      "eval_runtime": 0.6959,
      "eval_samples_per_second": 2155.538,
      "eval_spearmanr": 0.23619908110619156,
      "eval_steps_per_second": 8.622,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 19.84820556640625,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.2906,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.3008819475996536,
      "eval_loss": 2.248051643371582,
      "eval_pearson": 0.3047020937532627,
      "eval_runtime": 0.6266,
      "eval_samples_per_second": 2393.751,
      "eval_spearmanr": 0.29706180144604444,
      "eval_steps_per_second": 9.575,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 9.585806846618652,
      "learning_rate": 4.5e-05,
      "loss": 0.9567,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.27995651397256227,
      "eval_loss": 2.52705979347229,
      "eval_pearson": 0.28298136160790793,
      "eval_runtime": 0.6929,
      "eval_samples_per_second": 2164.784,
      "eval_spearmanr": 0.27693166633721666,
      "eval_steps_per_second": 8.659,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 13.289966583251953,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.7014,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.32092489181758765,
      "eval_loss": 2.40600323677063,
      "eval_pearson": 0.3236777615592652,
      "eval_runtime": 0.6305,
      "eval_samples_per_second": 2378.921,
      "eval_spearmanr": 0.31817202207591005,
      "eval_steps_per_second": 9.516,
      "step": 138
    },
    {
      "epoch": 7.0,
      "grad_norm": 13.387773513793945,
      "learning_rate": 4.3e-05,
      "loss": 0.5733,
      "step": 161
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.30272644237638974,
      "eval_loss": 2.605097770690918,
      "eval_pearson": 0.30481118731052326,
      "eval_runtime": 0.6164,
      "eval_samples_per_second": 2433.601,
      "eval_spearmanr": 0.3006416974422562,
      "eval_steps_per_second": 9.734,
      "step": 161
    },
    {
      "epoch": 8.0,
      "grad_norm": 18.156740188598633,
      "learning_rate": 4.2e-05,
      "loss": 0.4539,
      "step": 184
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.3360242041865352,
      "eval_loss": 2.3850810527801514,
      "eval_pearson": 0.3375362511660661,
      "eval_runtime": 0.6438,
      "eval_samples_per_second": 2330.081,
      "eval_spearmanr": 0.3345121572070044,
      "eval_steps_per_second": 9.32,
      "step": 184
    },
    {
      "epoch": 9.0,
      "grad_norm": 15.273696899414062,
      "learning_rate": 4.1e-05,
      "loss": 0.395,
      "step": 207
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.2926284624191829,
      "eval_loss": 2.572735071182251,
      "eval_pearson": 0.296678145405776,
      "eval_runtime": 0.6284,
      "eval_samples_per_second": 2386.845,
      "eval_spearmanr": 0.2885787794325898,
      "eval_steps_per_second": 9.547,
      "step": 207
    },
    {
      "epoch": 9.0,
      "step": 207,
      "total_flos": 3426936721288704.0,
      "train_loss": 1.2067194998552258,
      "train_runtime": 65.2325,
      "train_samples_per_second": 4406.548,
      "train_steps_per_second": 17.629
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3426936721288704.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}