{
  "best_global_step": 92,
  "best_metric": 2.2715282440185547,
  "best_model_checkpoint": "bert_base_rand_50_v1_stsb/checkpoint-92",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 207,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 17.77509880065918,
      "learning_rate": 4.9e-05,
      "loss": 3.0174,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.07236667363525802,
      "eval_loss": 2.9029922485351562,
      "eval_pearson": 0.07728793470744158,
      "eval_runtime": 0.9732,
      "eval_samples_per_second": 1541.241,
      "eval_spearmanr": 0.06744541256307446,
      "eval_steps_per_second": 6.165,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 5.289533615112305,
      "learning_rate": 4.8e-05,
      "loss": 2.0319,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.1592761113745453,
      "eval_loss": 2.4340341091156006,
      "eval_pearson": 0.16903026931415796,
      "eval_runtime": 1.0056,
      "eval_samples_per_second": 1491.579,
      "eval_spearmanr": 0.14952195343493266,
      "eval_steps_per_second": 5.966,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 5.465396881103516,
      "learning_rate": 4.7e-05,
      "loss": 1.7905,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.19761658998050122,
      "eval_loss": 2.388899803161621,
      "eval_pearson": 0.20339284454988746,
      "eval_runtime": 0.9719,
      "eval_samples_per_second": 1543.33,
      "eval_spearmanr": 0.19184033541111498,
      "eval_steps_per_second": 6.173,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 6.473479747772217,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.467,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.26176388630421377,
      "eval_loss": 2.2715282440185547,
      "eval_pearson": 0.26353199498352486,
      "eval_runtime": 0.9866,
      "eval_samples_per_second": 1520.444,
      "eval_spearmanr": 0.25999577762490267,
      "eval_steps_per_second": 6.082,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 7.355830192565918,
      "learning_rate": 4.5e-05,
      "loss": 1.1681,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.24187656996270634,
      "eval_loss": 2.427948474884033,
      "eval_pearson": 0.24357506085370995,
      "eval_runtime": 0.9515,
      "eval_samples_per_second": 1576.521,
      "eval_spearmanr": 0.24017807907170277,
      "eval_steps_per_second": 6.306,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 7.450218677520752,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.0229,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.26963317556367566,
      "eval_loss": 2.867931604385376,
      "eval_pearson": 0.26693349119077764,
      "eval_runtime": 0.9987,
      "eval_samples_per_second": 1501.92,
      "eval_spearmanr": 0.2723328599365737,
      "eval_steps_per_second": 6.008,
      "step": 138
    },
    {
      "epoch": 7.0,
      "grad_norm": 12.323619842529297,
      "learning_rate": 4.3e-05,
      "loss": 0.7645,
      "step": 161
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.2729713941605332,
      "eval_loss": 2.5479800701141357,
      "eval_pearson": 0.27251768491766837,
      "eval_runtime": 0.9634,
      "eval_samples_per_second": 1556.91,
      "eval_spearmanr": 0.27342510340339793,
      "eval_steps_per_second": 6.228,
      "step": 161
    },
    {
      "epoch": 8.0,
      "grad_norm": 15.784290313720703,
      "learning_rate": 4.2e-05,
      "loss": 0.6161,
      "step": 184
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.2803685390449176,
      "eval_loss": 2.821274995803833,
      "eval_pearson": 0.27530356747170776,
      "eval_runtime": 0.9938,
      "eval_samples_per_second": 1509.373,
      "eval_spearmanr": 0.28543351061812744,
      "eval_steps_per_second": 6.037,
      "step": 184
    },
    {
      "epoch": 9.0,
      "grad_norm": 14.405195236206055,
      "learning_rate": 4.1e-05,
      "loss": 0.4918,
      "step": 207
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.262987742121648,
      "eval_loss": 2.540947914123535,
      "eval_pearson": 0.26204122019352555,
      "eval_runtime": 0.9769,
      "eval_samples_per_second": 1535.44,
      "eval_spearmanr": 0.26393426404977044,
      "eval_steps_per_second": 6.142,
      "step": 207
    },
    {
      "epoch": 9.0,
      "step": 207,
      "total_flos": 6806753442049536.0,
      "train_loss": 1.374455221609217,
      "train_runtime": 112.5711,
      "train_samples_per_second": 2553.497,
      "train_steps_per_second": 10.216
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6806753442049536.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}