{
  "best_global_step": 92,
  "best_metric": 2.1965677738189697,
  "best_model_checkpoint": "distilbert_rand_100_v1_stsb/checkpoint-92",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 207,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 6.343764305114746,
      "learning_rate": 4.9e-05,
      "loss": 2.9205,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.11939916581055518,
      "eval_loss": 2.379629373550415,
      "eval_pearson": 0.1290949356872376,
      "eval_runtime": 0.6373,
      "eval_samples_per_second": 2353.727,
      "eval_spearmanr": 0.10970339593387277,
      "eval_steps_per_second": 9.415,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.955832481384277,
      "learning_rate": 4.8e-05,
      "loss": 1.9664,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.15324724394586886,
      "eval_loss": 2.4949734210968018,
      "eval_pearson": 0.16927024276504848,
      "eval_runtime": 0.6727,
      "eval_samples_per_second": 2229.939,
      "eval_spearmanr": 0.13722424512668924,
      "eval_steps_per_second": 8.92,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 5.716901779174805,
      "learning_rate": 4.7e-05,
      "loss": 1.779,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.2028938657115142,
      "eval_loss": 2.6714160442352295,
      "eval_pearson": 0.2124383098834424,
      "eval_runtime": 0.6515,
      "eval_samples_per_second": 2302.386,
      "eval_spearmanr": 0.19334942153958598,
      "eval_steps_per_second": 9.21,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 7.138859748840332,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.4527,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.289487086994792,
      "eval_loss": 2.1965677738189697,
      "eval_pearson": 0.2912711193822261,
      "eval_runtime": 0.6214,
      "eval_samples_per_second": 2414.003,
      "eval_spearmanr": 0.2877030546073578,
      "eval_steps_per_second": 9.656,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 12.057498931884766,
      "learning_rate": 4.5e-05,
      "loss": 1.1079,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.2643918329247542,
      "eval_loss": 2.624570608139038,
      "eval_pearson": 0.2674049765821388,
      "eval_runtime": 0.6383,
      "eval_samples_per_second": 2350.14,
      "eval_spearmanr": 0.26137868926736957,
      "eval_steps_per_second": 9.401,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 15.145489692687988,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.8434,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.2952728722123116,
      "eval_loss": 2.547182321548462,
      "eval_pearson": 0.2959286720667827,
      "eval_runtime": 0.6373,
      "eval_samples_per_second": 2353.654,
      "eval_spearmanr": 0.2946170723578405,
      "eval_steps_per_second": 9.415,
      "step": 138
    },
    {
      "epoch": 7.0,
      "grad_norm": 10.89883041381836,
      "learning_rate": 4.3e-05,
      "loss": 0.6384,
      "step": 161
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.29155335708106955,
      "eval_loss": 2.654390335083008,
      "eval_pearson": 0.29459081978470353,
      "eval_runtime": 0.6166,
      "eval_samples_per_second": 2432.754,
      "eval_spearmanr": 0.28851589437743563,
      "eval_steps_per_second": 9.731,
      "step": 161
    },
    {
      "epoch": 8.0,
      "grad_norm": 8.79697322845459,
      "learning_rate": 4.2e-05,
      "loss": 0.487,
      "step": 184
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.27666436943139244,
      "eval_loss": 3.033641815185547,
      "eval_pearson": 0.2802457627913858,
      "eval_runtime": 0.6568,
      "eval_samples_per_second": 2283.715,
      "eval_spearmanr": 0.2730829760713991,
      "eval_steps_per_second": 9.135,
      "step": 184
    },
    {
      "epoch": 9.0,
      "grad_norm": 12.785646438598633,
      "learning_rate": 4.1e-05,
      "loss": 0.4161,
      "step": 207
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.2559946450169581,
      "eval_loss": 2.926753282546997,
      "eval_pearson": 0.25973489057100635,
      "eval_runtime": 0.6461,
      "eval_samples_per_second": 2321.769,
      "eval_spearmanr": 0.25225439946290995,
      "eval_steps_per_second": 9.287,
      "step": 207
    },
    {
      "epoch": 9.0,
      "step": 207,
      "total_flos": 3426936721288704.0,
      "train_loss": 1.290164403869334,
      "train_runtime": 65.418,
      "train_samples_per_second": 4394.047,
      "train_steps_per_second": 17.579
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3426936721288704.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}