{
  "best_metric": 2.2016942501068115,
  "best_model_checkpoint": "tiny_bert_km_100_v1_stsb/checkpoint-23",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 138,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 2.6959726810455322,
      "learning_rate": 4.9e-05,
      "loss": 3.7897,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.18695206741134152,
      "eval_loss": 2.2016942501068115,
      "eval_pearson": 0.19832934084442527,
      "eval_runtime": 0.5834,
      "eval_samples_per_second": 2570.957,
      "eval_spearmanr": 0.17557479397825773,
      "eval_steps_per_second": 10.284,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 8.435463905334473,
      "learning_rate": 4.8e-05,
      "loss": 2.1293,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.195336433686907,
      "eval_loss": 2.2599353790283203,
      "eval_pearson": 0.20618687628879384,
      "eval_runtime": 0.5695,
      "eval_samples_per_second": 2633.833,
      "eval_spearmanr": 0.18448599108502017,
      "eval_steps_per_second": 10.535,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.052290439605713,
      "learning_rate": 4.7e-05,
      "loss": 1.9869,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.17145564110767253,
      "eval_loss": 2.3073794841766357,
      "eval_pearson": 0.1839128375346377,
      "eval_runtime": 0.6249,
      "eval_samples_per_second": 2400.342,
      "eval_spearmanr": 0.15899844468070734,
      "eval_steps_per_second": 9.601,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 7.113165378570557,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.8494,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.20344559502937293,
      "eval_loss": 2.3755438327789307,
      "eval_pearson": 0.21031045738602444,
      "eval_runtime": 0.574,
      "eval_samples_per_second": 2613.391,
      "eval_spearmanr": 0.19658073267272141,
      "eval_steps_per_second": 10.454,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 6.30102014541626,
      "learning_rate": 4.5e-05,
      "loss": 1.62,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.25598026887111425,
      "eval_loss": 2.345820903778076,
      "eval_pearson": 0.25888621802633294,
      "eval_runtime": 0.6092,
      "eval_samples_per_second": 2462.335,
      "eval_spearmanr": 0.2530743197158956,
      "eval_steps_per_second": 9.849,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 10.555892944335938,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.4033,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.2698149868649785,
      "eval_loss": 2.323427438735962,
      "eval_pearson": 0.2691045935746744,
      "eval_runtime": 0.5851,
      "eval_samples_per_second": 2563.76,
      "eval_spearmanr": 0.27052538015528255,
      "eval_steps_per_second": 10.255,
      "step": 138
    },
    {
      "epoch": 6.0,
      "step": 138,
      "total_flos": 904528923227136.0,
      "train_loss": 2.1297607421875,
      "train_runtime": 32.0911,
      "train_samples_per_second": 8957.301,
      "train_steps_per_second": 35.835
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 904528923227136.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}