{
  "best_metric": 2.227576732635498,
  "best_model_checkpoint": "tiny_bert_km_50_v1_stsb/checkpoint-23",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 138,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 4.95888090133667,
      "learning_rate": 4.9e-05,
      "loss": 3.8515,
      "step": 23
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.18734264198444261,
      "eval_loss": 2.227576732635498,
      "eval_pearson": 0.1923505309545669,
      "eval_runtime": 0.5509,
      "eval_samples_per_second": 2722.84,
      "eval_spearmanr": 0.18233475301431837,
      "eval_steps_per_second": 10.891,
      "step": 23
    },
    {
      "epoch": 2.0,
      "grad_norm": 8.362377166748047,
      "learning_rate": 4.8e-05,
      "loss": 2.1315,
      "step": 46
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.2089354886210551,
      "eval_loss": 2.2513928413391113,
      "eval_pearson": 0.21651479454360775,
      "eval_runtime": 0.5991,
      "eval_samples_per_second": 2503.728,
      "eval_spearmanr": 0.20135618269850244,
      "eval_steps_per_second": 10.015,
      "step": 46
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.1979072093963623,
      "learning_rate": 4.7e-05,
      "loss": 1.9864,
      "step": 69
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.19251859064923532,
      "eval_loss": 2.2710776329040527,
      "eval_pearson": 0.1966087155320278,
      "eval_runtime": 0.6964,
      "eval_samples_per_second": 2154.04,
      "eval_spearmanr": 0.18842846576644287,
      "eval_steps_per_second": 8.616,
      "step": 69
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.702291488647461,
      "learning_rate": 4.600000000000001e-05,
      "loss": 1.8498,
      "step": 92
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.21904044995109873,
      "eval_loss": 2.3629276752471924,
      "eval_pearson": 0.22169527715203102,
      "eval_runtime": 0.5734,
      "eval_samples_per_second": 2616.115,
      "eval_spearmanr": 0.2163856227501664,
      "eval_steps_per_second": 10.464,
      "step": 92
    },
    {
      "epoch": 5.0,
      "grad_norm": 6.9626946449279785,
      "learning_rate": 4.5e-05,
      "loss": 1.6182,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.24536057902618041,
      "eval_loss": 2.371917724609375,
      "eval_pearson": 0.24478048351146486,
      "eval_runtime": 0.6115,
      "eval_samples_per_second": 2453.097,
      "eval_spearmanr": 0.24594067454089594,
      "eval_steps_per_second": 9.812,
      "step": 115
    },
    {
      "epoch": 6.0,
      "grad_norm": 7.2307891845703125,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 1.3745,
      "step": 138
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.24094916196629926,
      "eval_loss": 2.4488790035247803,
      "eval_pearson": 0.2402419127861277,
      "eval_runtime": 0.5723,
      "eval_samples_per_second": 2621.166,
      "eval_spearmanr": 0.24165641114647082,
      "eval_steps_per_second": 10.485,
      "step": 138
    },
    {
      "epoch": 6.0,
      "step": 138,
      "total_flos": 904528923227136.0,
      "train_loss": 2.1353097998577617,
      "train_runtime": 31.7448,
      "train_samples_per_second": 9055.015,
      "train_steps_per_second": 36.226
    }
  ],
  "logging_steps": 1,
  "max_steps": 1150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 904528923227136.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}