{
  "best_metric": 0.6449503898620605,
  "best_model_checkpoint": "tiny_bert_km_50_v1_qnli/checkpoint-820",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 2870,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 1.3805930614471436,
      "learning_rate": 4.9e-05,
      "loss": 0.6696,
      "step": 410
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6119348343401062,
      "eval_loss": 0.6518764495849609,
      "eval_runtime": 2.0305,
      "eval_samples_per_second": 2690.426,
      "eval_steps_per_second": 10.835,
      "step": 410
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.6006733179092407,
      "learning_rate": 4.8e-05,
      "loss": 0.6425,
      "step": 820
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6143144792238696,
      "eval_loss": 0.6449503898620605,
      "eval_runtime": 1.9767,
      "eval_samples_per_second": 2763.716,
      "eval_steps_per_second": 11.13,
      "step": 820
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.297433614730835,
      "learning_rate": 4.7e-05,
      "loss": 0.5989,
      "step": 1230
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6223686619073769,
      "eval_loss": 0.6500849723815918,
      "eval_runtime": 1.9563,
      "eval_samples_per_second": 2792.552,
      "eval_steps_per_second": 11.246,
      "step": 1230
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.386117935180664,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.5322,
      "step": 1640
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6300567453779974,
      "eval_loss": 0.6924402713775635,
      "eval_runtime": 1.96,
      "eval_samples_per_second": 2787.207,
      "eval_steps_per_second": 11.224,
      "step": 1640
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.428912162780762,
      "learning_rate": 4.5e-05,
      "loss": 0.455,
      "step": 2050
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6232839099395936,
      "eval_loss": 0.7667386531829834,
      "eval_runtime": 2.0332,
      "eval_samples_per_second": 2686.924,
      "eval_steps_per_second": 10.82,
      "step": 2050
    },
    {
      "epoch": 6.0,
      "grad_norm": 3.913470506668091,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.3862,
      "step": 2460
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6130331319787663,
      "eval_loss": 0.8902132511138916,
      "eval_runtime": 2.0248,
      "eval_samples_per_second": 2698.05,
      "eval_steps_per_second": 10.865,
      "step": 2460
    },
    {
      "epoch": 7.0,
      "grad_norm": 5.6499834060668945,
      "learning_rate": 4.3e-05,
      "loss": 0.3213,
      "step": 2870
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6084568918176826,
      "eval_loss": 1.0427790880203247,
      "eval_runtime": 2.0477,
      "eval_samples_per_second": 2667.897,
      "eval_steps_per_second": 10.744,
      "step": 2870
    },
    {
      "epoch": 7.0,
      "step": 2870,
      "total_flos": 1.922715369529651e+16,
      "train_loss": 0.5151176266553925,
      "train_runtime": 516.0188,
      "train_samples_per_second": 10149.146,
      "train_steps_per_second": 39.727
    }
  ],
  "logging_steps": 1,
  "max_steps": 20500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 1.922715369529651e+16,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}