{
  "best_metric": 0.9457662701189643,
  "best_model_checkpoint": "../../checkpoint/ddi/biobert/checkpoint-9492",
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 15820,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9384184744576627,
      "eval_loss": 0.227890744805336,
      "eval_runtime": 9.5554,
      "eval_samples_per_second": 598.194,
      "eval_steps_per_second": 2.407,
      "step": 791
    },
    {
      "epoch": 1.26,
      "grad_norm": 3.1540586948394775,
      "learning_rate": 4.683944374209861e-05,
      "loss": 0.1997,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9326452064380686,
      "eval_loss": 0.308584600687027,
      "eval_runtime": 9.5439,
      "eval_samples_per_second": 598.917,
      "eval_steps_per_second": 2.41,
      "step": 1582
    },
    {
      "epoch": 2.53,
      "grad_norm": 0.6525352597236633,
      "learning_rate": 4.367888748419722e-05,
      "loss": 0.0772,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9305458362491252,
      "eval_loss": 0.3141626715660095,
      "eval_runtime": 9.5642,
      "eval_samples_per_second": 597.648,
      "eval_steps_per_second": 2.405,
      "step": 2373
    },
    {
      "epoch": 3.79,
      "grad_norm": 0.06780587136745453,
      "learning_rate": 4.051833122629583e-05,
      "loss": 0.0504,
      "step": 3000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9417424772568229,
      "eval_loss": 0.3149295151233673,
      "eval_runtime": 9.5361,
      "eval_samples_per_second": 599.408,
      "eval_steps_per_second": 2.412,
      "step": 3164
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9413925822253324,
      "eval_loss": 0.3343745470046997,
      "eval_runtime": 9.5618,
      "eval_samples_per_second": 597.795,
      "eval_steps_per_second": 2.405,
      "step": 3955
    },
    {
      "epoch": 5.06,
      "grad_norm": 0.008873376995325089,
      "learning_rate": 3.735777496839444e-05,
      "loss": 0.0367,
      "step": 4000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9429671098670399,
      "eval_loss": 0.33325833082199097,
      "eval_runtime": 9.541,
      "eval_samples_per_second": 599.096,
      "eval_steps_per_second": 2.411,
      "step": 4746
    },
    {
      "epoch": 6.32,
      "grad_norm": 0.029533106833696365,
      "learning_rate": 3.419721871049305e-05,
      "loss": 0.0245,
      "step": 5000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9408677396780966,
      "eval_loss": 0.36710676550865173,
      "eval_runtime": 9.5507,
      "eval_samples_per_second": 598.49,
      "eval_steps_per_second": 2.408,
      "step": 5537
    },
    {
      "epoch": 7.59,
      "grad_norm": 4.256861209869385,
      "learning_rate": 3.1036662452591655e-05,
      "loss": 0.0204,
      "step": 6000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9394681595521344,
      "eval_loss": 0.424883097410202,
      "eval_runtime": 9.5729,
      "eval_samples_per_second": 597.1,
      "eval_steps_per_second": 2.403,
      "step": 6328
    },
    {
      "epoch": 8.85,
      "grad_norm": 0.002548321383073926,
      "learning_rate": 2.7876106194690264e-05,
      "loss": 0.0134,
      "step": 7000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.945591322603219,
      "eval_loss": 0.3557114601135254,
      "eval_runtime": 9.5898,
      "eval_samples_per_second": 596.05,
      "eval_steps_per_second": 2.398,
      "step": 7119
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9384184744576627,
      "eval_loss": 0.4586348235607147,
      "eval_runtime": 9.5799,
      "eval_samples_per_second": 596.667,
      "eval_steps_per_second": 2.401,
      "step": 7910
    },
    {
      "epoch": 10.11,
      "grad_norm": 0.12175916880369186,
      "learning_rate": 2.4715549936788876e-05,
      "loss": 0.0109,
      "step": 8000
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.937368789363191,
      "eval_loss": 0.5422797799110413,
      "eval_runtime": 9.5297,
      "eval_samples_per_second": 599.808,
      "eval_steps_per_second": 2.414,
      "step": 8701
    },
    {
      "epoch": 11.38,
      "grad_norm": 9.226353645324707,
      "learning_rate": 2.1554993678887485e-05,
      "loss": 0.0087,
      "step": 9000
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9457662701189643,
      "eval_loss": 0.467970609664917,
      "eval_runtime": 9.5503,
      "eval_samples_per_second": 598.514,
      "eval_steps_per_second": 2.408,
      "step": 9492
    },
    {
      "epoch": 12.64,
      "grad_norm": 0.0016844873316586018,
      "learning_rate": 1.8394437420986094e-05,
      "loss": 0.0052,
      "step": 10000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9457662701189643,
      "eval_loss": 0.4594261050224304,
      "eval_runtime": 9.5033,
      "eval_samples_per_second": 601.473,
      "eval_steps_per_second": 2.42,
      "step": 10283
    },
    {
      "epoch": 13.91,
      "grad_norm": 0.004387652967125177,
      "learning_rate": 1.5233881163084704e-05,
      "loss": 0.0071,
      "step": 11000
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9389433170048985,
      "eval_loss": 0.5178358554840088,
      "eval_runtime": 9.5325,
      "eval_samples_per_second": 599.634,
      "eval_steps_per_second": 2.413,
      "step": 11074
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9420923722883136,
      "eval_loss": 0.47059789299964905,
      "eval_runtime": 9.5558,
      "eval_samples_per_second": 598.171,
      "eval_steps_per_second": 2.407,
      "step": 11865
    },
    {
      "epoch": 15.17,
      "grad_norm": 0.2450585812330246,
      "learning_rate": 1.2073324905183313e-05,
      "loss": 0.0056,
      "step": 12000
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9434919524142757,
      "eval_loss": 0.491719126701355,
      "eval_runtime": 9.5376,
      "eval_samples_per_second": 599.312,
      "eval_steps_per_second": 2.412,
      "step": 12656
    },
    {
      "epoch": 16.43,
      "grad_norm": 0.0025873230770230293,
      "learning_rate": 8.912768647281922e-06,
      "loss": 0.0034,
      "step": 13000
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.9447165850244926,
      "eval_loss": 0.46782389283180237,
      "eval_runtime": 9.6489,
      "eval_samples_per_second": 592.402,
      "eval_steps_per_second": 2.384,
      "step": 13447
    },
    {
      "epoch": 17.7,
      "grad_norm": 0.00046305323485285044,
      "learning_rate": 5.752212389380531e-06,
      "loss": 0.0026,
      "step": 14000
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.9447165850244926,
      "eval_loss": 0.47931182384490967,
      "eval_runtime": 9.5141,
      "eval_samples_per_second": 600.794,
      "eval_steps_per_second": 2.417,
      "step": 14238
    },
    {
      "epoch": 18.96,
      "grad_norm": 0.0010309997014701366,
      "learning_rate": 2.59165613147914e-06,
      "loss": 0.0023,
      "step": 15000
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.9457662701189643,
      "eval_loss": 0.48694220185279846,
      "eval_runtime": 9.5644,
      "eval_samples_per_second": 597.63,
      "eval_steps_per_second": 2.405,
      "step": 15029
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9443666899930021,
      "eval_loss": 0.49064961075782776,
      "eval_runtime": 9.5296,
      "eval_samples_per_second": 599.814,
      "eval_steps_per_second": 2.414,
      "step": 15820
    },
    {
      "epoch": 20.0,
      "step": 15820,
      "total_flos": 3.327918265884672e+16,
      "train_loss": 0.02967632641231667,
      "train_runtime": 3368.6231,
      "train_samples_per_second": 150.186,
      "train_steps_per_second": 4.696
    }
  ],
  "logging_steps": 1000,
  "max_steps": 15820,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 3.327918265884672e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}