{
  "best_metric": 0.6399914026260376,
  "best_model_checkpoint": "tiny_bert_km_100_v1_qnli/checkpoint-1230",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 3280,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 1.3996866941452026,
      "learning_rate": 4.9e-05,
      "loss": 0.6679,
      "step": 410
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6135822807980963,
      "eval_loss": 0.6484543681144714,
      "eval_runtime": 1.9711,
      "eval_samples_per_second": 2771.484,
      "eval_steps_per_second": 11.161,
      "step": 410
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.5129108428955078,
      "learning_rate": 4.8e-05,
      "loss": 0.6394,
      "step": 820
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6251144060040271,
      "eval_loss": 0.6451206803321838,
      "eval_runtime": 1.9619,
      "eval_samples_per_second": 2784.561,
      "eval_steps_per_second": 11.214,
      "step": 820
    },
    {
      "epoch": 3.0,
      "grad_norm": 2.295121192932129,
      "learning_rate": 4.7e-05,
      "loss": 0.5956,
      "step": 1230
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.632985539081091,
      "eval_loss": 0.6399914026260376,
      "eval_runtime": 1.9021,
      "eval_samples_per_second": 2872.124,
      "eval_steps_per_second": 11.566,
      "step": 1230
    },
    {
      "epoch": 4.0,
      "grad_norm": 4.478973388671875,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.5331,
      "step": 1640
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6243822075782537,
      "eval_loss": 0.7005908489227295,
      "eval_runtime": 1.9455,
      "eval_samples_per_second": 2808.047,
      "eval_steps_per_second": 11.308,
      "step": 1640
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.21664571762085,
      "learning_rate": 4.5e-05,
      "loss": 0.4637,
      "step": 2050
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6196229178107268,
      "eval_loss": 0.7611814737319946,
      "eval_runtime": 1.9185,
      "eval_samples_per_second": 2847.577,
      "eval_steps_per_second": 11.467,
      "step": 2050
    },
    {
      "epoch": 6.0,
      "grad_norm": 4.947391510009766,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.3991,
      "step": 2460
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6223686619073769,
      "eval_loss": 0.8429216146469116,
      "eval_runtime": 1.9613,
      "eval_samples_per_second": 2785.386,
      "eval_steps_per_second": 11.217,
      "step": 2460
    },
    {
      "epoch": 7.0,
      "grad_norm": 6.124049186706543,
      "learning_rate": 4.3e-05,
      "loss": 0.3391,
      "step": 2870
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6163280248947465,
      "eval_loss": 0.9844570755958557,
      "eval_runtime": 1.9405,
      "eval_samples_per_second": 2815.187,
      "eval_steps_per_second": 11.337,
      "step": 2870
    },
    {
      "epoch": 8.0,
      "grad_norm": 4.775021553039551,
      "learning_rate": 4.2e-05,
      "loss": 0.2843,
      "step": 3280
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6176093721398499,
      "eval_loss": 1.0887032747268677,
      "eval_runtime": 2.0199,
      "eval_samples_per_second": 2704.58,
      "eval_steps_per_second": 10.892,
      "step": 3280
    },
    {
      "epoch": 8.0,
      "step": 3280,
      "total_flos": 2.197388993748173e+16,
      "train_loss": 0.49026855375708606,
      "train_runtime": 578.0795,
      "train_samples_per_second": 9059.567,
      "train_steps_per_second": 35.462
    }
  ],
  "logging_steps": 1,
  "max_steps": 20500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 2.197388993748173e+16,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}