{
  "best_metric": 0.6237846612930298,
  "best_model_checkpoint": "tiny_bert_km_100_v1_cola/checkpoint-34",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 204,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.7257574200630188,
      "learning_rate": 4.9e-05,
      "loss": 0.6196,
      "step": 34
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6237846612930298,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.3991,
      "eval_samples_per_second": 2613.323,
      "eval_steps_per_second": 12.528,
      "step": 34
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.121646761894226,
      "learning_rate": 4.8e-05,
      "loss": 0.6075,
      "step": 68
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6263473629951477,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.4095,
      "eval_samples_per_second": 2546.705,
      "eval_steps_per_second": 12.209,
      "step": 68
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.8357798457145691,
      "learning_rate": 4.7e-05,
      "loss": 0.5972,
      "step": 102
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6293219923973083,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.3815,
      "eval_samples_per_second": 2733.875,
      "eval_steps_per_second": 13.106,
      "step": 102
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.1285151243209839,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.5804,
      "step": 136
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.668264627456665,
      "eval_loss": 0.635418713092804,
      "eval_matthews_correlation": -0.019583278456274576,
      "eval_runtime": 0.481,
      "eval_samples_per_second": 2168.225,
      "eval_steps_per_second": 10.394,
      "step": 136
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.3463857173919678,
      "learning_rate": 4.5e-05,
      "loss": 0.5561,
      "step": 170
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6673058271408081,
      "eval_loss": 0.6380937099456787,
      "eval_matthews_correlation": 0.036081175524456124,
      "eval_runtime": 0.3833,
      "eval_samples_per_second": 2721.024,
      "eval_steps_per_second": 13.044,
      "step": 170
    },
    {
      "epoch": 6.0,
      "grad_norm": 2.237225294113159,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.5088,
      "step": 204
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6615532040596008,
      "eval_loss": 0.6592812538146973,
      "eval_matthews_correlation": 0.05373746752832175,
      "eval_runtime": 0.4768,
      "eval_samples_per_second": 2187.53,
      "eval_steps_per_second": 10.487,
      "step": 204
    },
    {
      "epoch": 6.0,
      "step": 204,
      "total_flos": 1345426898614272.0,
      "train_loss": 0.5782780647277832,
      "train_runtime": 41.8478,
      "train_samples_per_second": 10216.779,
      "train_steps_per_second": 40.623
    }
  ],
  "logging_steps": 1,
  "max_steps": 1700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 1345426898614272.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}