{
  "best_metric": 0.621593713760376,
  "best_model_checkpoint": "tiny_bert_km_50_v1_cola/checkpoint-34",
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 204,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.7628885507583618,
      "learning_rate": 4.9e-05,
      "loss": 0.6224,
      "step": 34
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.621593713760376,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.4018,
      "eval_samples_per_second": 2595.926,
      "eval_steps_per_second": 12.445,
      "step": 34
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.0041512250900269,
      "learning_rate": 4.8e-05,
      "loss": 0.6083,
      "step": 68
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6220319271087646,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.4197,
      "eval_samples_per_second": 2485.372,
      "eval_steps_per_second": 11.915,
      "step": 68
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.9402241110801697,
      "learning_rate": 4.7e-05,
      "loss": 0.5982,
      "step": 102
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6228198409080505,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.4066,
      "eval_samples_per_second": 2564.868,
      "eval_steps_per_second": 12.296,
      "step": 102
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.9954250454902649,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.5783,
      "step": 136
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6883988380432129,
      "eval_loss": 0.6298257112503052,
      "eval_matthews_correlation": 0.08585482785878726,
      "eval_runtime": 0.4156,
      "eval_samples_per_second": 2509.564,
      "eval_steps_per_second": 12.031,
      "step": 136
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.6896628141403198,
      "learning_rate": 4.5e-05,
      "loss": 0.5458,
      "step": 170
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6788111329078674,
      "eval_loss": 0.6444757580757141,
      "eval_matthews_correlation": 0.058343519603323714,
      "eval_runtime": 0.4047,
      "eval_samples_per_second": 2576.964,
      "eval_steps_per_second": 12.354,
      "step": 170
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.9982967376708984,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.501,
      "step": 204
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6586769223213196,
      "eval_loss": 0.669975221157074,
      "eval_matthews_correlation": 0.11363806297455119,
      "eval_runtime": 0.413,
      "eval_samples_per_second": 2525.728,
      "eval_steps_per_second": 12.108,
      "step": 204
    },
    {
      "epoch": 6.0,
      "step": 204,
      "total_flos": 1345426898614272.0,
      "train_loss": 0.5756891848994237,
      "train_runtime": 42.7732,
      "train_samples_per_second": 9995.753,
      "train_steps_per_second": 39.745
    }
  ],
  "logging_steps": 1,
  "max_steps": 1700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 1345426898614272.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}