{
  "best_metric": 0.48913320899009705,
  "best_model_checkpoint": "tiny_bert_km_100_v1_sst2/checkpoint-528",
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 1848,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 6.6441330909729,
      "learning_rate": 4.9e-05,
      "loss": 0.4472,
      "step": 264
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7706422018348624,
      "eval_loss": 0.49030551314353943,
      "eval_runtime": 0.3358,
      "eval_samples_per_second": 2597.018,
      "eval_steps_per_second": 11.913,
      "step": 264
    },
    {
      "epoch": 2.0,
      "grad_norm": 8.4056978225708,
      "learning_rate": 4.8e-05,
      "loss": 0.2467,
      "step": 528
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8096330275229358,
      "eval_loss": 0.48913320899009705,
      "eval_runtime": 0.3348,
      "eval_samples_per_second": 2604.301,
      "eval_steps_per_second": 11.946,
      "step": 528
    },
    {
      "epoch": 3.0,
      "grad_norm": 6.154969692230225,
      "learning_rate": 4.7e-05,
      "loss": 0.1937,
      "step": 792
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8119266055045872,
      "eval_loss": 0.5006576180458069,
      "eval_runtime": 0.3369,
      "eval_samples_per_second": 2588.53,
      "eval_steps_per_second": 11.874,
      "step": 792
    },
    {
      "epoch": 4.0,
      "grad_norm": 5.024272441864014,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.1599,
      "step": 1056
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8211009174311926,
      "eval_loss": 0.5037238001823425,
      "eval_runtime": 0.3341,
      "eval_samples_per_second": 2610.386,
      "eval_steps_per_second": 11.974,
      "step": 1056
    },
    {
      "epoch": 5.0,
      "grad_norm": 12.794002532958984,
      "learning_rate": 4.5e-05,
      "loss": 0.1346,
      "step": 1320
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8027522935779816,
      "eval_loss": 0.6623803973197937,
      "eval_runtime": 0.3377,
      "eval_samples_per_second": 2582.468,
      "eval_steps_per_second": 11.846,
      "step": 1320
    },
    {
      "epoch": 6.0,
      "grad_norm": 7.680220603942871,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.1116,
      "step": 1584
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.801605504587156,
      "eval_loss": 0.6961482167243958,
      "eval_runtime": 0.3365,
      "eval_samples_per_second": 2591.759,
      "eval_steps_per_second": 11.889,
      "step": 1584
    },
    {
      "epoch": 7.0,
      "grad_norm": 14.835103034973145,
      "learning_rate": 4.3e-05,
      "loss": 0.094,
      "step": 1848
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8084862385321101,
      "eval_loss": 0.7630754113197327,
      "eval_runtime": 0.3544,
      "eval_samples_per_second": 2460.707,
      "eval_steps_per_second": 11.288,
      "step": 1848
    },
    {
      "epoch": 7.0,
      "step": 1848,
      "total_flos": 1.2362922335855616e+16,
      "train_loss": 0.19824472650305017,
      "train_runtime": 325.8985,
      "train_samples_per_second": 10332.818,
      "train_steps_per_second": 40.503
    }
  ],
  "logging_steps": 1,
  "max_steps": 13200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 1.2362922335855616e+16,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}