{
  "best_metric": 0.9196428571428571,
  "best_model_checkpoint": "swinv2-tiny-patch4-window8-256-Lego-v2-3ep/checkpoint-147",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 147,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 4.521535396575928,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.7556,
      "step": 10
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 7.520199775695801,
      "learning_rate": 4.810606060606061e-05,
      "loss": 2.4445,
      "step": 20
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 13.214489936828613,
      "learning_rate": 4.431818181818182e-05,
      "loss": 1.8022,
      "step": 30
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 14.265619277954102,
      "learning_rate": 4.053030303030303e-05,
      "loss": 1.3415,
      "step": 40
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7857142857142857,
      "eval_loss": 0.6598730683326721,
      "eval_runtime": 0.7369,
      "eval_samples_per_second": 151.997,
      "eval_steps_per_second": 5.428,
      "step": 49
    },
    {
      "epoch": 1.0204081632653061,
      "grad_norm": 15.687291145324707,
      "learning_rate": 3.6742424242424246e-05,
      "loss": 1.138,
      "step": 50
    },
    {
      "epoch": 1.2244897959183674,
      "grad_norm": 11.222309112548828,
      "learning_rate": 3.295454545454545e-05,
      "loss": 0.8949,
      "step": 60
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 13.59701919555664,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.8637,
      "step": 70
    },
    {
      "epoch": 1.6326530612244898,
      "grad_norm": 8.102276802062988,
      "learning_rate": 2.537878787878788e-05,
      "loss": 0.7632,
      "step": 80
    },
    {
      "epoch": 1.836734693877551,
      "grad_norm": 10.037010192871094,
      "learning_rate": 2.1590909090909093e-05,
      "loss": 0.7005,
      "step": 90
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8928571428571429,
      "eval_loss": 0.3036225736141205,
      "eval_runtime": 0.7855,
      "eval_samples_per_second": 142.578,
      "eval_steps_per_second": 5.092,
      "step": 98
    },
    {
      "epoch": 2.0408163265306123,
      "grad_norm": 9.7354154586792,
      "learning_rate": 1.7803030303030303e-05,
      "loss": 0.6343,
      "step": 100
    },
    {
      "epoch": 2.2448979591836733,
      "grad_norm": 8.97431468963623,
      "learning_rate": 1.4015151515151515e-05,
      "loss": 0.5505,
      "step": 110
    },
    {
      "epoch": 2.4489795918367347,
      "grad_norm": 8.927824974060059,
      "learning_rate": 1.0227272727272729e-05,
      "loss": 0.5527,
      "step": 120
    },
    {
      "epoch": 2.6530612244897958,
      "grad_norm": 7.921907424926758,
      "learning_rate": 6.43939393939394e-06,
      "loss": 0.4982,
      "step": 130
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 9.745354652404785,
      "learning_rate": 2.651515151515152e-06,
      "loss": 0.5615,
      "step": 140
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9196428571428571,
      "eval_loss": 0.2128717005252838,
      "eval_runtime": 0.7718,
      "eval_samples_per_second": 145.113,
      "eval_steps_per_second": 5.183,
      "step": 147
    },
    {
      "epoch": 3.0,
      "step": 147,
      "total_flos": 4.6849871171251814e+17,
      "train_loss": 1.081290222349621,
      "train_runtime": 252.638,
      "train_samples_per_second": 74.419,
      "train_steps_per_second": 0.582
    }
  ],
  "logging_steps": 10,
  "max_steps": 147,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 4.6849871171251814e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}