{
  "best_metric": 0.9717868338557993,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-LEGO/checkpoint-225",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 225,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 4.567494869232178,
      "learning_rate": 2.173913043478261e-05,
      "loss": 2.7413,
      "step": 10
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 8.966611862182617,
      "learning_rate": 4.347826086956522e-05,
      "loss": 2.4945,
      "step": 20
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 16.994686126708984,
      "learning_rate": 4.826732673267327e-05,
      "loss": 1.8167,
      "step": 30
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 23.02696990966797,
      "learning_rate": 4.57920792079208e-05,
      "loss": 1.2301,
      "step": 40
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.713166144200627,
      "eval_loss": 0.7920515537261963,
      "eval_runtime": 617.4774,
      "eval_samples_per_second": 1.033,
      "eval_steps_per_second": 0.032,
      "step": 45
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 15.22125244140625,
      "learning_rate": 4.331683168316832e-05,
      "loss": 0.9193,
      "step": 50
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 12.350472450256348,
      "learning_rate": 4.0841584158415844e-05,
      "loss": 0.7267,
      "step": 60
    },
    {
      "epoch": 1.5555555555555556,
      "grad_norm": 20.679580688476562,
      "learning_rate": 3.8366336633663367e-05,
      "loss": 0.6571,
      "step": 70
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 11.873376846313477,
      "learning_rate": 3.589108910891089e-05,
      "loss": 0.591,
      "step": 80
    },
    {
      "epoch": 2.0,
      "grad_norm": 17.2018985748291,
      "learning_rate": 3.341584158415842e-05,
      "loss": 0.5433,
      "step": 90
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8918495297805643,
      "eval_loss": 0.30467158555984497,
      "eval_runtime": 4.017,
      "eval_samples_per_second": 158.823,
      "eval_steps_per_second": 4.979,
      "step": 90
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 6.819035053253174,
      "learning_rate": 3.094059405940594e-05,
      "loss": 0.5013,
      "step": 100
    },
    {
      "epoch": 2.4444444444444446,
      "grad_norm": 9.604576110839844,
      "learning_rate": 2.8465346534653464e-05,
      "loss": 0.4186,
      "step": 110
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 13.680152893066406,
      "learning_rate": 2.5990099009900993e-05,
      "loss": 0.4364,
      "step": 120
    },
    {
      "epoch": 2.888888888888889,
      "grad_norm": 6.642324924468994,
      "learning_rate": 2.3514851485148515e-05,
      "loss": 0.4067,
      "step": 130
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9278996865203761,
      "eval_loss": 0.20282776653766632,
      "eval_runtime": 4.147,
      "eval_samples_per_second": 153.847,
      "eval_steps_per_second": 4.823,
      "step": 135
    },
    {
      "epoch": 3.111111111111111,
      "grad_norm": 10.98905086517334,
      "learning_rate": 2.103960396039604e-05,
      "loss": 0.4095,
      "step": 140
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 10.862679481506348,
      "learning_rate": 1.8564356435643564e-05,
      "loss": 0.3771,
      "step": 150
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 12.117408752441406,
      "learning_rate": 1.608910891089109e-05,
      "loss": 0.3815,
      "step": 160
    },
    {
      "epoch": 3.7777777777777777,
      "grad_norm": 8.852999687194824,
      "learning_rate": 1.3613861386138616e-05,
      "loss": 0.3363,
      "step": 170
    },
    {
      "epoch": 4.0,
      "grad_norm": 11.115220069885254,
      "learning_rate": 1.113861386138614e-05,
      "loss": 0.3297,
      "step": 180
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.957680250783699,
      "eval_loss": 0.12823660671710968,
      "eval_runtime": 4.1114,
      "eval_samples_per_second": 155.177,
      "eval_steps_per_second": 4.864,
      "step": 180
    },
    {
      "epoch": 4.222222222222222,
      "grad_norm": 17.916852951049805,
      "learning_rate": 8.663366336633663e-06,
      "loss": 0.3482,
      "step": 190
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 7.79290771484375,
      "learning_rate": 6.1881188118811885e-06,
      "loss": 0.3002,
      "step": 200
    },
    {
      "epoch": 4.666666666666667,
      "grad_norm": 10.635743141174316,
      "learning_rate": 3.7128712871287128e-06,
      "loss": 0.3364,
      "step": 210
    },
    {
      "epoch": 4.888888888888889,
      "grad_norm": 9.049588203430176,
      "learning_rate": 1.2376237623762377e-06,
      "loss": 0.3334,
      "step": 220
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9717868338557993,
      "eval_loss": 0.11080693453550339,
      "eval_runtime": 3.9373,
      "eval_samples_per_second": 162.039,
      "eval_steps_per_second": 5.08,
      "step": 225
    },
    {
      "epoch": 5.0,
      "step": 225,
      "total_flos": 7.137702770984755e+17,
      "train_loss": 0.747228291299608,
      "train_runtime": 6337.5569,
      "train_samples_per_second": 4.529,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 225,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 7.137702770984755e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}