{
  "best_metric": 0.8770806658130602,
  "best_model_checkpoint": "swin-base-patch4-window7-224-MM_Classification_base/checkpoint-133",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 2.9590132236480713,
      "learning_rate": 2.6315789473684212e-05,
      "loss": 0.887,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8565941101152369,
      "eval_loss": 0.40119943022727966,
      "eval_runtime": 115.286,
      "eval_samples_per_second": 6.774,
      "eval_steps_per_second": 0.061,
      "step": 19
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 3.5732860565185547,
      "learning_rate": 4.970760233918128e-05,
      "loss": 0.5767,
      "step": 20
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 2.910856246948242,
      "learning_rate": 4.678362573099415e-05,
      "loss": 0.4302,
      "step": 30
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8655569782330346,
      "eval_loss": 0.336061030626297,
      "eval_runtime": 117.4096,
      "eval_samples_per_second": 6.652,
      "eval_steps_per_second": 0.06,
      "step": 38
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 3.088682174682617,
      "learning_rate": 4.3859649122807014e-05,
      "loss": 0.3897,
      "step": 40
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 2.117629289627075,
      "learning_rate": 4.093567251461988e-05,
      "loss": 0.3477,
      "step": 50
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8655569782330346,
      "eval_loss": 0.32724371552467346,
      "eval_runtime": 80.0181,
      "eval_samples_per_second": 9.76,
      "eval_steps_per_second": 0.087,
      "step": 57
    },
    {
      "epoch": 3.1578947368421053,
      "grad_norm": 2.3793277740478516,
      "learning_rate": 3.8011695906432746e-05,
      "loss": 0.351,
      "step": 60
    },
    {
      "epoch": 3.6842105263157894,
      "grad_norm": 2.0850284099578857,
      "learning_rate": 3.508771929824561e-05,
      "loss": 0.3281,
      "step": 70
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8693982074263764,
      "eval_loss": 0.31292393803596497,
      "eval_runtime": 102.92,
      "eval_samples_per_second": 7.588,
      "eval_steps_per_second": 0.068,
      "step": 76
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 2.5610477924346924,
      "learning_rate": 3.216374269005848e-05,
      "loss": 0.3068,
      "step": 80
    },
    {
      "epoch": 4.7368421052631575,
      "grad_norm": 2.278917074203491,
      "learning_rate": 2.9239766081871346e-05,
      "loss": 0.308,
      "step": 90
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8732394366197183,
      "eval_loss": 0.29839685559272766,
      "eval_runtime": 111.8165,
      "eval_samples_per_second": 6.985,
      "eval_steps_per_second": 0.063,
      "step": 95
    },
    {
      "epoch": 5.2631578947368425,
      "grad_norm": 3.0176851749420166,
      "learning_rate": 2.6315789473684212e-05,
      "loss": 0.2932,
      "step": 100
    },
    {
      "epoch": 5.7894736842105265,
      "grad_norm": 2.252465009689331,
      "learning_rate": 2.3391812865497074e-05,
      "loss": 0.2821,
      "step": 110
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8693982074263764,
      "eval_loss": 0.30104175209999084,
      "eval_runtime": 80.2492,
      "eval_samples_per_second": 9.732,
      "eval_steps_per_second": 0.087,
      "step": 114
    },
    {
      "epoch": 6.315789473684211,
      "grad_norm": 2.284252643585205,
      "learning_rate": 2.046783625730994e-05,
      "loss": 0.2867,
      "step": 120
    },
    {
      "epoch": 6.842105263157895,
      "grad_norm": 1.9984803199768066,
      "learning_rate": 1.7543859649122806e-05,
      "loss": 0.2763,
      "step": 130
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8770806658130602,
      "eval_loss": 0.2997962534427643,
      "eval_runtime": 88.4046,
      "eval_samples_per_second": 8.834,
      "eval_steps_per_second": 0.079,
      "step": 133
    },
    {
      "epoch": 7.368421052631579,
      "grad_norm": 2.303579330444336,
      "learning_rate": 1.4619883040935673e-05,
      "loss": 0.2721,
      "step": 140
    },
    {
      "epoch": 7.894736842105263,
      "grad_norm": 2.0915815830230713,
      "learning_rate": 1.1695906432748537e-05,
      "loss": 0.2607,
      "step": 150
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8719590268886044,
      "eval_loss": 0.29377269744873047,
      "eval_runtime": 111.5913,
      "eval_samples_per_second": 6.999,
      "eval_steps_per_second": 0.063,
      "step": 152
    },
    {
      "epoch": 8.421052631578947,
      "grad_norm": 2.786160469055176,
      "learning_rate": 8.771929824561403e-06,
      "loss": 0.2567,
      "step": 160
    },
    {
      "epoch": 8.947368421052632,
      "grad_norm": 2.6700336933135986,
      "learning_rate": 5.8479532163742686e-06,
      "loss": 0.2502,
      "step": 170
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8732394366197183,
      "eval_loss": 0.2990225553512573,
      "eval_runtime": 81.6699,
      "eval_samples_per_second": 9.563,
      "eval_steps_per_second": 0.086,
      "step": 171
    },
    {
      "epoch": 9.473684210526315,
      "grad_norm": 2.385366439819336,
      "learning_rate": 2.9239766081871343e-06,
      "loss": 0.2528,
      "step": 180
    },
    {
      "epoch": 10.0,
      "grad_norm": 26.124296188354492,
      "learning_rate": 0.0,
      "loss": 0.2337,
      "step": 190
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8758002560819462,
      "eval_loss": 0.29783955216407776,
      "eval_runtime": 109.5383,
      "eval_samples_per_second": 7.13,
      "eval_steps_per_second": 0.064,
      "step": 190
    },
    {
      "epoch": 10.0,
      "step": 190,
      "total_flos": 7.539281902738575e+18,
      "train_loss": 0.3468283264260543,
      "train_runtime": 12701.0006,
      "train_samples_per_second": 7.577,
      "train_steps_per_second": 0.015
    }
  ],
  "logging_steps": 10,
  "max_steps": 190,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.539281902738575e+18,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}