{
  "best_metric": 0.09380888193845749,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-crack-detector\\checkpoint-390",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 390,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 8.47457627118644e-06,
      "loss": 0.0205,
      "step": 10
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.694915254237288e-05,
      "loss": 0.0192,
      "step": 20
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.5423728813559322e-05,
      "loss": 0.0016,
      "step": 30
    },
    {
      "epoch": 0.21,
      "learning_rate": 3.389830508474576e-05,
      "loss": 0.0006,
      "step": 40
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.2372881355932206e-05,
      "loss": 0.0048,
      "step": 50
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.990494296577947e-05,
      "loss": 0.0033,
      "step": 60
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.8954372623574146e-05,
      "loss": 0.0118,
      "step": 70
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.800380228136883e-05,
      "loss": 0.0119,
      "step": 80
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.70532319391635e-05,
      "loss": 0.0159,
      "step": 90
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.610266159695818e-05,
      "loss": 0.0376,
      "step": 100
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.5152091254752856e-05,
      "loss": 0.0165,
      "step": 110
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.4201520912547525e-05,
      "loss": 0.0399,
      "step": 120
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.325095057034221e-05,
      "loss": 0.0022,
      "step": 130
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.2300380228136884e-05,
      "loss": 0.0325,
      "step": 140
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.134980988593156e-05,
      "loss": 0.0384,
      "step": 150
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.0399239543726235e-05,
      "loss": 0.0329,
      "step": 160
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.944866920152092e-05,
      "loss": 0.0258,
      "step": 170
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.849809885931559e-05,
      "loss": 0.0683,
      "step": 180
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.754752851711027e-05,
      "loss": 0.0701,
      "step": 190
    },
    {
      "epoch": 1.0,
      "eval_accuracy": {
        "accuracy": 0.9721153846153846
      },
      "eval_f1": {
        "f1": 0.9727832552836626
      },
      "eval_loss": 0.13044534623622894,
      "eval_precision": {
        "precision": 0.9746967252849491
      },
      "eval_recall": {
        "recall": 0.9711095149952231
      },
      "eval_runtime": 18.8919,
      "eval_samples_per_second": 165.15,
      "eval_steps_per_second": 10.322,
      "step": 195
    },
    {
      "epoch": 1.03,
      "learning_rate": 3.6596958174904945e-05,
      "loss": 0.051,
      "step": 200
    },
    {
      "epoch": 1.08,
      "learning_rate": 3.564638783269962e-05,
      "loss": 0.0247,
      "step": 210
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.46958174904943e-05,
      "loss": 0.0193,
      "step": 220
    },
    {
      "epoch": 1.18,
      "learning_rate": 3.374524714828898e-05,
      "loss": 0.1006,
      "step": 230
    },
    {
      "epoch": 1.23,
      "learning_rate": 3.2794676806083655e-05,
      "loss": 0.0373,
      "step": 240
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.1844106463878324e-05,
      "loss": 0.017,
      "step": 250
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.0893536121673007e-05,
      "loss": 0.0077,
      "step": 260
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.994296577946768e-05,
      "loss": 0.0087,
      "step": 270
    },
    {
      "epoch": 1.44,
      "learning_rate": 2.8992395437262358e-05,
      "loss": 0.0183,
      "step": 280
    },
    {
      "epoch": 1.49,
      "learning_rate": 2.8041825095057034e-05,
      "loss": 0.0366,
      "step": 290
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.7091254752851713e-05,
      "loss": 0.023,
      "step": 300
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.614068441064639e-05,
      "loss": 0.0081,
      "step": 310
    },
    {
      "epoch": 1.64,
      "learning_rate": 2.5190114068441068e-05,
      "loss": 0.0129,
      "step": 320
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.423954372623574e-05,
      "loss": 0.0161,
      "step": 330
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.328897338403042e-05,
      "loss": 0.0447,
      "step": 340
    },
    {
      "epoch": 1.79,
      "learning_rate": 2.2338403041825095e-05,
      "loss": 0.0273,
      "step": 350
    },
    {
      "epoch": 1.85,
      "learning_rate": 2.138783269961977e-05,
      "loss": 0.0252,
      "step": 360
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.043726235741445e-05,
      "loss": 0.0328,
      "step": 370
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.9486692015209126e-05,
      "loss": 0.0218,
      "step": 380
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.8536121673003802e-05,
      "loss": 0.0905,
      "step": 390
    },
    {
      "epoch": 2.0,
      "eval_accuracy": {
        "accuracy": 0.9814102564102564
      },
      "eval_f1": {
        "f1": 0.9818383801713862
      },
      "eval_loss": 0.09380888193845749,
      "eval_precision": {
        "precision": 0.9826561250472529
      },
      "eval_recall": {
        "recall": 0.9810808183547794
      },
      "eval_runtime": 19.2819,
      "eval_samples_per_second": 161.81,
      "eval_steps_per_second": 10.113,
      "step": 390
    }
  ],
  "logging_steps": 10,
  "max_steps": 585,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 6.204401563336704e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}