Tags: Image Classification · Transformers · TensorBoard · Safetensors · swin · Generated from Trainer · Eval Results (legacy)
Instructions to use cppgohan/swin-tiny-patch4-window7-224-finetuned-eurosat with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use cppgohan/swin-tiny-patch4-window7-224-finetuned-eurosat with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("image-classification", model="cppgohan/swin-tiny-patch4-window7-224-finetuned-eurosat")
pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/parrots.png")
```

```python
# Load model directly
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("cppgohan/swin-tiny-patch4-window7-224-finetuned-eurosat")
model = AutoModelForImageClassification.from_pretrained("cppgohan/swin-tiny-patch4-window7-224-finetuned-eurosat")
```

A fuller single-image inference sketch follows the notebook links below.

- Notebooks
- Google Colab
- Kaggle
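
Building on the "Load model directly" snippet above, here is a minimal end-to-end sketch of classifying one local image. The example file name (`example.jpg`), the PyTorch/Pillow dependencies, and the argmax-over-logits step are illustrative assumptions, not part of the original instructions.

```python
# Hedged sketch: single-image inference with the fine-tuned Swin classifier.
# "example.jpg" is a placeholder path; any RGB image will do.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_id = "cppgohan/swin-tiny-patch4-window7-224-finetuned-eurosat"
processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

image = Image.open("example.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")  # resize/normalize to the model's 224x224 input

with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # human-readable class label
```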
Trainer state (`trainer_state.json`) recorded during fine-tuning:

```json
{
  "best_metric": 0.9777777777777777,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-570",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 570,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.05, "grad_norm": 6.905220031738281, "learning_rate": 8.771929824561403e-06, "loss": 2.2755, "step": 10 },
    { "epoch": 0.11, "grad_norm": 5.883072853088379, "learning_rate": 1.7543859649122806e-05, "loss": 2.0818, "step": 20 },
    { "epoch": 0.16, "grad_norm": 6.574605941772461, "learning_rate": 2.6315789473684212e-05, "loss": 1.6674, "step": 30 },
    { "epoch": 0.21, "grad_norm": 18.086021423339844, "learning_rate": 3.508771929824561e-05, "loss": 1.0708, "step": 40 },
    { "epoch": 0.26, "grad_norm": 13.633099555969238, "learning_rate": 4.3859649122807014e-05, "loss": 0.6766, "step": 50 },
    { "epoch": 0.32, "grad_norm": 10.846696853637695, "learning_rate": 4.970760233918128e-05, "loss": 0.5076, "step": 60 },
    { "epoch": 0.37, "grad_norm": 18.08544921875, "learning_rate": 4.8732943469785574e-05, "loss": 0.4044, "step": 70 },
    { "epoch": 0.42, "grad_norm": 18.805522918701172, "learning_rate": 4.7758284600389865e-05, "loss": 0.4289, "step": 80 },
    { "epoch": 0.47, "grad_norm": 25.817827224731445, "learning_rate": 4.678362573099415e-05, "loss": 0.3679, "step": 90 },
    { "epoch": 0.53, "grad_norm": 19.707490921020508, "learning_rate": 4.580896686159844e-05, "loss": 0.3635, "step": 100 },
    { "epoch": 0.58, "grad_norm": 12.682143211364746, "learning_rate": 4.483430799220273e-05, "loss": 0.3333, "step": 110 },
    { "epoch": 0.63, "grad_norm": 10.674261093139648, "learning_rate": 4.3859649122807014e-05, "loss": 0.2928, "step": 120 },
    { "epoch": 0.68, "grad_norm": 11.278220176696777, "learning_rate": 4.2884990253411305e-05, "loss": 0.2902, "step": 130 },
    { "epoch": 0.74, "grad_norm": 13.635940551757812, "learning_rate": 4.1910331384015596e-05, "loss": 0.2921, "step": 140 },
    { "epoch": 0.79, "grad_norm": 9.778814315795898, "learning_rate": 4.093567251461988e-05, "loss": 0.2716, "step": 150 },
    { "epoch": 0.84, "grad_norm": 25.61815643310547, "learning_rate": 3.996101364522417e-05, "loss": 0.2775, "step": 160 },
    { "epoch": 0.89, "grad_norm": 13.44371223449707, "learning_rate": 3.898635477582846e-05, "loss": 0.2516, "step": 170 },
    { "epoch": 0.95, "grad_norm": 9.713900566101074, "learning_rate": 3.8011695906432746e-05, "loss": 0.2305, "step": 180 },
    { "epoch": 1.0, "grad_norm": 10.197415351867676, "learning_rate": 3.7037037037037037e-05, "loss": 0.2542, "step": 190 },
    { "epoch": 1.0, "eval_accuracy": 0.9614814814814815, "eval_loss": 0.12235087901353836, "eval_runtime": 30.8807, "eval_samples_per_second": 87.433, "eval_steps_per_second": 2.753, "step": 190 },
    { "epoch": 1.05, "grad_norm": 9.8814697265625, "learning_rate": 3.606237816764133e-05, "loss": 0.2462, "step": 200 },
    { "epoch": 1.11, "grad_norm": 11.23945140838623, "learning_rate": 3.508771929824561e-05, "loss": 0.2386, "step": 210 },
    { "epoch": 1.16, "grad_norm": 11.149582862854004, "learning_rate": 3.41130604288499e-05, "loss": 0.2409, "step": 220 },
    { "epoch": 1.21, "grad_norm": 12.31209659576416, "learning_rate": 3.313840155945419e-05, "loss": 0.2301, "step": 230 },
    { "epoch": 1.26, "grad_norm": 28.526798248291016, "learning_rate": 3.216374269005848e-05, "loss": 0.2241, "step": 240 },
    { "epoch": 1.32, "grad_norm": 12.132701873779297, "learning_rate": 3.118908382066277e-05, "loss": 0.2201, "step": 250 },
    { "epoch": 1.37, "grad_norm": 13.534875869750977, "learning_rate": 3.0214424951267055e-05, "loss": 0.2013, "step": 260 },
    { "epoch": 1.42, "grad_norm": 9.8684720993042, "learning_rate": 2.9239766081871346e-05, "loss": 0.2075, "step": 270 },
    { "epoch": 1.47, "grad_norm": 17.482439041137695, "learning_rate": 2.8265107212475634e-05, "loss": 0.1876, "step": 280 },
    { "epoch": 1.53, "grad_norm": 6.973119258880615, "learning_rate": 2.729044834307992e-05, "loss": 0.1875, "step": 290 },
    { "epoch": 1.58, "grad_norm": 7.083260536193848, "learning_rate": 2.6315789473684212e-05, "loss": 0.181, "step": 300 },
    { "epoch": 1.63, "grad_norm": 9.20019245147705, "learning_rate": 2.53411306042885e-05, "loss": 0.1562, "step": 310 },
    { "epoch": 1.68, "grad_norm": 14.853752136230469, "learning_rate": 2.4366471734892787e-05, "loss": 0.1644, "step": 320 },
    { "epoch": 1.74, "grad_norm": 6.650509357452393, "learning_rate": 2.3391812865497074e-05, "loss": 0.1794, "step": 330 },
    { "epoch": 1.79, "grad_norm": 7.536526679992676, "learning_rate": 2.2417153996101365e-05, "loss": 0.1369, "step": 340 },
    { "epoch": 1.84, "grad_norm": 9.41813850402832, "learning_rate": 2.1442495126705653e-05, "loss": 0.199, "step": 350 },
    { "epoch": 1.89, "grad_norm": 10.232301712036133, "learning_rate": 2.046783625730994e-05, "loss": 0.1678, "step": 360 },
    { "epoch": 1.95, "grad_norm": 8.716858863830566, "learning_rate": 1.949317738791423e-05, "loss": 0.1889, "step": 370 },
    { "epoch": 2.0, "grad_norm": 9.904465675354004, "learning_rate": 1.8518518518518518e-05, "loss": 0.171, "step": 380 },
    { "epoch": 2.0, "eval_accuracy": 0.9688888888888889, "eval_loss": 0.08309615403413773, "eval_runtime": 31.5481, "eval_samples_per_second": 85.584, "eval_steps_per_second": 2.694, "step": 380 },
    { "epoch": 2.05, "grad_norm": 11.40892505645752, "learning_rate": 1.7543859649122806e-05, "loss": 0.1923, "step": 390 },
    { "epoch": 2.11, "grad_norm": 15.750476837158203, "learning_rate": 1.6569200779727097e-05, "loss": 0.1873, "step": 400 },
    { "epoch": 2.16, "grad_norm": 10.811800003051758, "learning_rate": 1.5594541910331384e-05, "loss": 0.1703, "step": 410 },
    { "epoch": 2.21, "grad_norm": 16.07097816467285, "learning_rate": 1.4619883040935673e-05, "loss": 0.1624, "step": 420 },
    { "epoch": 2.26, "grad_norm": 10.764117240905762, "learning_rate": 1.364522417153996e-05, "loss": 0.1532, "step": 430 },
    { "epoch": 2.32, "grad_norm": 3.9213733673095703, "learning_rate": 1.267056530214425e-05, "loss": 0.1373, "step": 440 },
    { "epoch": 2.37, "grad_norm": 6.07966947555542, "learning_rate": 1.1695906432748537e-05, "loss": 0.16, "step": 450 },
    { "epoch": 2.42, "grad_norm": 13.090170860290527, "learning_rate": 1.0721247563352826e-05, "loss": 0.1317, "step": 460 },
    { "epoch": 2.47, "grad_norm": 9.082914352416992, "learning_rate": 9.746588693957115e-06, "loss": 0.14, "step": 470 },
    { "epoch": 2.53, "grad_norm": 7.2037248611450195, "learning_rate": 8.771929824561403e-06, "loss": 0.1731, "step": 480 },
    { "epoch": 2.58, "grad_norm": 6.6279120445251465, "learning_rate": 7.797270955165692e-06, "loss": 0.1256, "step": 490 },
    { "epoch": 2.63, "grad_norm": 7.699619770050049, "learning_rate": 6.82261208576998e-06, "loss": 0.12, "step": 500 },
    { "epoch": 2.68, "grad_norm": 7.525733947753906, "learning_rate": 5.8479532163742686e-06, "loss": 0.1514, "step": 510 },
    { "epoch": 2.74, "grad_norm": 6.572248458862305, "learning_rate": 4.873294346978558e-06, "loss": 0.1316, "step": 520 },
    { "epoch": 2.79, "grad_norm": 17.000349044799805, "learning_rate": 3.898635477582846e-06, "loss": 0.1674, "step": 530 },
    { "epoch": 2.84, "grad_norm": 8.921578407287598, "learning_rate": 2.9239766081871343e-06, "loss": 0.1624, "step": 540 },
    { "epoch": 2.89, "grad_norm": 5.688661098480225, "learning_rate": 1.949317738791423e-06, "loss": 0.1641, "step": 550 },
    { "epoch": 2.95, "grad_norm": 7.114222526550293, "learning_rate": 9.746588693957115e-07, "loss": 0.152, "step": 560 },
    { "epoch": 3.0, "grad_norm": 9.845104217529297, "learning_rate": 0.0, "loss": 0.1617, "step": 570 },
    { "epoch": 3.0, "eval_accuracy": 0.9777777777777777, "eval_loss": 0.06170220300555229, "eval_runtime": 31.1379, "eval_samples_per_second": 86.711, "eval_steps_per_second": 2.73, "step": 570 },
    { "epoch": 3.0, "step": 570, "total_flos": 1.8124066505760768e+18, "train_loss": 0.33351591700001765, "train_runtime": 1156.1276, "train_samples_per_second": 63.055, "train_steps_per_second": 0.493 }
  ],
  "logging_steps": 10,
  "max_steps": 570,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.8124066505760768e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
```
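
If you want to inspect the state above programmatically, a small sketch like the following can separate the per-step training entries from the per-epoch evaluation entries. The local file path and the key-based filtering (`loss` vs. `eval_loss`) are assumptions based on the structure shown above, not part of the original page.

```python
# Hedged sketch: summarize log_history from a locally downloaded trainer_state.json.
import json

with open("trainer_state.json") as f:  # placeholder path; adjust to wherever the file lives
    state = json.load(f)

train_entries = [e for e in state["log_history"] if "loss" in e]      # per-step training logs
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]  # per-epoch evaluations

for e in eval_entries:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f}, "
          f"accuracy={e['eval_accuracy']:.4f}")

print("best metric:", state["best_metric"])
print("final training loss:", train_entries[-1]["loss"])
```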