Update app.py
app.py CHANGED
@@ -1,10 +1,12 @@
 import gradio as gr
 from transformers import pipeline
+import os

-#
+# Load models
 vit_classifier = pipeline("image-classification", model="kuhs/vit-base-oxford-iiit-pets")
 clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")

+# Label list for the CLIP model
 labels_oxford_pets = [
     'Siamese', 'Birman', 'shiba inu', 'staffordshire bull terrier', 'basset hound', 'Bombay', 'japanese chin',
     'chihuahua', 'german shorthaired', 'pomeranian', 'beagle', 'english cocker spaniel', 'american pit bull terrier',
@@ -13,6 +15,15 @@ labels_oxford_pets = [
     'samoyed', 'British Shorthair', 'great pyrenees', 'Abyssinian', 'pug', 'saint bernard', 'Russian Blue', 'scottish terrier'
 ]

+# Prepare example images
+example_images = []
+example_dir = "example_images"
+if os.path.exists(example_dir):
+    for img in os.listdir(example_dir):
+        if img.endswith(('.jpg', '.jpeg', '.png')):
+            example_images.append([os.path.join(example_dir, img)])
+
+# Classification function
 def classify_pet(image):
     vit_results = vit_classifier(image)
     vit_output = {result['label']: result['score'] for result in vit_results}
@@ -25,28 +36,16 @@ def classify_pet(image):
         "CLIP Zero-Shot Classification": clip_output
     }

-
-    ["example_images/dog1.jpeg"],
-    ["example_images/dog2.jpeg"],
-    ["example_images/leonberger.jpg"],
-    ["example_images/snow_leopard.jpeg"],
-    ["example_images/cat.jpg"]
-]
-
+# Gradio interface with example images
 iface = gr.Interface(
     fn=classify_pet,
     inputs=gr.Image(type="filepath"),
     outputs=gr.JSON(),
     title="Pet Classification Comparison",
     description="Upload an image of a pet, and compare results from a trained ViT model and a zero-shot CLIP model.",
-    examples=[
-        ["example_images/dog1.jpeg"],
-        ["example_images/dog2.jpeg"],
-        ["example_images/leonberger.jpg"],
-        ["example_images/snow_leopard.jpeg"],
-        ["example_images/cat.jpg"]
-    ],
+    examples=example_images if example_images else None,
     cache_examples=False
 )

+# Start the app
 iface.launch()
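The hunk starting at @@ -25,28 +36,16 @@ elides the middle of classify_pet, where the CLIP pipeline is actually called. For orientation only, here is a minimal sketch of how the two pipelines are typically invoked on a single image; it is not the Space's exact code, the image path is reused from the removed hard-coded examples list, and the candidate labels are a shortened stand-in for labels_oxford_pets.

# Minimal sketch, assuming the standard transformers pipeline API; not the Space's exact code.
from transformers import pipeline

vit_classifier = pipeline("image-classification", model="kuhs/vit-base-oxford-iiit-pets")
clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")

image_path = "example_images/dog1.jpeg"  # path taken from the removed hard-coded examples list

# Fine-tuned ViT: returns a list of {'label', 'score'} dicts over its trained pet classes.
vit_results = vit_classifier(image_path)

# Zero-shot CLIP: scores the image against caller-supplied labels (shortened here for illustration).
clip_results = clip_detector(image_path, candidate_labels=['beagle', 'pug', 'Siamese'])

print(vit_results)
print(clip_results)

Both calls return label/score pairs, which classify_pet reshapes into the dictionaries rendered by the gr.JSON() output.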