Update app.py
app.py CHANGED

@@ -1,17 +1,17 @@
 import gradio as gr
 from transformers import pipeline

-# ViT model (your trained model
+# ViT model (your trained model)
 vit_classifier = pipeline(
     "image-classification",
     model="dewiri/vit-base-oxford-iiit-pets",
     top_k=3,
-    device=0
+    device=0
 )

-#
-
-    model="
+# CLIP model for zero-shot classification
+clip_detector = pipeline(
+    model="openai/clip-vit-large-patch14",
     task="zero-shot-image-classification",
     top_k=3,
     device=0

@@ -29,12 +29,12 @@ def classify_pet(image):
     vit_results = vit_classifier(image)
     vit_output = {res['label']: round(res['score'], 3) for res in vit_results}

-
-
+    clip_results = clip_detector(image, candidate_labels=labels_oxford_pets)
+    clip_output = {res['label']: round(res['score'], 3) for res in clip_results}

     return {
         "ViT Classification (dewiri)": vit_output,
-        "
+        "CLIP Zero-Shot Classification": clip_output
     }

 example_images = [

@@ -50,7 +50,7 @@ iface = gr.Interface(
     inputs=gr.Image(type="filepath"),
     outputs=gr.JSON(),
     title="Pet Classification Comparison",
-    description="Compare a fine-tuned ViT model (dewiri/vit-base-oxford-iiit-pets) with
+    description="Compare a fine-tuned ViT model (dewiri/vit-base-oxford-iiit-pets) with OpenAI's CLIP zero-shot classifier.",
     examples=example_images
 )
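The added lines in classify_pet reference labels_oxford_pets, which is defined outside the changed hunks (presumably in the unchanged lines 18-28 of app.py). A minimal sketch of what that definition plausibly looks like, assuming the candidate labels are the Oxford-IIIT Pets breed names; the entries below are illustrative, not copied from the file:

# Candidate labels for CLIP zero-shot classification (sketch; the real
# file defines the full list). The Oxford-IIIT Pets dataset has 37 breeds.
labels_oxford_pets = [
    "Abyssinian", "Bengal", "Birman", "Bombay", "Persian", "Siamese",
    "Sphynx", "beagle", "boxer", "chihuahua", "pug", "samoyed",
    # ... remaining breeds of the 37-class dataset
]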
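Both pipelines hard-code device=0, which assumes a CUDA GPU is present; on CPU-only hardware the pipeline constructor will fail. A hedged variant that falls back to CPU (a suggestion, not part of this commit):

import torch
from transformers import pipeline

# device=0 selects the first GPU; -1 means CPU in the pipeline API.
device = 0 if torch.cuda.is_available() else -1

clip_detector = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-large-patch14",
    top_k=3,
    device=device,
)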
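With those pieces in place, the comparison function can be smoke-tested outside the Gradio UI. The image path below is a placeholder, not one of the file's example_images:

# Hypothetical quick check; "examples/cat.jpg" is a placeholder path.
result = classify_pet("examples/cat.jpg")
print(result["ViT Classification (dewiri)"])    # fine-tuned ViT top-3
print(result["CLIP Zero-Shot Classification"])  # CLIP zero-shot top-3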