dewiri committed on
Commit
885dda4
·
verified ·
1 Parent(s): ab3cb42

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -1,17 +1,17 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # ViT-Modell (dein trainiertes Modell von Hugging Face)
5
  vit_classifier = pipeline(
6
  "image-classification",
7
  model="dewiri/vit-base-oxford-iiit-pets",
8
  top_k=3,
9
- device=0 # GPU, falls verfügbar
10
  )
11
 
12
- # SIGLIP für Zero-Shot Klassifikation
13
- siglip_detector = pipeline(
14
- model="google/siglip-so400m-patch14-384",
15
  task="zero-shot-image-classification",
16
  top_k=3,
17
  device=0
@@ -29,12 +29,12 @@ def classify_pet(image):
29
  vit_results = vit_classifier(image)
30
  vit_output = {res['label']: round(res['score'], 3) for res in vit_results}
31
 
32
- siglip_results = siglip_detector(image, candidate_labels=labels_oxford_pets)
33
- siglip_output = {res['label']: round(res['score'], 3) for res in siglip_results}
34
 
35
  return {
36
  "ViT Classification (dewiri)": vit_output,
37
- "SIGLIP Zero-Shot Classification": siglip_output
38
  }
39
 
40
  example_images = [
@@ -50,7 +50,7 @@ iface = gr.Interface(
50
  inputs=gr.Image(type="filepath"),
51
  outputs=gr.JSON(),
52
  title="Pet Classification Comparison",
53
- description="Compare a fine-tuned ViT model (dewiri/vit-base-oxford-iiit-pets) with a SIGLIP zero-shot classifier.",
54
  examples=example_images
55
  )
56
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ # ViT-Modell (dein trainiertes Modell)
5
  vit_classifier = pipeline(
6
  "image-classification",
7
  model="dewiri/vit-base-oxford-iiit-pets",
8
  top_k=3,
9
+ device=0
10
  )
11
 
12
+ # CLIP-Modell für Zero-Shot Klassifikation
13
+ clip_detector = pipeline(
14
+ model="openai/clip-vit-large-patch14",
15
  task="zero-shot-image-classification",
16
  top_k=3,
17
  device=0
 
29
  vit_results = vit_classifier(image)
30
  vit_output = {res['label']: round(res['score'], 3) for res in vit_results}
31
 
32
+ clip_results = clip_detector(image, candidate_labels=labels_oxford_pets)
33
+ clip_output = {res['label']: round(res['score'], 3) for res in clip_results}
34
 
35
  return {
36
  "ViT Classification (dewiri)": vit_output,
37
+ "CLIP Zero-Shot Classification": clip_output
38
  }
39
 
40
  example_images = [
 
50
  inputs=gr.Image(type="filepath"),
51
  outputs=gr.JSON(),
52
  title="Pet Classification Comparison",
53
+ description="Compare a fine-tuned ViT model (dewiri/vit-base-oxford-iiit-pets) with OpenAI's CLIP zero-shot classifier.",
54
  examples=example_images
55
  )
56