Monyrak committed on
Commit
4ef939e
·
verified ·
1 Parent(s): f35bda7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -20
app.py CHANGED
@@ -1,12 +1,10 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
- import os
4
 
5
- # Modelle laden
6
  vit_classifier = pipeline("image-classification", model="kuhs/vit-base-oxford-iiit-pets")
7
  clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")
8
 
9
- # Label-Liste für das CLIP-Modell
10
  labels_oxford_pets = [
11
  'Siamese', 'Birman', 'shiba inu', 'staffordshire bull terrier', 'basset hound', 'Bombay', 'japanese chin',
12
  'chihuahua', 'german shorthaired', 'pomeranian', 'beagle', 'english cocker spaniel', 'american pit bull terrier',
@@ -15,15 +13,6 @@ labels_oxford_pets = [
15
  'samoyed', 'British Shorthair', 'great pyrenees', 'Abyssinian', 'pug', 'saint bernard', 'Russian Blue', 'scottish terrier'
16
  ]
17
 
18
- # Beispielbilder vorbereiten
19
- example_images = []
20
- example_dir = "example_images"
21
- if os.path.exists(example_dir):
22
- for img in os.listdir(example_dir):
23
- if img.endswith(('.jpg', '.jpeg', '.png')):
24
- example_images.append([os.path.join(example_dir, img)])
25
-
26
- # Klassifikationsfunktion
27
  def classify_pet(image):
28
  vit_results = vit_classifier(image)
29
  vit_output = {result['label']: result['score'] for result in vit_results}
@@ -31,21 +20,23 @@ def classify_pet(image):
31
  clip_results = clip_detector(image, candidate_labels=labels_oxford_pets)
32
  clip_output = {result['label']: result['score'] for result in clip_results}
33
 
34
- return {
35
- "ViT Classification": vit_output,
36
- "CLIP Zero-Shot Classification": clip_output
37
- }
 
 
 
 
 
38
 
39
- # Gradio-Interface mit Beispielbildern
40
  iface = gr.Interface(
41
  fn=classify_pet,
42
  inputs=gr.Image(type="filepath"),
43
  outputs=gr.JSON(),
44
  title="Pet Classification Comparison",
45
  description="Upload an image of a pet, and compare results from a trained ViT model and a zero-shot CLIP model.",
46
- examples=example_images if example_images else None,
47
- cache_examples=False
48
  )
49
 
50
- # App starten
51
  iface.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
 
3
 
4
+ # Load models
5
  vit_classifier = pipeline("image-classification", model="kuhs/vit-base-oxford-iiit-pets")
6
  clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")
7
 
 
8
  labels_oxford_pets = [
9
  'Siamese', 'Birman', 'shiba inu', 'staffordshire bull terrier', 'basset hound', 'Bombay', 'japanese chin',
10
  'chihuahua', 'german shorthaired', 'pomeranian', 'beagle', 'english cocker spaniel', 'american pit bull terrier',
 
13
  'samoyed', 'British Shorthair', 'great pyrenees', 'Abyssinian', 'pug', 'saint bernard', 'Russian Blue', 'scottish terrier'
14
  ]
15
 
 
 
 
 
 
 
 
 
 
def classify_pet(image):
    """Classify a pet image with both models and return their predictions.

    Runs the fine-tuned ViT classifier and the zero-shot CLIP detector on the
    same image, then returns one dict per model mapping each predicted label
    to its confidence score.
    """
    def as_scores(predictions):
        # Flatten a pipeline result list into a {label: score} mapping.
        return {entry['label']: entry['score'] for entry in predictions}

    return {
        "ViT Classification": as_scores(vit_classifier(image)),
        "CLIP Zero-Shot Classification": as_scores(
            clip_detector(image, candidate_labels=labels_oxford_pets)
        ),
    }
# Sample images offered in the Gradio UI; each inner list is one example row.
_EXAMPLE_FILES = (
    "dog1.jpeg",
    "dog2.jpeg",
    "leonberger.jpg",
    "snow_leopard.jpeg",
    "cat.jpg",
)
example_images = [[f"example_images/{name}"] for name in _EXAMPLE_FILES]
# Assemble the web UI: one image in, a JSON comparison of both models out.
_interface_config = dict(
    fn=classify_pet,
    title="Pet Classification Comparison",
    description="Upload an image of a pet, and compare results from a trained ViT model and a zero-shot CLIP model.",
    inputs=gr.Image(type="filepath"),
    outputs=gr.JSON(),
    examples=example_images,
)
iface = gr.Interface(**_interface_config)

# Start the Gradio server.
iface.launch()