dewiri committed on
Commit
c6ab637
·
verified ·
1 Parent(s): 885dda4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -23
app.py CHANGED
@@ -1,21 +1,9 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
# Fine-tuned ViT classifier (author's own trained checkpoint).
vit_classifier = pipeline(
    task="image-classification",
    model="dewiri/vit-base-oxford-iiit-pets",
    top_k=3,
    device=0,  # NOTE(review): hard-codes GPU 0 — fails on CPU-only hosts; confirm deployment target
)

# CLIP model used for zero-shot comparison against the fine-tuned ViT.
clip_detector = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-large-patch14",
    top_k=3,
    device=0,
)
19
 
20
  labels_oxford_pets = [
21
  'Siamese', 'Birman', 'shiba inu', 'staffordshire bull terrier', 'basset hound', 'Bombay', 'japanese chin',
@@ -27,15 +15,12 @@ labels_oxford_pets = [
27
 
28
def classify_pet(image):
    """Run both classifiers on *image* and return their scores side by side.

    Returns a dict with one entry per model; each value maps a predicted
    label to its score rounded to 3 decimal places.
    """
    scores = {}

    vit_predictions = vit_classifier(image)
    scores["ViT Classification (dewiri)"] = {
        pred["label"]: round(pred["score"], 3) for pred in vit_predictions
    }

    clip_predictions = clip_detector(image, candidate_labels=labels_oxford_pets)
    scores["CLIP Zero-Shot Classification"] = {
        pred["label"]: round(pred["score"], 3) for pred in clip_predictions
    }

    return scores
39
 
40
  example_images = [
41
  ["example_images/dog1.jpeg"],
@@ -50,8 +35,8 @@ iface = gr.Interface(
50
  inputs=gr.Image(type="filepath"),
51
  outputs=gr.JSON(),
52
  title="Pet Classification Comparison",
53
- description="Compare a fine-tuned ViT model (dewiri/vit-base-oxford-iiit-pets) with OpenAI's CLIP zero-shot classifier.",
54
  examples=example_images
55
  )
56
 
57
- iface.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
# Load models: the fine-tuned ViT and the zero-shot CLIP baseline.
vit_classifier = pipeline(
    task="image-classification",
    model="dewiri/vit-base-oxford-iiit-pets",
)
clip_detector = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-large-patch14",
)
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  labels_oxford_pets = [
9
  'Siamese', 'Birman', 'shiba inu', 'staffordshire bull terrier', 'basset hound', 'Bombay', 'japanese chin',
 
15
 
16
def classify_pet(image):
    """Classify *image* with both models.

    Returns a dict keyed by model title; each value maps predicted
    labels to their raw (unrounded) scores.
    """
    outputs = {}

    for title, results in (
        ("ViT Classification", vit_classifier(image)),
        ("CLIP Zero-Shot Classification",
         clip_detector(image, candidate_labels=labels_oxford_pets)),
    ):
        outputs[title] = {entry["label"]: entry["score"] for entry in results}

    return outputs
 
 
 
24
 
25
  example_images = [
26
  ["example_images/dog1.jpeg"],
 
35
  inputs=gr.Image(type="filepath"),
36
  outputs=gr.JSON(),
37
  title="Pet Classification Comparison",
38
+ description="Upload an image of a pet, and compare results from a trained ViT model and a zero-shot CLIP model.",
39
  examples=example_images
40
  )
41
 
42
+ iface.launch()