dewiri committed on
Commit
d2dcfe8
·
verified ·
1 Parent(s): 458e57c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -0
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+
4
+ # load models
5
+ vit_classifier = pipeline("image-classification", model="dewiri/vit-base-oxford-iiit-pets")
6
+ clip_detector = pipeline("zero-shot-image-classification",model="openai/clip-vit-large-patch14"
7
+ )
8
+
9
+ # define labels from oxford pets dataset
10
+ labels_oxford_pets = [
11
+ 'Siamese', 'Birman', 'shiba inu', 'staffordshire bull terrier', 'basset hound', 'Bombay', 'japanese chin',
12
+ 'chihuahua', 'german shorthaired', 'pomeranian', 'beagle', 'english cocker spaniel', 'american pit bull terrier',
13
+ 'Ragdoll', 'Persian', 'Egyptian Mau', 'miniature pinscher', 'Sphynx', 'Maine Coon', 'keeshond', 'yorkshire terrier',
14
+ 'havanese', 'leonberger', 'wheaten terrier', 'american bulldog', 'english setter', 'boxer', 'newfoundland', 'Bengal',
15
+ 'samoyed', 'British Shorthair', 'great pyrenees', 'Abyssinian', 'pug', 'saint bernard', 'Russian Blue', 'scottish terrier'
16
+ ]
17
+
18
+ def classify_pet(image):
19
+ vit_results = vit_classifier(image)
20
+ vit_output = {result['label']: result['score'] for result in vit_results}
21
+
22
+ clip_results = clip_detector(image, candidate_labels=labels_oxford_pets)
23
+ clip_output = {result['label']: result['score'] for result in clip_results}
24
+
25
+ return {"ViT Classification": vit_output, "CLIP Zero-Shot Classification": clip_output}
26
+
27
+ # define example images for testing
28
+ example_images = [
29
+ ["example_images/dog1.jpeg"],
30
+ ["example_images/dog2.jpeg"],
31
+ ["example_images/leonberger.jpg"],
32
+ ["example_images/snow_leopard.jpeg"],
33
+ ["example_images/cat.jpg"]
34
+ ]
35
+
36
+ # create the gradio interface
37
+ iface = gr.Interface(
38
+ fn=classify_pet,
39
+ inputs=gr.Image(type="filepath"),
40
+ outputs=gr.JSON(),
41
+ title="Pet Classification Comparison",
42
+ description="Upload an image of a pet, and compare results from a trained ViT model and a zero-shot CLIP model.",
43
+ examples=example_images
44
+ )
45
+
46
+ iface.launch()