Fadri committed on
Commit
c2fdcf4
·
verified ·
1 Parent(s): ae2d93e

Delete app2.py

Browse files
Files changed (1) hide show
  1. app2.py +0 -36
app2.py DELETED
@@ -1,36 +0,0 @@
1
- import gradio as gr
2
- from transformers import pipeline
3
-
4
# Load models
# Fine-tuned ViT image classifier trained on the Oxford-IIIT Pets breeds.
vit_classifier = pipeline("image-classification", model="LukeXOTWOD/vit-base-oxford-iiit-pets")
# CLIP used zero-shot: it scores arbitrary candidate labels with no fine-tuning.
clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")

# Candidate breed names handed to the zero-shot CLIP detector.
# NOTE(review): casing is mixed (some capitalized, some lowercase) — presumably
# this mirrors the ViT model's own label set so the two outputs are comparable;
# confirm against the model card.
labels_oxford_pets = [
    'Siamese', 'Birman', 'shiba inu', 'staffordshire bull terrier', 'basset hound', 'Bombay', 'japanese chin',
    'chihuahua', 'german shorthaired', 'pomeranian', 'beagle', 'english cocker spaniel', 'american pit bull terrier',
    'Ragdoll', 'Persian', 'Egyptian Mau', 'miniature pinscher', 'Sphynx', 'Maine Coon', 'keeshond', 'yorkshire terrier',
    'havanese', 'leonberger', 'wheaten terrier', 'american bulldog', 'english setter', 'boxer', 'newfoundland', 'Bengal',
    'samoyed', 'British Shorthair', 'great pyrenees', 'Abyssinian', 'pug', 'saint bernard', 'Russian Blue', 'scottish terrier'
]
15
-
16
def classify_pet(image):
    """Classify a pet image with both models and return their label->score maps.

    The same image is scored by the fine-tuned ViT pipeline and by CLIP
    restricted to the Oxford-IIIT Pets breed names, so the two result
    dictionaries can be compared side by side.
    """
    # Supervised ViT predictions, flattened into {label: score}.
    supervised_scores = {pred['label']: pred['score'] for pred in vit_classifier(image)}

    # Zero-shot CLIP predictions over the same breed vocabulary.
    zero_shot_scores = {
        pred['label']: pred['score']
        for pred in clip_detector(image, candidate_labels=labels_oxford_pets)
    }

    return {
        "ViT Transfer Learning Model": supervised_scores,
        "CLIP Zero-Shot Model": zero_shot_scores,
    }
27
-
28
# Wire the comparison function into a simple Gradio UI: one image in,
# one JSON payload out containing both models' score maps.
demo = gr.Interface(
    fn=classify_pet,
    inputs=gr.Image(type="filepath"),
    outputs=gr.JSON(),
    title="Pet Classification Comparison",
    description="Upload an image of a pet, and compare predictions from a trained ViT model and a zero-shot CLIP model.",
)

demo.launch()