Update app.py
app.py CHANGED
@@ -1,35 +1,35 @@
 import gradio as gr
 from transformers import pipeline
 
 vit_classifier = pipeline("image-classification", model="ElioBaserga/fruits-and-vegetables-vit")
 clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")
 
 labels_oxford_pets = ['apple', 'banana', 'beetroot', 'bell pepper', 'cabbage', 'capsicum', 'carrot', 'cauliflower', 'chilli pepper', 'corn', 'cucumber', 'eggplant', 'garlic', 'ginger', 'grapes', 'jalepeno', 'kiwi', 'lemon', 'lettuce', 'mango', 'onion', 'orange', 'paprika', 'pear', 'peas', 'pineapple', 'pomegranate', 'potato', 'raddish', 'soy beans', 'spinach', 'sweetcorn', 'sweetpotato', 'tomato', 'turnip', 'watermelon']
 
 def classify_pet(image):
     vit_results = vit_classifier(image)
     vit_output = {result['label']: result['score'] for result in vit_results}
 
     clip_results = clip_detector(image, candidate_labels=labels_oxford_pets)
     clip_output = {result['label']: result['score'] for result in clip_results}
 
     return {"ViT Classification": vit_output, "CLIP Zero-Shot Classification": clip_output}
 
 example_images = [
-    ["
-    ["
-    ["
-    ["
-    ["
+    ["examples/Image_1.jpg"],
+    ["examples/Image_3.jpg"],
+    ["examples/Image_6.jpg"],
+    ["examples/Image_9.jpg"],
+    ["examples/Image_5.jpg"]
 ]
 
 iface = gr.Interface(
     fn=classify_pet,
     inputs=gr.Image(type="filepath"),
     outputs=gr.JSON(),
     title="Fruit and Vegetable Classification Comparison",
     description="Upload an image of a fruit or vegetable, and compare results from a trained ViT model and a zero-shot CLIP model.",
     examples=example_images
 )
 
 iface.launch(share=True)
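For reference, both Transformers pipelines used above return a list of {'label': str, 'score': float} dicts, which is the shape the dict comprehensions in classify_pet flatten into a single {label: score} mapping. A minimal standalone sketch of that behaviour (the image path and the short candidate-label list are placeholders, not files or values from this repo):

from transformers import pipeline

# Zero-shot classification against a custom label set, as app.py does with
# its fruit/vegetable list. Each pipeline call returns a list of dicts of
# the form {'score': float, 'label': str}, ordered by descending score.
clip = pipeline(task="zero-shot-image-classification",
                model="openai/clip-vit-large-patch14")
results = clip("path/to/image.jpg",  # placeholder: any local image file
               candidate_labels=["apple", "banana", "tomato"])

# Flatten to {label: score}, exactly like the comprehensions in classify_pet.
print({r["label"]: r["score"] for r in results})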