import gradio as gr
from transformers import pipeline
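
# Image-classification pipeline using a ViT model trained on the beans dataset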
vit_classifier = pipeline("image-classification", model="LindiSimon/vit-beans-model")
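
# CLIP pipeline used for zero-shot image classification over the same labels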
clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")
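
# Candidate labels for CLIP, matching the classes of the beans dataset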
labels_beans = ["angular_leaf_spot", "bean_rust", "healthy"]
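

# Run both models on a single image and return their scores side by side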
def classify_bean(image):
    # Supervised predictions from the trained ViT classifier
    vit_results = vit_classifier(image)
    vit_output = {result['label']: result['score'] for result in vit_results}

    # Zero-shot predictions from CLIP, restricted to the bean labels
    clip_results = clip_detector(image, candidate_labels=labels_beans)
    clip_output = {result['label']: result['score'] for result in clip_results}

    return {"ViT Classification": vit_output, "CLIP Zero-Shot Classification": clip_output}
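

# Example image shown in the Gradio UI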
examples = [["example_input.png"]]
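
# Gradio interface: image in, JSON with both models' scores out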
iface = gr.Interface(
    fn=classify_bean,
    inputs=gr.Image(type="filepath"),
    outputs=gr.JSON(),
    title="Bean Disease Classification",
    description="Comparison of a trained ViT model with CLIP for bean disease classification.",
    examples=examples,
)
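
# Launch the Gradio app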
iface.launch()