# plant3/app.py — Plant disease classifier + LLM explainer (Gradio app)
import os

import gradio as gr
import numpy as np
import requests
from PIL import Image
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
# Load the trained Keras classification model from disk.
model = load_model("model.h5")

# Hugging Face Inference API endpoint for the explanation LLM (Mistral).
# NOTE(security): never hard-code API tokens in source. Read the token from
# the environment (set HF_TOKEN in the Space secrets / host settings); the
# placeholder fallback keeps the original behavior if it is unset.
HF_TOKEN = os.environ.get("HF_TOKEN", "hf_your_token_here")
LLM_API = (
    "https://api-inference.huggingface.co/models/"
    "mistralai/Mistral-7B-Instruct-v0.1"
)
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
# Prediction and explanation function
def classify_and_explain(image):
    """Classify a leaf image and ask the hosted LLM to explain the result.

    Args:
        image: PIL.Image uploaded by the user (any mode or size).

    Returns:
        tuple[str, str]: the predicted class label and a plain-text
        explanation / treatment advice from the LLM (or an error note
        if the LLM call fails — the classification is still returned).
    """
    # Force 3-channel RGB so grayscale/RGBA uploads don't break the model,
    # then match the resolution the model was trained on.
    image = image.convert("RGB").resize((225, 225))
    img_array = img_to_array(image) / 255.0  # scale pixels to [0, 1]
    img_array = np.expand_dims(img_array, axis=0)  # add batch dimension

    prediction = model.predict(img_array)
    predicted_class = int(np.argmax(prediction, axis=1)[0])
    class_label = f"Class_{predicted_class}"  # TODO: map to real class names

    # Ask the LLM for an explanation of the predicted disease.
    prompt = f"Explain the plant disease {class_label} and how to treat it."
    try:
        response = requests.post(
            LLM_API,
            headers=headers,
            json={"inputs": prompt},
            timeout=60,  # don't hang the UI on a slow inference endpoint
        )
        response.raise_for_status()
        payload = response.json()  # parse once; response shape varies
        if isinstance(payload, list) and payload:
            llm_text = payload[0].get("generated_text", str(payload))
        else:
            llm_text = str(payload)  # e.g. {"error": ...} while model loads
    except requests.RequestException as exc:
        # Degrade gracefully: still return the classification result.
        llm_text = f"LLM explanation unavailable: {exc}"

    return class_label, llm_text
# Gradio UI: one image input, two text outputs (label + explanation).
interface = gr.Interface(
    fn=classify_and_explain,
    inputs=gr.Image(type="pil"),
    outputs=["text", "text"],
    title="Plant Disease Predictor & Explainer",
    description=(
        "Upload a leaf image to detect plant disease and get "
        "treatment advice using LLM"
    ),
)

# Launch only when executed as a script (how HF Spaces runs app.py);
# importing this module elsewhere no longer starts a server.
if __name__ == "__main__":
    interface.launch()