import os

import gradio as gr
import numpy as np
import requests
from PIL import Image
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
| |
| |
# Load the trained image-classification model from disk.
# NOTE(review): assumes model.h5 sits next to this script — confirm deploy layout.
model = load_model("model.h5")


# SECURITY: never hard-code a real token in source control. Read it from the
# environment; the placeholder fallback keeps local testing behavior unchanged.
HF_TOKEN = os.environ.get("HF_TOKEN", "hf_your_token_here")

# Hugging Face Inference API endpoint for the explanation LLM.
LLM_API = (
    "https://api-inference.huggingface.co/models/"
    "mistralai/Mistral-7B-Instruct-v0.1"
)
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
| |
| |
def classify_and_explain(image):
    """Classify a leaf image and fetch a treatment explanation from the LLM.

    Args:
        image: PIL.Image.Image provided by the Gradio image widget.

    Returns:
        Tuple of (class_label, llm_text) — the predicted class name and the
        LLM-generated explanation (or an error message if the API call fails).
    """
    # Preprocess: resize to the model's input size and scale pixels to [0, 1].
    # NOTE(review): 225x225 is unusual (224 is the common size) — confirm it
    # matches the input shape the model was trained with.
    image = image.resize((225, 225))
    img_array = img_to_array(image) / 255.0
    img_array = np.expand_dims(img_array, axis=0)  # add batch dimension

    prediction = model.predict(img_array)
    predicted_class = int(np.argmax(prediction, axis=1)[0])
    # TODO: map the numeric index to human-readable class names if available.
    class_label = f"Class_{predicted_class}"

    prompt = f"Explain the plant disease {class_label} and how to treat it."
    try:
        response = requests.post(
            LLM_API,
            headers=headers,
            json={"inputs": prompt},
            timeout=60,  # don't hang the Gradio UI forever on a slow endpoint
        )
        response.raise_for_status()
        payload = response.json()  # parse once, reuse below
        llm_text = (
            payload[0]["generated_text"] if isinstance(payload, list) else payload
        )
    except (requests.RequestException, KeyError, IndexError, ValueError) as exc:
        # Degrade gracefully: still return the classification even if the
        # explanation service is down or returns an unexpected shape.
        llm_text = f"LLM explanation unavailable: {exc}"

    return class_label, llm_text
| |
| |
# Build the Gradio UI: one image input, two text outputs
# (predicted class label, LLM-generated explanation).
interface = gr.Interface(
    fn=classify_and_explain,
    inputs=gr.Image(type="pil"),
    outputs=["text", "text"],
    title="Plant Disease Predictor & Explainer",
    description=(
        "Upload a leaf image to detect plant disease and get "
        "treatment advice using LLM"
    ),
)
interface.launch()