|
|
import os |
|
|
import numpy as np |
|
|
import tensorflow as tf |
|
|
from tensorflow.keras.preprocessing import image |
|
|
import gradio as gr |
|
|
import requests |
|
|
import json |
|
|
|
|
|
|
|
|
# Silence TensorFlow's C++ logging (3 = errors only) before any TF work runs.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

# tf.test.is_gpu_available() is deprecated and removed in recent TF releases;
# tf.config.list_physical_devices("GPU") is the supported replacement and
# returns a (possibly empty) list of visible GPU devices.
device = "cuda" if tf.config.list_physical_devices("GPU") else "cpu"

print(f"Running on: {device.upper()}")
|
|
|
|
|
|
|
|
# SECURITY: never hard-code API keys in source (the original committed a live
# Groq key). Read it from the environment instead; when absent, the Groq call
# fails and the app falls back to Hugging Face / static disease info.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

# Groq-hosted chat model used for disease analysis and the expert chat tab.
GROQ_MODEL = "llama3-70b-8192"

# Optional Hugging Face Inference API token (secondary LLM provider).
HF_API_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

# Report which providers are actually configured (no longer assumes Groq).
print(
    f"API tokens available: "
    f"Groq={'Yes' if GROQ_API_KEY else 'No'}, "
    f"HF={'Yes' if HF_API_TOKEN else 'No'}"
)
|
|
|
|
|
|
|
|
# Load the pre-trained Keras classifier from the working directory.
# preprocess_image feeds it 224x224 RGB images scaled to [0, 1].
model = tf.keras.models.load_model("Tomato_Leaf_Disease_Model.h5")
|
|
|
|
|
|
|
|
# Human-readable class names indexed by the model's output position —
# detect_disease_scaled maps argmax(prediction) straight into this list.
# NOTE(review): order is assumed to match the training label order; confirm
# against the notebook that produced Tomato_Leaf_Disease_Model.h5.
class_labels = [
    "Tomato Bacterial Spot",
    "Tomato Early Blight",
    "Tomato Late Blight",
    "Tomato Mosaic Virus",
    "Tomato Yellow Leaf Curl Virus"
]
|
|
|
|
|
|
|
|
# Static fallback knowledge base, keyed by the same label strings as
# class_labels. generate_ai_response renders these entries into a markdown
# report whenever both LLM providers (Groq, Hugging Face) are unreachable.
# Each entry carries: description, causes, and a list of recommendations.
disease_info = {
    "Tomato Bacterial Spot": {
        "description": "A bacterial disease that causes small, dark spots on leaves, stems, and fruits.",
        "causes": "Caused by Xanthomonas bacteria, spread by water splash, contaminated tools, and seeds.",
        "recommendations": [
            "Remove and destroy infected plants",
            "Rotate crops with non-solanaceous plants",
            "Use copper-based fungicides",
            "Avoid overhead irrigation"
        ]
    },
    "Tomato Early Blight": {
        "description": "A fungal disease that causes dark spots with concentric rings on lower leaves first.",
        "causes": "Caused by Alternaria solani fungus, favored by warm, humid conditions.",
        "recommendations": [
            "Remove infected leaves promptly",
            "Improve air circulation around plants",
            "Apply fungicides preventatively",
            "Mulch around plants to prevent soil splash"
        ]
    },
    "Tomato Late Blight": {
        "description": "A devastating fungal disease that causes dark, water-soaked lesions on leaves and fruits.",
        "causes": "Caused by Phytophthora infestans, favored by cool, wet conditions.",
        "recommendations": [
            "Remove and destroy infected plants immediately",
            "Apply fungicides preventatively in humid conditions",
            "Improve drainage and air circulation",
            "Plant resistant varieties when available"
        ]
    },
    "Tomato Mosaic Virus": {
        "description": "A viral disease that causes mottled green/yellow patterns on leaves and stunted growth.",
        "causes": "Caused by tobacco mosaic virus (TMV), spread by handling, tools, and sometimes seeds.",
        "recommendations": [
            "Remove and destroy infected plants",
            "Wash hands and tools after handling infected plants",
            "Control insect vectors like aphids",
            "Plant resistant varieties"
        ]
    },
    "Tomato Yellow Leaf Curl Virus": {
        "description": "A viral disease transmitted by whiteflies that causes yellowing and curling of leaves.",
        "causes": "Caused by a begomovirus, transmitted primarily by whiteflies.",
        "recommendations": [
            "Use whitefly control measures",
            "Remove and destroy infected plants",
            "Use reflective mulches to repel whiteflies",
            "Plant resistant varieties"
        ]
    }
}
|
|
|
|
|
|
|
|
def preprocess_image(img):
    """Prepare a PIL image for the classifier.

    Resizes to the model's 224x224 input, scales pixel values to [0, 1],
    and prepends a batch axis so the result feeds model.predict directly.
    """
    resized = img.resize((224, 224))
    pixels = image.img_to_array(resized) / 255.0
    batch = np.expand_dims(pixels, axis=0)
    return batch
|
|
|
|
|
|
|
|
def apply_temperature_scaling(prediction, temperature):
    """Re-sharpen or soften a probability vector via temperature scaling.

    Recovers pseudo-logits as log(p) (clamped away from zero for numeric
    safety), divides them by `temperature`, and re-applies softmax.
    T < 1 sharpens the distribution, T > 1 flattens it, and T == 1
    reproduces the input probabilities (up to floating-point error).
    """
    tiny = 1e-8
    logits = np.log(np.maximum(prediction, tiny)) / temperature
    weights = np.exp(logits)
    return weights / weights.sum()
|
|
|
|
|
|
|
|
def apply_min_max_scaling(confidence, min_conf, max_conf):
    """Linearly map `confidence` from [min_conf, max_conf] onto [0, 100].

    Values outside the range clamp to 0 or 100. Guards against a
    zero-or-negative width range — the UI sliders allow min == max, which
    made the original implementation divide by zero.

    Returns a float in [0, 100].
    """
    span = max_conf - min_conf
    if span <= 0:
        # Degenerate range: treat min_conf as a simple threshold.
        return 100.0 if confidence >= min_conf else 0.0
    norm = (confidence - min_conf) / span * 100
    return float(np.clip(norm, 0, 100))
|
|
|
|
|
|
|
|
def call_groq_api(prompt):
    """Call Groq API for detailed disease analysis and advice.

    Posts `prompt` to Groq's OpenAI-compatible chat-completions endpoint
    and returns the assistant's reply text. Returns None on any HTTP or
    network failure so the caller can fall back to another provider.
    """
    auth_headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json"
    }

    request_body = {
        "model": GROQ_MODEL,
        "messages": [
            {"role": "system", "content": "You are an expert agricultural advisor specializing in tomato farming and plant diseases."},
            {"role": "user", "content": prompt}
        ],
        "max_tokens": 800,
        "temperature": 0.7
    }

    try:
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=auth_headers,
            json=request_body,
            timeout=30
        )

        if response.status_code == 200:
            body = response.json()
            choices = body.get("choices", [])
            if choices:
                return choices[0]["message"]["content"]

        # Non-200, or a 200 with no choices: log and signal failure.
        print(f"Groq API error: {response.status_code} - {response.text}")
        return None

    except Exception as e:
        print(f"Error with Groq API: {str(e)}")
        return None
|
|
|
|
|
|
|
|
def call_hf_model(prompt, model_id="mistralai/Mistral-7B-Instruct-v0.2"):
    """Query a Hugging Face Inference API text-generation model.

    Returns the generated completion (text after the [/INST] tag), or None
    when no HF token is configured or the request fails for any reason.
    """
    if not HF_API_TOKEN:
        return None

    endpoint = f"https://api-inference.huggingface.co/models/{model_id}"
    auth = {"Authorization": f"Bearer {HF_API_TOKEN}"}

    # Mistral-instruct chat template: wrap the user turn in [INST] ... [/INST].
    formatted_prompt = f"""<s>[INST] {prompt} [/INST]"""

    request_body = {
        "inputs": formatted_prompt,
        "parameters": {
            "max_new_tokens": 500,
            "temperature": 0.7,
            "top_p": 0.95,
            "do_sample": True
        }
    }

    try:
        response = requests.post(endpoint, headers=auth, json=request_body, timeout=30)

        if response.status_code == 200:
            parsed = response.json()
            if isinstance(parsed, list) and parsed:
                first = parsed[0]
                if "generated_text" in first:
                    # The model echoes the prompt; keep only the completion.
                    return first["generated_text"].split("[/INST]")[-1].strip()

        return None

    except Exception as e:
        print(f"Exception when calling HF model: {str(e)}")
        return None
|
|
|
|
|
|
|
|
def call_ai_model(prompt):
    """Call AI models with fallback mechanisms.

    Tries each configured provider in order (Groq first, then Hugging
    Face) and returns the first non-empty answer. When every provider
    fails, returns a canned apology string — callers detect that string
    to switch to their static fallbacks.
    """
    for provider in (call_groq_api, call_hf_model):
        answer = provider(prompt)
        if answer:
            return answer

    return "Sorry, I'm having trouble connecting to the AI service. Using fallback information instead."
|
|
|
|
|
|
|
|
def generate_ai_response(disease_name, confidence):
    """Generate a detailed AI response about the detected disease.

    Asks the LLM providers for a full write-up; when they are all
    unreachable, renders a markdown report from the static disease_info
    table instead.
    """
    fallback_entry = disease_info.get(disease_name, {
        "description": "Information not available for this disease.",
        "causes": "Unknown causes.",
        "recommendations": ["Consult with a local agricultural extension service."]
    })

    prompt = (
        f"You are an agricultural expert advisor. A tomato plant disease has been detected: {disease_name} "
        f"with {confidence:.2f}% confidence. "
        f"Provide a detailed analysis including: "
        f"1) A brief description of the disease "
        f"2) What causes it and how it spreads "
        f"3) The impact on tomato plants and yield "
        f"4) Detailed treatment options (both organic and chemical) "
        f"5) Prevention strategies for future crops "
        f"Format your response in clear sections with bullet points where appropriate."
    )

    ai_response = call_ai_model(prompt)

    # call_ai_model signals total provider failure with this canned apology.
    if "Sorry, I'm having trouble" in ai_response:
        bullet_list = chr(10).join(f"- {rec}" for rec in fallback_entry['recommendations'])
        ai_response = f"""
# Disease: {disease_name}

## Description
{fallback_entry['description']}

## Causes
{fallback_entry.get('causes', 'Information not available.')}

## Recommended Treatment
{bullet_list}

*Note: This is fallback information. For more detailed advice, please try again later when the AI service is available.*
"""

    return ai_response
|
|
|
|
|
|
|
|
def chat_with_expert(message, chat_history):
    """Handle chat interactions with farmers about agricultural topics.

    Gradio handler: answers `message`, appends (question, answer) to
    `chat_history`, and returns ("", history) so the input box clears.
    Blank messages are ignored.
    """
    if not message.strip():
        return "", chat_history

    # Replay up to the last three exchanges so the model keeps context.
    recent_turns = chat_history[-3:]
    context = "\n".join([f"Farmer: {q}\nExpert: {a}" for q, a in recent_turns])

    prompt = (
        f"You are an expert agricultural advisor specializing in tomato farming and plant diseases. "
        f"You provide helpful, accurate, and practical advice to farmers. "
        f"Always be respectful and considerate of farmers' knowledge while providing expert guidance. "
        f"If you're unsure about something, acknowledge it and provide the best information you can. "
        f"Previous conversation:\n{context}\n\n"
        f"Farmer's new question: {message}\n\n"
        f"Provide a helpful, informative response about farming, focusing on tomatoes if relevant."
    )

    response = call_ai_model(prompt)

    # Swap the generic provider-failure string for a chat-appropriate one.
    if "Sorry, I'm having trouble" in response:
        response = "I apologize, but I'm having trouble connecting to my knowledge base at the moment. Please try again later, or ask a different question about tomato farming or plant diseases."

    chat_history.append((message, response))
    return "", chat_history
|
|
|
|
|
|
|
|
def detect_disease_scaled(img, scaling_method, temperature, min_conf, max_conf):
    """Classify an uploaded leaf image and format results for the UI.

    Runs the CNN, applies the user-selected confidence scaling method,
    and asks the AI assistant for a detailed analysis of the predicted
    disease.

    Returns (headline, raw-confidence text, markdown analysis).
    """
    probs = model.predict(preprocess_image(img))[0]
    raw_confidence = np.max(probs) * 100
    disease_name = class_labels[np.argmax(probs)]

    # Adjust the headline confidence according to the chosen method.
    if scaling_method == "Temperature Scaling":
        adjusted_confidence = np.max(apply_temperature_scaling(probs, temperature)) * 100
    elif scaling_method == "Min-Max Normalization":
        adjusted_confidence = apply_min_max_scaling(raw_confidence, min_conf, max_conf)
    else:
        adjusted_confidence = raw_confidence

    ai_response = generate_ai_response(disease_name, adjusted_confidence)

    headline = f"{disease_name} (Confidence: {adjusted_confidence:.2f}%)"
    raw_text = f"Raw Confidence: {raw_confidence:.2f}%"
    return headline, raw_text, ai_response
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
# Two tabs: image-based disease detection and a free-form expert chat.
# Component construction order inside the context managers defines the layout.
with gr.Blocks() as demo:
    gr.Markdown("# 🍅 EvSentry8: Tomato Disease Detection with AI Assistant")

    with gr.Tab("Disease Detection"):
        with gr.Row():
            # Left column: image upload + confidence-scaling controls.
            with gr.Column():
                image_input = gr.Image(type="pil", label="Upload a Tomato Leaf Image")

                scaling_method = gr.Radio(
                    ["Temperature Scaling", "Min-Max Normalization"],
                    label="Confidence Scaling Method",
                    value="Temperature Scaling"
                )
                # Temperature applies to "Temperature Scaling"; min/max apply
                # to "Min-Max Normalization". All three are always passed to
                # detect_disease_scaled, which picks the relevant ones.
                temperature_slider = gr.Slider(0.5, 2.0, step=0.1, label="Temperature", value=1.0)
                min_conf_slider = gr.Slider(0, 100, step=1, label="Min Confidence", value=20)
                max_conf_slider = gr.Slider(0, 100, step=1, label="Max Confidence", value=90)

                detect_button = gr.Button("Detect Disease")

            # Right column: prediction, raw confidence, and AI analysis.
            with gr.Column():
                disease_output = gr.Textbox(label="Detected Disease & Adjusted Confidence")
                raw_confidence_output = gr.Textbox(label="Raw Confidence")
                ai_response_output = gr.Markdown(label="AI Assistant's Analysis & Recommendations")

    with gr.Tab("Chat with Expert"):
        gr.Markdown("# 💬 Chat with Agricultural Expert")
        gr.Markdown("Ask any questions about tomato farming, diseases, or agricultural practices.")

        chatbot = gr.Chatbot(height=400)

        with gr.Row():
            chat_input = gr.Textbox(
                label="Your Question",
                placeholder="Ask about tomato farming, diseases, or agricultural practices...",
                lines=2
            )
            chat_button = gr.Button("Send")

        gr.Markdown("""
        ### Example Questions:
        - How do I identify tomato bacterial spot?
        - What's the best way to prevent late blight?
        - How often should I water my tomato plants?
        - What are the signs of nutrient deficiency in tomatoes?
        """)

    # Wire the detection button to the classifier.
    detect_button.click(
        detect_disease_scaled,
        inputs=[image_input, scaling_method, temperature_slider, min_conf_slider, max_conf_slider],
        outputs=[disease_output, raw_confidence_output, ai_response_output]
    )

    # Chat submits via the Send button or pressing Enter in the textbox;
    # both return ("", history) so the input clears after each question.
    chat_button.click(
        fn=chat_with_expert,
        inputs=[chat_input, chatbot],
        outputs=[chat_input, chatbot]
    )

    chat_input.submit(
        fn=chat_with_expert,
        inputs=[chat_input, chatbot],
        outputs=[chat_input, chatbot]
    )

# Launch the app (blocking call; serves the UI on the default port).
demo.launch()
|
|
|