File size: 14,286 Bytes
5540d77
01799e3
29a3fa4
 
01799e3
9678781
ee1f474
01799e3
 
 
 
 
 
9678781
 
 
 
 
 
 
 
01799e3
 
 
 
 
 
 
 
 
 
 
 
9678781
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01799e3
 
 
 
 
5540d77
01799e3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9678781
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01799e3
 
 
 
 
 
 
9678781
01799e3
 
 
 
 
 
 
 
9678781
 
 
 
01799e3
 
9678781
01799e3
ee1f474
 
 
 
 
 
 
 
9678781
ee1f474
 
 
 
9678781
ee1f474
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9678781
 
01799e3
 
 
9678781
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01799e3
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import gradio as gr
import requests
import json

# Suppress TensorFlow C++ log spam (3 = errors only); must be set before TF does work.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# tf.test.is_gpu_available() is deprecated (removed in recent TF2);
# querying the physical device list is the supported replacement.
device = "cuda" if tf.config.list_physical_devices("GPU") else "cpu"
print(f"Running on: {device.upper()}")

# Groq API key for AI assistant.
# SECURITY: the key was previously hard-coded here. Secrets must never live in
# source control — read it from the environment instead. If unset, Groq calls
# will fail and the code falls back to Hugging Face / local disease_info.
GROQ_API_KEY = os.getenv("GROQ_API_KEY", "")
GROQ_MODEL = "llama3-70b-8192"  # Using Llama 3 70B model

# Fallback to Hugging Face token if Groq fails
HF_API_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
print(f"API tokens available: Groq={'Yes' if GROQ_API_KEY else 'No'}, HF={'Yes' if HF_API_TOKEN else 'No'}")

# Load the trained tomato disease detection model (expects the .h5 file next to this script)
model = tf.keras.models.load_model("Tomato_Leaf_Disease_Model.h5")

# Disease categories.
# NOTE: the order here must match the output-index order the model was trained
# with — detect_disease_scaled maps argmax(prediction) directly into this list.
class_labels = [
    "Tomato Bacterial Spot",
    "Tomato Early Blight",
    "Tomato Late Blight",
    "Tomato Mosaic Virus",
    "Tomato Yellow Leaf Curl Virus"
]

# Disease information database (fallback if API fails).
# Keys must match entries in class_labels exactly (generate_ai_response looks
# the detected name up here). Each entry carries:
#   "description"      - one-sentence summary shown in the fallback markdown
#   "causes"           - pathogen / transmission notes
#   "recommendations"  - list of bullet-point treatment/prevention tips
disease_info = {
    "Tomato Bacterial Spot": {
        "description": "A bacterial disease that causes small, dark spots on leaves, stems, and fruits.",
        "causes": "Caused by Xanthomonas bacteria, spread by water splash, contaminated tools, and seeds.",
        "recommendations": [
            "Remove and destroy infected plants",
            "Rotate crops with non-solanaceous plants",
            "Use copper-based fungicides",
            "Avoid overhead irrigation"
        ]
    },
    "Tomato Early Blight": {
        "description": "A fungal disease that causes dark spots with concentric rings on lower leaves first.",
        "causes": "Caused by Alternaria solani fungus, favored by warm, humid conditions.",
        "recommendations": [
            "Remove infected leaves promptly",
            "Improve air circulation around plants",
            "Apply fungicides preventatively",
            "Mulch around plants to prevent soil splash"
        ]
    },
    "Tomato Late Blight": {
        "description": "A devastating fungal disease that causes dark, water-soaked lesions on leaves and fruits.",
        "causes": "Caused by Phytophthora infestans, favored by cool, wet conditions.",
        "recommendations": [
            "Remove and destroy infected plants immediately",
            "Apply fungicides preventatively in humid conditions",
            "Improve drainage and air circulation",
            "Plant resistant varieties when available"
        ]
    },
    "Tomato Mosaic Virus": {
        "description": "A viral disease that causes mottled green/yellow patterns on leaves and stunted growth.",
        "causes": "Caused by tobacco mosaic virus (TMV), spread by handling, tools, and sometimes seeds.",
        "recommendations": [
            "Remove and destroy infected plants",
            "Wash hands and tools after handling infected plants",
            "Control insect vectors like aphids",
            "Plant resistant varieties"
        ]
    },
    "Tomato Yellow Leaf Curl Virus": {
        "description": "A viral disease transmitted by whiteflies that causes yellowing and curling of leaves.",
        "causes": "Caused by a begomovirus, transmitted primarily by whiteflies.",
        "recommendations": [
            "Use whitefly control measures",
            "Remove and destroy infected plants",
            "Use reflective mulches to repel whiteflies",
            "Plant resistant varieties"
        ]
    }
}

# Image preprocessing function
def preprocess_image(img):
    """Convert an uploaded PIL image into a normalized (1, 224, 224, 3) batch.

    The classifier expects 224x224 RGB inputs scaled to [0, 1]. Gradio with
    ``type="pil"`` can hand us RGBA (PNG with alpha) or grayscale images, so
    we force 3-channel RGB first — the original code would feed the wrong
    channel count to the model for such uploads.
    """
    resized = img.convert("RGB").resize((224, 224))
    # Same values tf.keras' image.img_to_array produces for a PIL input:
    # float32, channels-last; then normalize to [0, 1].
    arr = np.asarray(resized, dtype=np.float32) / 255.0
    return np.expand_dims(arr, axis=0)  # add leading batch dimension

# Temperature Scaling: re-softmax the probabilities with a temperature knob.
# T > 1 flattens the distribution (less confident), T < 1 sharpens it.
def apply_temperature_scaling(prediction, temperature):
    """Return *prediction* re-normalized as softmax(log(p) / temperature)."""
    epsilon = 1e-8  # floor the probabilities so log() never sees zero
    logits = np.log(np.clip(prediction, epsilon, None)) / temperature
    weights = np.exp(logits)
    return weights / weights.sum()

# Min-Max Normalization: Scales the raw confidence based on provided min and max values.
def apply_min_max_scaling(confidence, min_conf, max_conf):
    """Linearly rescale *confidence* from [min_conf, max_conf] onto [0, 100].

    Values outside the range are clipped to 0 or 100. Both bounds come from
    free 0-100 UI sliders, so a degenerate range (min >= max) is possible;
    the original code raised ZeroDivisionError there.
    """
    if max_conf <= min_conf:
        # Degenerate/inverted range: classify against the single threshold.
        return 100.0 if confidence >= max_conf else 0.0
    norm = (confidence - min_conf) / (max_conf - min_conf) * 100
    return float(np.clip(norm, 0, 100))

# Call Groq API for AI assistant
def call_groq_api(prompt):
    """Call Groq API for detailed disease analysis and advice.

    Sends *prompt* to Groq's OpenAI-compatible chat-completions endpoint and
    returns the assistant's reply text, or None on any HTTP/network failure
    (the caller then falls back to other backends).
    """
    request_headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json"
    }
    body = {
        "model": GROQ_MODEL,
        "messages": [
            {"role": "system", "content": "You are an expert agricultural advisor specializing in tomato farming and plant diseases."},
            {"role": "user", "content": prompt}
        ],
        "max_tokens": 800,
        "temperature": 0.7
    }

    try:
        resp = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=request_headers,
            json=body,
            timeout=30
        )
        if resp.status_code == 200:
            payload = resp.json()
            choices = payload.get("choices") or []
            if choices:
                return choices[0]["message"]["content"]
        # Reached on non-200 responses or a 200 with no choices.
        print(f"Groq API error: {resp.status_code} - {resp.text}")
        return None
    except Exception as err:
        print(f"Error with Groq API: {str(err)}")
        return None

# Fallback to Hugging Face if Groq fails
def call_hf_model(prompt, model_id="mistralai/Mistral-7B-Instruct-v0.2"):
    """Call an AI model on Hugging Face for detailed disease analysis.

    Returns the generated reply text (with the instruction prompt stripped),
    or None when no HF token is configured or the request fails.
    """
    if not HF_API_TOKEN:
        return None

    auth_headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}

    # Mistral-style instruction wrapping expected by instruction-tuned models.
    formatted_prompt = f"""<s>[INST] {prompt} [/INST]"""

    body = {
        "inputs": formatted_prompt,
        "parameters": {
            "max_new_tokens": 500,
            "temperature": 0.7,
            "top_p": 0.95,
            "do_sample": True
        }
    }

    endpoint = f"https://api-inference.huggingface.co/models/{model_id}"

    try:
        resp = requests.post(endpoint, headers=auth_headers, json=body, timeout=30)
        if resp.status_code == 200:
            payload = resp.json()
            if isinstance(payload, list) and payload and "generated_text" in payload[0]:
                # The API echoes the prompt; keep only what follows the
                # closing instruction marker.
                return payload[0]["generated_text"].split("[/INST]")[-1].strip()
        return None
    except Exception as e:
        print(f"Exception when calling HF model: {str(e)}")
        return None

# Combined AI model call with fallback
def call_ai_model(prompt):
    """Send *prompt* to the first backend that answers: Groq, then HF.

    Returns the backend's reply, or a canned apology string when every
    backend fails (callers detect that string to switch to local data).
    """
    for backend in (call_groq_api, call_hf_model):
        answer = backend(prompt)
        if answer:
            return answer
    return "Sorry, I'm having trouble connecting to the AI service. Using fallback information instead."

# Generate AI response for disease analysis
def generate_ai_response(disease_name, confidence):
    """Return a markdown analysis of *disease_name* at *confidence* percent.

    Prefers a live AI answer; when every backend is down, renders a markdown
    report from the local disease_info database instead.
    """
    # Local entry used only if the AI call fails; unknown diseases get a
    # generic placeholder.
    entry = disease_info.get(disease_name, {
        "description": "Information not available for this disease.",
        "causes": "Unknown causes.",
        "recommendations": ["Consult with a local agricultural extension service."]
    })

    prompt = (
        f"You are an agricultural expert advisor. A tomato plant disease has been detected: {disease_name} "
        f"with {confidence:.2f}% confidence. "
        f"Provide a detailed analysis including: "
        f"1) A brief description of the disease "
        f"2) What causes it and how it spreads "
        f"3) The impact on tomato plants and yield "
        f"4) Detailed treatment options (both organic and chemical) "
        f"5) Prevention strategies for future crops "
        f"Format your response in clear sections with bullet points where appropriate."
    )

    answer = call_ai_model(prompt)

    # Success path: the backends returned a real reply.
    if "Sorry, I'm having trouble" not in answer:
        return answer

    # Failure path: build the fallback report from the local database.
    tips = "\n".join(f"- {tip}" for tip in entry["recommendations"])
    return f"""
# Disease: {disease_name}

## Description
{entry['description']}

## Causes
{entry.get('causes', 'Information not available.')}

## Recommended Treatment
{tips}

*Note: This is fallback information. For more detailed advice, please try again later when the AI service is available.*
"""

# Chat with agricultural expert
def chat_with_expert(message, chat_history):
    """Handle one chat turn; returns ("", updated_history) for the Gradio UI.

    The empty first return value clears the input textbox. History is a list
    of (question, answer) tuples as used by gr.Chatbot.
    """
    # Ignore blank/whitespace-only submissions without touching the history.
    if not message.strip():
        return "", chat_history

    # Only the last 3 exchanges are replayed as context to stay within
    # the model's token budget.
    recent_turns = chat_history[-3:]
    context = "\n".join(f"Farmer: {q}\nExpert: {a}" for q, a in recent_turns)

    prompt = (
        f"You are an expert agricultural advisor specializing in tomato farming and plant diseases. "
        f"You provide helpful, accurate, and practical advice to farmers. "
        f"Always be respectful and considerate of farmers' knowledge while providing expert guidance. "
        f"If you're unsure about something, acknowledge it and provide the best information you can. "
        f"Previous conversation:\n{context}\n\n"
        f"Farmer's new question: {message}\n\n"
        f"Provide a helpful, informative response about farming, focusing on tomatoes if relevant."
    )

    reply = call_ai_model(prompt)

    # Swap the generic connection-failure text for a chat-appropriate apology.
    if "Sorry, I'm having trouble" in reply:
        reply = "I apologize, but I'm having trouble connecting to my knowledge base at the moment. Please try again later, or ask a different question about tomato farming or plant diseases."

    chat_history.append((message, reply))
    return "", chat_history

# Main detection function with adjustable confidence scaling
def detect_disease_scaled(img, scaling_method, temperature, min_conf, max_conf):
    """Classify a leaf image and report confidence under the chosen scaling.

    Returns (summary_text, raw_confidence_text, ai_markdown) matching the
    three Gradio output components. ``temperature`` is used only for
    "Temperature Scaling"; ``min_conf``/``max_conf`` only for
    "Min-Max Normalization".
    """
    batch = preprocess_image(img)
    probs = model.predict(batch)[0]  # single-image batch -> 1-D probability vector
    raw_confidence = np.max(probs) * 100
    disease_name = class_labels[np.argmax(probs)]

    if scaling_method == "Temperature Scaling":
        adjusted_confidence = np.max(apply_temperature_scaling(probs, temperature)) * 100
    elif scaling_method == "Min-Max Normalization":
        adjusted_confidence = apply_min_max_scaling(raw_confidence, min_conf, max_conf)
    else:
        # Unknown method: report the model's raw confidence unchanged.
        adjusted_confidence = raw_confidence

    analysis = generate_ai_response(disease_name, adjusted_confidence)

    summary = f"{disease_name} (Confidence: {adjusted_confidence:.2f}%)"
    raw_text = f"Raw Confidence: {raw_confidence:.2f}%"
    return summary, raw_text, analysis

# Simplified Gradio UI for better compatibility.
# Two tabs: image-based detection and a free-form expert chat. Event wiring
# is at the bottom of the Blocks context.
with gr.Blocks() as demo:
    gr.Markdown("# 🍅 EvSentry8: Tomato Disease Detection with AI Assistant")

    with gr.Tab("Disease Detection"):
        with gr.Row():
            # Left column: inputs and scaling controls.
            with gr.Column():
                image_input = gr.Image(type="pil", label="Upload a Tomato Leaf Image")

                scaling_method = gr.Radio(
                    ["Temperature Scaling", "Min-Max Normalization"],
                    label="Confidence Scaling Method",
                    value="Temperature Scaling"
                )
                # All three sliders are always visible; detect_disease_scaled
                # uses temperature only for "Temperature Scaling" and the
                # min/max pair only for "Min-Max Normalization".
                temperature_slider = gr.Slider(0.5, 2.0, step=0.1, label="Temperature", value=1.0)
                min_conf_slider = gr.Slider(0, 100, step=1, label="Min Confidence", value=20)
                max_conf_slider = gr.Slider(0, 100, step=1, label="Max Confidence", value=90)

                detect_button = gr.Button("Detect Disease")

            # Right column: prediction summary plus the AI-generated analysis.
            with gr.Column():
                disease_output = gr.Textbox(label="Detected Disease & Adjusted Confidence")
                raw_confidence_output = gr.Textbox(label="Raw Confidence")
                ai_response_output = gr.Markdown(label="AI Assistant's Analysis & Recommendations")

    with gr.Tab("Chat with Expert"):
        gr.Markdown("# 💬 Chat with Agricultural Expert")
        gr.Markdown("Ask any questions about tomato farming, diseases, or agricultural practices.")

        # NOTE(review): history is handled as (user, bot) tuples, the legacy
        # gr.Chatbot format — confirm against the installed Gradio version.
        chatbot = gr.Chatbot(height=400)

        with gr.Row():
            chat_input = gr.Textbox(
                label="Your Question",
                placeholder="Ask about tomato farming, diseases, or agricultural practices...",
                lines=2
            )
            chat_button = gr.Button("Send")

        gr.Markdown("""
        ### Example Questions:
        - How do I identify tomato bacterial spot?
        - What's the best way to prevent late blight?
        - How often should I water my tomato plants?
        - What are the signs of nutrient deficiency in tomatoes?
        """)

    # Set up event handlers
    detect_button.click(
        detect_disease_scaled,
        inputs=[image_input, scaling_method, temperature_slider, min_conf_slider, max_conf_slider],
        outputs=[disease_output, raw_confidence_output, ai_response_output]
    )

    # Chat functionality (button click); chat_with_expert returns "" to clear
    # the textbox and the updated history for the chatbot.
    chat_button.click(
        fn=chat_with_expert,
        inputs=[chat_input, chatbot],
        outputs=[chat_input, chatbot]
    )

    # Also allow pressing Enter to send chat
    chat_input.submit(
        fn=chat_with_expert,
        inputs=[chat_input, chatbot],
        outputs=[chat_input, chatbot]
    )

# Blocking call: starts the local Gradio server.
demo.launch()