Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
"""
|
| 2 |
Advanced Agriculture Intelligence System with Professional UI
|
| 3 |
-
Integrates Gemini
|
|
|
|
| 4 |
"""
|
| 5 |
|
| 6 |
import gradio as gr
|
|
@@ -25,16 +26,20 @@ class AgricultureIntelligenceSystem:
|
|
| 25 |
def __init__(self):
|
| 26 |
print("Initializing Agriculture Intelligence System...")
|
| 27 |
|
| 28 |
-
|
| 29 |
-
self.
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
-
self.groq_api_key = os.environ.get("GROQ_API_KEY", "")
|
| 32 |
self.groq_api_url = "https://api.groq.com/openai/v1/chat/completions"
|
| 33 |
|
| 34 |
self.conversation_history = []
|
| 35 |
self.image_context = None
|
| 36 |
self.current_analysis = None
|
| 37 |
|
|
|
|
| 38 |
self.supported_languages = {
|
| 39 |
"🇬🇧 English": "en",
|
| 40 |
"🇮🇳 हिंदी (Hindi)": "hi",
|
|
@@ -52,10 +57,14 @@ class AgricultureIntelligenceSystem:
|
|
| 52 |
|
| 53 |
def encode_image_to_base64(self, image: Image.Image) -> str:
|
| 54 |
buffered = io.BytesIO()
|
| 55 |
-
image.save(buffered, format="JPEG")
|
| 56 |
return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
| 57 |
|
| 58 |
def analyze_with_gemini(self, image: Image.Image) -> Dict:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
if not self.google_api_key:
|
| 60 |
return {"error": "Google API Key not configured"}
|
| 61 |
|
|
@@ -105,8 +114,8 @@ class AgricultureIntelligenceSystem:
|
|
| 105 |
}
|
| 106 |
}
|
| 107 |
|
| 108 |
-
Be specific, practical, and provide actionable recommendations."""
|
| 109 |
-
|
| 110 |
request_body = {
|
| 111 |
"contents": [{
|
| 112 |
"parts": [
|
|
@@ -115,41 +124,89 @@ Be specific, practical, and provide actionable recommendations."""
|
|
| 115 |
]
|
| 116 |
}],
|
| 117 |
"generationConfig": {
|
| 118 |
-
"temperature": 0.
|
| 119 |
-
"topK":
|
| 120 |
-
"topP":
|
| 121 |
-
"maxOutputTokens":
|
| 122 |
}
|
| 123 |
}
|
| 124 |
|
|
|
|
| 125 |
response = requests.post(self.gemini_api_url, json=request_body, timeout=60)
|
| 126 |
|
|
|
|
| 127 |
if response.status_code != 200:
|
| 128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
|
| 130 |
result = response.json()
|
| 131 |
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 150 |
|
| 151 |
-
|
|
|
|
| 152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
except Exception as e:
|
| 154 |
return {"error": f"Analysis failed: {str(e)}"}
|
| 155 |
|
|
@@ -161,16 +218,17 @@ Be specific, practical, and provide actionable recommendations."""
|
|
| 161 |
return [
|
| 162 |
"What type of plants can you analyze?",
|
| 163 |
"How accurate is the AI diagnosis?",
|
| 164 |
-
"Can you explain disease prevention methods?"
|
|
|
|
| 165 |
]
|
| 166 |
|
| 167 |
health = analysis.get('health_assessment', {})
|
| 168 |
disease_name = health.get('disease_name')
|
| 169 |
-
severity = health.get('severity', '').lower()
|
| 170 |
treatment = analysis.get('treatment', {})
|
| 171 |
|
| 172 |
# Disease-specific questions
|
| 173 |
-
if disease_name and disease_name
|
| 174 |
questions.extend([
|
| 175 |
f"What causes {disease_name}?",
|
| 176 |
f"How do I treat {disease_name} effectively?",
|
|
@@ -200,17 +258,34 @@ Be specific, practical, and provide actionable recommendations."""
|
|
| 200 |
"What preventive measures should I take?"
|
| 201 |
])
|
| 202 |
|
| 203 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 204 |
|
| 205 |
-
def chat_with_llama(self, user_message: str,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 206 |
if not self.groq_api_key:
|
| 207 |
return "⚠️ Groq API key not configured. Chat functionality unavailable."
|
| 208 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 209 |
context = ""
|
| 210 |
if self.image_context:
|
| 211 |
plant = self.image_context.get('plant_identification', {})
|
| 212 |
health = self.image_context.get('health_assessment', {})
|
| 213 |
-
|
| 214 |
context = f"""
|
| 215 |
Current Plant Analysis:
|
| 216 |
- Plant: {plant.get('common_name', 'Unknown')} ({plant.get('scientific_name', 'N/A')})
|
|
@@ -218,14 +293,14 @@ Current Plant Analysis:
|
|
| 218 |
- Health: {health.get('status', 'Unknown')}
|
| 219 |
- Disease: {health.get('disease_name', 'None')}
|
| 220 |
- Severity: {health.get('severity', 'Unknown')}
|
| 221 |
-
- Confidence: {
|
| 222 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
|
| 224 |
-
|
| 225 |
-
user_message_en = self.translate_text(user_message, "en", language)
|
| 226 |
-
else:
|
| 227 |
-
user_message_en = user_message
|
| 228 |
-
|
| 229 |
messages = [{
|
| 230 |
"role": "system",
|
| 231 |
"content": f"""You are an expert agricultural advisor specializing in plant diseases and crop management.
|
|
@@ -242,14 +317,13 @@ Provide practical, science-based advice. Be specific about:
|
|
| 242 |
Keep responses clear, actionable, and farmer-friendly. Use bullet points for lists."""
|
| 243 |
}]
|
| 244 |
|
|
|
|
| 245 |
if history:
|
| 246 |
-
for msg in history[-
|
| 247 |
if isinstance(msg, dict):
|
| 248 |
-
|
| 249 |
-
messages.append({"role": "user", "content": msg.get('content', '')})
|
| 250 |
-
elif msg.get('role') == 'assistant':
|
| 251 |
-
messages.append({"role": "assistant", "content": msg.get('content', '')})
|
| 252 |
|
|
|
|
| 253 |
messages.append({"role": "user", "content": user_message_en})
|
| 254 |
|
| 255 |
try:
|
|
@@ -261,61 +335,87 @@ Keep responses clear, actionable, and farmer-friendly. Use bullet points for lis
|
|
| 261 |
payload = {
|
| 262 |
"model": "llama-3.3-70b-versatile",
|
| 263 |
"messages": messages,
|
| 264 |
-
"temperature": 0.
|
| 265 |
-
"max_tokens":
|
| 266 |
-
"top_p": 0.
|
| 267 |
}
|
| 268 |
|
| 269 |
response = requests.post(self.groq_api_url, headers=headers, json=payload, timeout=30)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 270 |
|
| 271 |
-
|
| 272 |
-
|
|
|
|
|
|
|
| 273 |
bot_response = result['choices'][0]['message']['content']
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 282 |
except Exception as e:
|
| 283 |
return f"⚠️ Error: {str(e)}"
|
| 284 |
|
| 285 |
def translate_text(self, text: str, target_lang: str, source_lang: str = "en") -> str:
|
| 286 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 287 |
return text
|
| 288 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 289 |
try:
|
| 290 |
-
|
| 291 |
-
return text
|
| 292 |
-
|
| 293 |
-
lang_names = {v: k for k, v in self.supported_languages.items()}
|
| 294 |
-
target_lang_name = lang_names.get(target_lang, target_lang)
|
| 295 |
-
|
| 296 |
-
headers = {
|
| 297 |
-
"Authorization": f"Bearer {self.groq_api_key}",
|
| 298 |
-
"Content-Type": "application/json"
|
| 299 |
-
}
|
| 300 |
-
|
| 301 |
-
payload = {
|
| 302 |
-
"model": "llama-3.3-70b-versatile",
|
| 303 |
-
"messages": [
|
| 304 |
-
{
|
| 305 |
-
"role": "system",
|
| 306 |
-
"content": f"Translate to {target_lang_name}. Only provide translation."
|
| 307 |
-
},
|
| 308 |
-
{"role": "user", "content": text}
|
| 309 |
-
],
|
| 310 |
-
"temperature": 0.3,
|
| 311 |
-
"max_tokens": 2000
|
| 312 |
-
}
|
| 313 |
-
|
| 314 |
-
response = requests.post(self.groq_api_url, headers=headers, json=payload, timeout=30)
|
| 315 |
-
|
| 316 |
if response.status_code == 200:
|
| 317 |
-
|
| 318 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 319 |
except:
|
| 320 |
return text
|
| 321 |
|
|
@@ -327,7 +427,7 @@ Keep responses clear, actionable, and farmer-friendly. Use bullet points for lis
|
|
| 327 |
def create_advanced_interface():
|
| 328 |
system = AgricultureIntelligenceSystem()
|
| 329 |
|
| 330 |
-
# Custom CSS for professional look
|
| 331 |
custom_css = """
|
| 332 |
.container {max-width: 1400px; margin: auto;}
|
| 333 |
.header {background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 2rem; border-radius: 10px; margin-bottom: 2rem;}
|
|
@@ -342,53 +442,45 @@ def create_advanced_interface():
|
|
| 342 |
|
| 343 |
def process_image(image):
|
| 344 |
if image is None:
|
| 345 |
-
return None, "⚠️ Please upload an image",
|
| 346 |
|
| 347 |
try:
|
| 348 |
img = Image.fromarray(image).convert('RGB') if isinstance(image, np.ndarray) else image.convert('RGB')
|
| 349 |
|
| 350 |
-
# Show loading state
|
| 351 |
-
loading_html = """
|
| 352 |
-
<div style='text-align: center; padding: 2rem;'>
|
| 353 |
-
<div style='font-size: 3rem;'>🔬</div>
|
| 354 |
-
|
| 355 |
-
<p style='color: #666;'>Please wait while AI examines your plant</p>
|
| 356 |
-
</div>
|
| 357 |
-
"""
|
| 358 |
-
|
| 359 |
analysis = system.analyze_with_gemini(img)
|
| 360 |
|
| 361 |
if "error" in analysis:
|
| 362 |
error_html = f"""
|
| 363 |
<div style='background: #fee; padding: 2rem; border-radius: 8px; border-left: 4px solid #f44;'>
|
| 364 |
<h3 style='color: #c33; margin: 0;'>❌ Analysis Error</h3>
|
| 365 |
-
<p style='margin: 0.5rem 0 0 0;'>{analysis
|
| 366 |
-
</div>
|
| 367 |
"""
|
| 368 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 369 |
|
| 370 |
system.image_context = analysis
|
| 371 |
system.current_analysis = analysis
|
| 372 |
|
| 373 |
-
# Generate beautiful analysis card
|
| 374 |
analysis_html = format_analysis_card(analysis)
|
| 375 |
-
|
| 376 |
-
# Generate suggested questions
|
| 377 |
suggestions = system.generate_suggested_questions(analysis)
|
| 378 |
|
| 379 |
return analysis, analysis_html, suggestions, gr.update(visible=True)
|
| 380 |
|
| 381 |
except Exception as e:
|
| 382 |
-
return
|
| 383 |
|
| 384 |
def format_analysis_card(analysis: Dict) -> str:
|
| 385 |
-
plant = analysis.get('plant_identification', {})
|
| 386 |
-
health = analysis.get('health_assessment', {})
|
| 387 |
-
symptoms = analysis.get('visual_symptoms', [])
|
| 388 |
-
treatment = analysis.get('treatment', {})
|
| 389 |
-
urgency = analysis.get('urgency', {})
|
| 390 |
|
| 391 |
-
status = health.get('status'
|
| 392 |
disease_name = health.get('disease_name', 'None')
|
| 393 |
|
| 394 |
# Determine card style
|
|
@@ -396,7 +488,7 @@ def create_advanced_interface():
|
|
| 396 |
card_class = 'healthy-card'
|
| 397 |
icon = '✅'
|
| 398 |
status_text = 'Plant is Healthy'
|
| 399 |
-
elif disease_name and disease_name
|
| 400 |
card_class = 'disease-card'
|
| 401 |
icon = '⚠️'
|
| 402 |
status_text = 'Disease Detected'
|
|
@@ -439,7 +531,7 @@ def create_advanced_interface():
|
|
| 439 |
html += "</ul></div>"
|
| 440 |
|
| 441 |
# Treatment
|
| 442 |
-
if treatment and (treatment.get('immediate_actions') or treatment.get('chemical_treatments')):
|
| 443 |
html += """
|
| 444 |
<div style='background: #fff; padding: 1.5rem; border-radius: 8px; margin: 1rem 0; border: 1px solid #e0e0e0;'>
|
| 445 |
<h3 style='margin: 0 0 1rem 0; color: #333;'>💊 Treatment Recommendations</h3>
|
|
@@ -485,12 +577,12 @@ def create_advanced_interface():
|
|
| 485 |
|
| 486 |
return html
|
| 487 |
|
| 488 |
-
def handle_suggestion_click(question, history,
|
| 489 |
if not question:
|
| 490 |
-
return history, ""
|
| 491 |
|
| 492 |
# Get bot response
|
| 493 |
-
bot_response = system.chat_with_llama(question,
|
| 494 |
|
| 495 |
# Update history
|
| 496 |
if history is None:
|
|
@@ -501,17 +593,16 @@ def create_advanced_interface():
|
|
| 501 |
|
| 502 |
return history, ""
|
| 503 |
|
| 504 |
-
def send_message(message, history,
|
| 505 |
-
if not message.strip():
|
| 506 |
-
return history, ""
|
| 507 |
-
|
| 508 |
-
return handle_suggestion_click(message, history, language, analysis)
|
| 509 |
|
| 510 |
def clear_all():
|
| 511 |
system.conversation_history = []
|
| 512 |
system.image_context = None
|
| 513 |
system.current_analysis = None
|
| 514 |
-
return None, None, "👋 Upload a plant image to begin AI-powered diagnosis", [], None, gr.update(visible=False)
|
| 515 |
|
| 516 |
# Create interface
|
| 517 |
with gr.Blocks(title="AgriBot Pro - AI Plant Doctor") as demo:
|
|
@@ -523,7 +614,6 @@ def create_advanced_interface():
|
|
| 523 |
gr.HTML("""
|
| 524 |
<div class='header'>
|
| 525 |
<h1>🌾 AgriBot Pro - AI Plant Doctor</h1>
|
| 526 |
-
|
| 527 |
</div>
|
| 528 |
""")
|
| 529 |
|
|
@@ -567,14 +657,16 @@ def create_advanced_interface():
|
|
| 567 |
)
|
| 568 |
|
| 569 |
# Suggested questions row
|
| 570 |
-
|
|
|
|
| 571 |
gr.Markdown("### 💡 Suggested Questions:")
|
| 572 |
|
| 573 |
suggestions_display = gr.HTML(visible=False)
|
| 574 |
|
| 575 |
# Create suggestion buttons dynamically
|
| 576 |
suggestion_buttons = []
|
| 577 |
-
|
|
|
|
| 578 |
for i in range(6):
|
| 579 |
btn = gr.Button("", visible=False, elem_classes="suggestion-btn")
|
| 580 |
suggestion_buttons.append(btn)
|
|
@@ -589,26 +681,29 @@ def create_advanced_interface():
|
|
| 589 |
send_btn = gr.Button("📤 Send", variant="primary")
|
| 590 |
clear_btn = gr.Button("🗑️ Clear All")
|
| 591 |
|
| 592 |
-
#
|
| 593 |
def update_suggestions(suggestions):
|
| 594 |
if not suggestions:
|
| 595 |
-
|
| 596 |
-
|
|
|
|
| 597 |
updates = []
|
| 598 |
for i, btn in enumerate(suggestion_buttons):
|
| 599 |
if i < len(suggestions):
|
| 600 |
updates.append(gr.update(visible=True, value=suggestions[i]))
|
| 601 |
else:
|
| 602 |
-
updates.append(gr.update(visible=False))
|
| 603 |
-
updates.append(gr.update(visible=True))
|
| 604 |
return updates
|
| 605 |
|
|
|
|
| 606 |
analyze_btn.click(
|
| 607 |
fn=process_image,
|
| 608 |
inputs=[image_input],
|
| 609 |
outputs=[analysis_state, analysis_display, gr.State(), suggestion_btns_row]
|
| 610 |
).then(
|
| 611 |
-
|
|
|
|
| 612 |
inputs=[analysis_state],
|
| 613 |
outputs=[gr.State()]
|
| 614 |
).then(
|
|
@@ -617,14 +712,15 @@ def create_advanced_interface():
|
|
| 617 |
outputs=suggestion_buttons + [suggestion_btns_row]
|
| 618 |
)
|
| 619 |
|
| 620 |
-
# Connect suggestion buttons
|
| 621 |
for btn in suggestion_buttons:
|
| 622 |
btn.click(
|
| 623 |
-
fn=
|
| 624 |
inputs=[btn, chatbot, language_selector, analysis_state],
|
| 625 |
outputs=[chatbot, msg_input]
|
| 626 |
)
|
| 627 |
|
|
|
|
| 628 |
msg_input.submit(
|
| 629 |
fn=send_message,
|
| 630 |
inputs=[msg_input, chatbot, language_selector, analysis_state],
|
|
@@ -664,7 +760,7 @@ if __name__ == "__main__":
|
|
| 664 |
print("⚠️ GOOGLE_API_KEY not found - Image analysis disabled")
|
| 665 |
print("Get free key: https://aistudio.google.com/app/apikey\n")
|
| 666 |
else:
|
| 667 |
-
print("✅ Gemini
|
| 668 |
|
| 669 |
if not groq_key:
|
| 670 |
print("⚠️ GROQ_API_KEY not found - Chat disabled")
|
|
@@ -677,11 +773,10 @@ if __name__ == "__main__":
|
|
| 677 |
print("🚀 Launching AgriBot Pro...\n")
|
| 678 |
print("="*70 + "\n")
|
| 679 |
|
| 680 |
-
# For Hugging Face Spaces
|
| 681 |
demo.launch(
|
| 682 |
server_name="0.0.0.0",
|
| 683 |
server_port=7860,
|
| 684 |
share=False,
|
| 685 |
show_error=True,
|
| 686 |
-
ssr_mode=False
|
| 687 |
-
)
|
|
|
|
| 1 |
"""
|
| 2 |
Advanced Agriculture Intelligence System with Professional UI
|
| 3 |
+
Integrates Gemini (Google) + Groq (Llama) + Multilingual Support
|
| 4 |
+
Corrected version with improved error handling and language mapping.
|
| 5 |
"""
|
| 6 |
|
| 7 |
import gradio as gr
|
|
|
|
| 26 |
def __init__(self):
|
| 27 |
print("Initializing Agriculture Intelligence System...")
|
| 28 |
|
| 29 |
+
# Load API keys from env
|
| 30 |
+
self.google_api_key = os.environ.get("GOOGLE_API_KEY", "").strip()
|
| 31 |
+
# Note: user earlier referenced Gemini 1.5 Pro — adjust endpoint if you have another model name.
|
| 32 |
+
# Keep key as query param for simplicity; you may prefer Authorization header in prod.
|
| 33 |
+
self.gemini_api_url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent?key={self.google_api_key}"
|
| 34 |
|
| 35 |
+
self.groq_api_key = os.environ.get("GROQ_API_KEY", "").strip()
|
| 36 |
self.groq_api_url = "https://api.groq.com/openai/v1/chat/completions"
|
| 37 |
|
| 38 |
self.conversation_history = []
|
| 39 |
self.image_context = None
|
| 40 |
self.current_analysis = None
|
| 41 |
|
| 42 |
+
# Map UI labels to language codes
|
| 43 |
self.supported_languages = {
|
| 44 |
"🇬🇧 English": "en",
|
| 45 |
"🇮🇳 हिंदी (Hindi)": "hi",
|
|
|
|
| 57 |
|
| 58 |
def encode_image_to_base64(self, image: Image.Image) -> str:
    """Serialize a PIL image to a base64-encoded JPEG string.

    JPEG cannot store an alpha channel, so images with transparency
    (e.g. RGBA or palette-mode PNG uploads) are converted to RGB first;
    without this, ``Image.save(..., format="JPEG")`` raises ``OSError``.

    Args:
        image: PIL image to encode.

    Returns:
        ASCII base64 string of the JPEG-compressed image bytes.
    """
    if image.mode not in ("RGB", "L"):
        image = image.convert("RGB")
    buffered = io.BytesIO()
    # quality=90 keeps enough detail for disease diagnosis while bounding payload size.
    image.save(buffered, format="JPEG", quality=90)
    return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
| 62 |
|
| 63 |
def analyze_with_gemini(self, image: Image.Image) -> Dict:
|
| 64 |
+
"""
|
| 65 |
+
Send image + prompt to Gemini (Generative Language API). This function is robust to several
|
| 66 |
+
API output shapes and returns a parsed JSON analysis dict or a helpful error dict.
|
| 67 |
+
"""
|
| 68 |
if not self.google_api_key:
|
| 69 |
return {"error": "Google API Key not configured"}
|
| 70 |
|
|
|
|
| 114 |
}
|
| 115 |
}
|
| 116 |
|
| 117 |
+
Be specific, practical, and provide actionable recommendations in strict JSON only (no commentary)."""
|
| 118 |
+
|
| 119 |
request_body = {
|
| 120 |
"contents": [{
|
| 121 |
"parts": [
|
|
|
|
| 124 |
]
|
| 125 |
}],
|
| 126 |
"generationConfig": {
|
| 127 |
+
"temperature": 0.2,
|
| 128 |
+
"topK": 40,
|
| 129 |
+
"topP": 0.95,
|
| 130 |
+
"maxOutputTokens": 2048,
|
| 131 |
}
|
| 132 |
}
|
| 133 |
|
| 134 |
+
# Post with a 60s timeout
|
| 135 |
response = requests.post(self.gemini_api_url, json=request_body, timeout=60)
|
| 136 |
|
| 137 |
+
# If non-200, include response text in error for easier debugging
|
| 138 |
if response.status_code != 200:
|
| 139 |
+
body_text = ""
|
| 140 |
+
try:
|
| 141 |
+
body_text = response.text
|
| 142 |
+
except:
|
| 143 |
+
body_text = "<no body>"
|
| 144 |
+
return {"error": f"API Error ({response.status_code})", "raw": body_text}
|
| 145 |
|
| 146 |
result = response.json()
|
| 147 |
|
| 148 |
+
# Gemini / Generative Language responses have varied shapes.
|
| 149 |
+
# Try common paths: 'candidates', 'outputs', 'content' etc.
|
| 150 |
+
text_response = None
|
| 151 |
+
# Path 1: result['candidates'][0]['content']['parts'][0]['text']
|
| 152 |
+
try:
|
| 153 |
+
if isinstance(result, dict):
|
| 154 |
+
if 'candidates' in result and len(result['candidates']) > 0:
|
| 155 |
+
cand = result['candidates'][0]
|
| 156 |
+
# Many shapes inside candidate
|
| 157 |
+
if isinstance(cand, dict):
|
| 158 |
+
# often: cand['content']['parts'][0]['text']
|
| 159 |
+
if 'content' in cand and isinstance(cand['content'], dict):
|
| 160 |
+
parts = cand['content'].get('parts', [])
|
| 161 |
+
if parts and isinstance(parts[0], dict) and 'text' in parts[0]:
|
| 162 |
+
text_response = parts[0]['text']
|
| 163 |
+
# Path 2: 'output' or 'outputs'
|
| 164 |
+
if text_response is None:
|
| 165 |
+
# Sometimes the model returns in 'outputs'
|
| 166 |
+
outputs = result.get('outputs') or result.get('output')
|
| 167 |
+
if isinstance(outputs, list) and len(outputs) > 0:
|
| 168 |
+
first_out = outputs[0]
|
| 169 |
+
if isinstance(first_out, dict):
|
| 170 |
+
# check nested text
|
| 171 |
+
if 'text' in first_out:
|
| 172 |
+
text_response = first_out['text']
|
| 173 |
+
elif 'content' in first_out and isinstance(first_out['content'], dict):
|
| 174 |
+
# content may contain parts
|
| 175 |
+
parts = first_out['content'].get('parts', [])
|
| 176 |
+
if parts and isinstance(parts[0], dict) and 'text' in parts[0]:
|
| 177 |
+
text_response = parts[0]['text']
|
| 178 |
+
# Fallback: top-level string
|
| 179 |
+
if text_response is None:
|
| 180 |
+
# attempt to find any string value in JSON response heuristically
|
| 181 |
+
# (last resort)
|
| 182 |
+
maybe = json.dumps(result)
|
| 183 |
+
text_response = maybe
|
| 184 |
+
except Exception as e:
|
| 185 |
+
return {"error": f"Failed to parse API response: {str(e)}", "raw_response": str(result)}
|
| 186 |
|
| 187 |
+
if not text_response:
|
| 188 |
+
return {"error": "No textual response returned from Gemini", "raw_response": result}
|
| 189 |
|
| 190 |
+
# Clean markdown fences if present
|
| 191 |
+
text_response = text_response.strip()
|
| 192 |
+
if text_response.startswith("```json"):
|
| 193 |
+
text_response = text_response.split("```json", 1)[1].rsplit("```", 1)[0].strip()
|
| 194 |
+
elif text_response.startswith("```"):
|
| 195 |
+
text_response = text_response.strip("` \n")
|
| 196 |
+
|
| 197 |
+
# Try to parse as JSON
|
| 198 |
+
try:
|
| 199 |
+
analysis_data = json.loads(text_response)
|
| 200 |
+
if not isinstance(analysis_data, dict):
|
| 201 |
+
return {"error": "Parsed response is not JSON object", "raw_response": text_response}
|
| 202 |
+
analysis_data['timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 203 |
+
return analysis_data
|
| 204 |
+
except json.JSONDecodeError:
|
| 205 |
+
# Return useful debug info so UI shows the raw response for troubleshooting
|
| 206 |
+
return {"error": "Failed to parse response as JSON", "raw_response": text_response}
|
| 207 |
+
|
| 208 |
+
except requests.Timeout:
|
| 209 |
+
return {"error": "Timeout while contacting Gemini API"}
|
| 210 |
except Exception as e:
|
| 211 |
return {"error": f"Analysis failed: {str(e)}"}
|
| 212 |
|
|
|
|
| 218 |
return [
|
| 219 |
"What type of plants can you analyze?",
|
| 220 |
"How accurate is the AI diagnosis?",
|
| 221 |
+
"Can you explain disease prevention methods?",
|
| 222 |
+
"What information do you need for better results?"
|
| 223 |
]
|
| 224 |
|
| 225 |
health = analysis.get('health_assessment', {})
|
| 226 |
disease_name = health.get('disease_name')
|
| 227 |
+
severity = (health.get('severity', '') or '').lower()
|
| 228 |
treatment = analysis.get('treatment', {})
|
| 229 |
|
| 230 |
# Disease-specific questions
|
| 231 |
+
if disease_name and disease_name not in ("None", "None detected", "", None):
|
| 232 |
questions.extend([
|
| 233 |
f"What causes {disease_name}?",
|
| 234 |
f"How do I treat {disease_name} effectively?",
|
|
|
|
| 258 |
"What preventive measures should I take?"
|
| 259 |
])
|
| 260 |
|
| 261 |
+
# Deduplicate and limit to 6
|
| 262 |
+
seen = set()
|
| 263 |
+
out = []
|
| 264 |
+
for q in questions:
|
| 265 |
+
if q not in seen:
|
| 266 |
+
out.append(q)
|
| 267 |
+
seen.add(q)
|
| 268 |
+
if len(out) >= 6:
|
| 269 |
+
break
|
| 270 |
+
return out
|
| 271 |
|
| 272 |
+
def chat_with_llama(self, user_message: str, language_label: str = "🇬🇧 English", history: List = None) -> str:
|
| 273 |
+
"""
|
| 274 |
+
Chat via Groq (Llama). Translates incoming user message to English if necessary,
|
| 275 |
+
includes current image analysis context, calls the Groq chat endpoint, then translates
|
| 276 |
+
assistant response back to selected language (if not English).
|
| 277 |
+
"""
|
| 278 |
if not self.groq_api_key:
|
| 279 |
return "⚠️ Groq API key not configured. Chat functionality unavailable."
|
| 280 |
|
| 281 |
+
# Map language label to code; default to 'en'
|
| 282 |
+
lang_code = self.supported_languages.get(language_label, "en")
|
| 283 |
+
|
| 284 |
+
# Build context from latest image analysis (if present)
|
| 285 |
context = ""
|
| 286 |
if self.image_context:
|
| 287 |
plant = self.image_context.get('plant_identification', {})
|
| 288 |
health = self.image_context.get('health_assessment', {})
|
|
|
|
| 289 |
context = f"""
|
| 290 |
Current Plant Analysis:
|
| 291 |
- Plant: {plant.get('common_name', 'Unknown')} ({plant.get('scientific_name', 'N/A')})
|
|
|
|
| 293 |
- Health: {health.get('status', 'Unknown')}
|
| 294 |
- Disease: {health.get('disease_name', 'None')}
|
| 295 |
- Severity: {health.get('severity', 'Unknown')}
|
| 296 |
+
- Confidence: {plant.get('confidence', 0)}%
|
| 297 |
"""
|
| 298 |
+
# If user selected a non-English language, translate user_message -> English for the model
|
| 299 |
+
user_message_en = user_message
|
| 300 |
+
if lang_code != "en":
|
| 301 |
+
user_message_en = self.translate_text(user_message, target_lang="en", source_lang=lang_code)
|
| 302 |
|
| 303 |
+
# Build messages
|
|
|
|
|
|
|
|
|
|
|
|
|
| 304 |
messages = [{
|
| 305 |
"role": "system",
|
| 306 |
"content": f"""You are an expert agricultural advisor specializing in plant diseases and crop management.
|
|
|
|
| 317 |
Keep responses clear, actionable, and farmer-friendly. Use bullet points for lists."""
|
| 318 |
}]
|
| 319 |
|
| 320 |
+
# Append last few history items (if present)
|
| 321 |
if history:
|
| 322 |
+
for msg in history[-8:]:
|
| 323 |
if isinstance(msg, dict):
|
| 324 |
+
messages.append(msg)
|
|
|
|
|
|
|
|
|
|
| 325 |
|
| 326 |
+
# Append user message (English version)
|
| 327 |
messages.append({"role": "user", "content": user_message_en})
|
| 328 |
|
| 329 |
try:
|
|
|
|
| 335 |
payload = {
|
| 336 |
"model": "llama-3.3-70b-versatile",
|
| 337 |
"messages": messages,
|
| 338 |
+
"temperature": 0.6,
|
| 339 |
+
"max_tokens": 1000,
|
| 340 |
+
"top_p": 0.95
|
| 341 |
}
|
| 342 |
|
| 343 |
response = requests.post(self.groq_api_url, headers=headers, json=payload, timeout=30)
|
| 344 |
+
if response.status_code != 200:
|
| 345 |
+
# include body for debugging
|
| 346 |
+
try:
|
| 347 |
+
return f"⚠️ API Error ({response.status_code}): {response.text}"
|
| 348 |
+
except:
|
| 349 |
+
return f"⚠️ API Error ({response.status_code})"
|
| 350 |
|
| 351 |
+
result = response.json()
|
| 352 |
+
# Expecting OpenAI-like chat completion shape
|
| 353 |
+
bot_response = ""
|
| 354 |
+
try:
|
| 355 |
bot_response = result['choices'][0]['message']['content']
|
| 356 |
+
except Exception:
|
| 357 |
+
# Fallback for different shapes
|
| 358 |
+
bot_response = result.get('choices', [{}])[0].get('text') or json.dumps(result)
|
| 359 |
+
|
| 360 |
+
# If user language isn't English, translate bot response -> user language
|
| 361 |
+
if lang_code != "en" and bot_response:
|
| 362 |
+
bot_response_translated = self.translate_text(bot_response, target_lang=lang_code, source_lang="en")
|
| 363 |
+
return bot_response_translated
|
| 364 |
+
return bot_response
|
| 365 |
+
|
| 366 |
+
except requests.Timeout:
|
| 367 |
+
return "⚠️ Groq API request timed out."
|
| 368 |
except Exception as e:
|
| 369 |
return f"⚠️ Error: {str(e)}"
|
| 370 |
|
| 371 |
def translate_text(self, text: str, target_lang: str, source_lang: str = "en") -> str:
    """Best-effort translation of ``text`` from source_lang to target_lang.

    Uses the Groq chat endpoint as an ad-hoc translator; a dedicated
    translation API would be more reliable for production. On any
    failure (missing key, non-200 response, timeout, unexpected payload
    shape) the original text is returned unchanged so the UI never breaks.

    Args:
        text: Text to translate (may be empty).
        target_lang: Language code, e.g. "hi".
        source_lang: Language code of ``text``; defaults to "en".

    Returns:
        Translated text, or the original ``text`` on any failure / no-op.
    """
    # No-op for empty input or identical source/target languages.
    if not text or target_lang == source_lang:
        return text

    # Translation rides on the Groq key; without it, degrade gracefully.
    if not self.groq_api_key:
        return text

    # Map language codes back to the human-readable UI labels
    # (e.g. "hi" -> "🇮🇳 हिंदी (Hindi)") so the model gets a clear instruction.
    code_to_name = {v: k for k, v in self.supported_languages.items()}
    target_name = code_to_name.get(target_lang, target_lang)
    source_name = code_to_name.get(source_lang, source_lang)

    headers = {
        "Authorization": f"Bearer {self.groq_api_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "llama-3.3-70b-versatile",
        "messages": [
            {
                "role": "system",
                "content": f"Translate the following text from {source_name} to {target_name}. Only provide the translation, no extra commentary."
            },
            {"role": "user", "content": text}
        ],
        # Deterministic output: translation should not be creative.
        "temperature": 0.0,
        "max_tokens": 1500
    }

    try:
        response = requests.post(self.groq_api_url, headers=headers, json=payload, timeout=20)
        if response.status_code != 200:
            return text
        result = response.json()
        try:
            return result['choices'][0]['message']['content'].strip()
        except (KeyError, IndexError, TypeError):
            # Unexpected completion shape — fall back to the original text.
            return text
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return text
|
| 421 |
|
|
|
|
| 427 |
def create_advanced_interface():
|
| 428 |
system = AgricultureIntelligenceSystem()
|
| 429 |
|
| 430 |
+
# Custom CSS for professional look (kept same)
|
| 431 |
custom_css = """
|
| 432 |
.container {max-width: 1400px; margin: auto;}
|
| 433 |
.header {background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 2rem; border-radius: 10px; margin-bottom: 2rem;}
|
|
|
|
| 442 |
|
| 443 |
def process_image(image):
    """Run Gemini analysis on an uploaded image and build the UI outputs.

    Returns a 4-tuple consumed by the Gradio click handler:
        (analysis dict or None, analysis/error HTML, suggested-question list,
         visibility update for the suggestion-buttons row)
    """
    if image is None:
        return None, "<div style='text-align:center;color:#666;padding:1rem;'>⚠️ Please upload an image</div>", [], gr.update(visible=False)

    try:
        # Gradio may hand us a numpy array or a PIL image; normalize to RGB PIL.
        img = Image.fromarray(image).convert('RGB') if isinstance(image, np.ndarray) else image.convert('RGB')

        analysis = system.analyze_with_gemini(img)

        if "error" in analysis:
            error_html = f"""
            <div style='background: #fee; padding: 2rem; border-radius: 8px; border-left: 4px solid #f44;'>
                <h3 style='color: #c33; margin: 0;'>❌ Analysis Error</h3>
                <p style='margin: 0.5rem 0 0 0;'>{analysis.get('error')}</p>
            """
            # Surface any raw API payload (truncated) to aid debugging.
            # Fixed: the or-chain previously checked 'raw_response' twice.
            raw = analysis.get('raw') or analysis.get('raw_response')
            if raw:
                error_html += f"<pre style='white-space:pre-wrap;margin-top:0.5rem;background:#fff;padding:0.5rem;border-radius:4px;color:#333;'>Debug: {str(raw)[:1000]}</pre>"
            error_html += "</div>"
            # Provide nothing to suggestions; keep the suggestion row hidden.
            return analysis, error_html, [], gr.update(visible=False)

        # Cache the analysis so the chat assistant can use it as context.
        system.image_context = analysis
        system.current_analysis = analysis

        analysis_html = format_analysis_card(analysis)
        suggestions = system.generate_suggested_questions(analysis)

        return analysis, analysis_html, suggestions, gr.update(visible=True)

    except Exception as e:
        return {"error": str(e)}, f"<div style='color:#c33;'>⚠️ Error: {str(e)}</div>", [], gr.update(visible=False)
|
| 475 |
|
| 476 |
def format_analysis_card(analysis: Dict) -> str:
|
| 477 |
+
plant = analysis.get('plant_identification', {}) if isinstance(analysis, dict) else {}
|
| 478 |
+
health = analysis.get('health_assessment', {}) if isinstance(analysis, dict) else {}
|
| 479 |
+
symptoms = analysis.get('visual_symptoms', []) if isinstance(analysis, dict) else []
|
| 480 |
+
treatment = analysis.get('treatment', {}) if isinstance(analysis, dict) else {}
|
| 481 |
+
urgency = analysis.get('urgency', {}) if isinstance(analysis, dict) else {}
|
| 482 |
|
| 483 |
+
status = (health.get('status') or '').lower()
|
| 484 |
disease_name = health.get('disease_name', 'None')
|
| 485 |
|
| 486 |
# Determine card style
|
|
|
|
| 488 |
card_class = 'healthy-card'
|
| 489 |
icon = '✅'
|
| 490 |
status_text = 'Plant is Healthy'
|
| 491 |
+
elif disease_name and disease_name not in ('None', 'None detected', None, ''):
|
| 492 |
card_class = 'disease-card'
|
| 493 |
icon = '⚠️'
|
| 494 |
status_text = 'Disease Detected'
|
|
|
|
| 531 |
html += "</ul></div>"
|
| 532 |
|
| 533 |
# Treatment
|
| 534 |
+
if treatment and (treatment.get('immediate_actions') or treatment.get('chemical_treatments') or treatment.get('organic_alternatives')):
|
| 535 |
html += """
|
| 536 |
<div style='background: #fff; padding: 1.5rem; border-radius: 8px; margin: 1rem 0; border: 1px solid #e0e0e0;'>
|
| 537 |
<h3 style='margin: 0 0 1rem 0; color: #333;'>💊 Treatment Recommendations</h3>
|
|
|
|
| 577 |
|
| 578 |
return html
|
| 579 |
|
| 580 |
+
def handle_suggestion_click(question, history, language_label, analysis):
|
| 581 |
if not question:
|
| 582 |
+
return history or [], ""
|
| 583 |
|
| 584 |
# Get bot response
|
| 585 |
+
bot_response = system.chat_with_llama(question, language_label, history or [])
|
| 586 |
|
| 587 |
# Update history
|
| 588 |
if history is None:
|
|
|
|
| 593 |
|
| 594 |
return history, ""
|
| 595 |
|
| 596 |
+
def send_message(message, history, language_label, analysis):
    """Validate a typed chat message and route it through the shared handler.

    Returns the (possibly updated) chat history plus an empty string, which
    the UI uses to clear the message textbox.
    """
    text = (message or "").strip()
    if not text:
        # Blank input: leave the conversation untouched and just clear the box.
        return history or [], ""
    return handle_suggestion_click(text, history or [], language_label, analysis)
|
|
|
|
| 600 |
|
| 601 |
def clear_all():
    """Reset server-side session state and return defaults for every output widget."""
    # Forget everything about the current image and conversation.
    system.conversation_history = []
    system.image_context = None
    system.current_analysis = None
    placeholder = (
        "<div style='text-align:center;padding:1rem;color:#666;'>"
        "👋 Upload a plant image to begin AI-powered diagnosis</div>"
    )
    # Order: image input, analysis state, analysis card, chatbot,
    # suggestions state, suggestion-buttons row visibility.
    return None, None, placeholder, [], None, gr.update(visible=False)
|
| 606 |
|
| 607 |
# Create interface
|
| 608 |
with gr.Blocks(title="AgriBot Pro - AI Plant Doctor") as demo:
|
|
|
|
| 614 |
gr.HTML("""
|
| 615 |
<div class='header'>
|
| 616 |
<h1>🌾 AgriBot Pro - AI Plant Doctor</h1>
|
|
|
|
| 617 |
</div>
|
| 618 |
""")
|
| 619 |
|
|
|
|
| 657 |
)
|
| 658 |
|
| 659 |
# Suggested questions row
|
| 660 |
+
suggestions_row = gr.Row(visible=False)
|
| 661 |
+
with suggestions_row:
|
| 662 |
gr.Markdown("### 💡 Suggested Questions:")
|
| 663 |
|
| 664 |
suggestions_display = gr.HTML(visible=False)
|
| 665 |
|
| 666 |
# Create suggestion buttons dynamically
|
| 667 |
suggestion_buttons = []
|
| 668 |
+
suggestion_btns_row = gr.Row(visible=False)
|
| 669 |
+
with suggestion_btns_row:
|
| 670 |
for i in range(6):
|
| 671 |
btn = gr.Button("", visible=False, elem_classes="suggestion-btn")
|
| 672 |
suggestion_buttons.append(btn)
|
|
|
|
| 681 |
send_btn = gr.Button("📤 Send", variant="primary")
|
| 682 |
clear_btn = gr.Button("🗑️ Clear All")
|
| 683 |
|
| 684 |
+
# Helper to update suggestion buttons (returns list of updates for each button + the container)
|
| 685 |
def update_suggestions(suggestions):
    """Produce one gr.update per suggestion button plus one for the row container.

    Returns len(suggestion_buttons) + 1 updates: each button is shown with its
    question text (or hidden/cleared when there is no question for that slot),
    and the final update toggles the container row's visibility.
    """
    total = len(suggestion_buttons)
    if not suggestions:
        # No questions at all: hide every button and the surrounding row.
        return [gr.update(visible=False, value="")] * total + [gr.update(visible=False)]
    button_updates = [
        gr.update(visible=True, value=suggestions[idx])
        if idx < len(suggestions)
        else gr.update(visible=False, value="")
        for idx in range(total)
    ]
    button_updates.append(gr.update(visible=True))  # show container
    return button_updates
|
| 698 |
|
| 699 |
+
# Wire analyze button
|
| 700 |
analyze_btn.click(
|
| 701 |
fn=process_image,
|
| 702 |
inputs=[image_input],
|
| 703 |
outputs=[analysis_state, analysis_display, gr.State(), suggestion_btns_row]
|
| 704 |
).then(
|
| 705 |
+
# generate suggestions based on analysis_state
|
| 706 |
+
fn=lambda analysis: system.generate_suggested_questions(analysis) if analysis and isinstance(analysis, dict) and 'error' not in analysis else [],
|
| 707 |
inputs=[analysis_state],
|
| 708 |
outputs=[gr.State()]
|
| 709 |
).then(
|
|
|
|
| 712 |
outputs=suggestion_buttons + [suggestion_btns_row]
|
| 713 |
)
|
| 714 |
|
| 715 |
+
# Connect suggestion buttons (each click calls handle_suggestion_click)
|
| 716 |
for btn in suggestion_buttons:
|
| 717 |
btn.click(
|
| 718 |
+
fn=handle_suggestion_click,
|
| 719 |
inputs=[btn, chatbot, language_selector, analysis_state],
|
| 720 |
outputs=[chatbot, msg_input]
|
| 721 |
)
|
| 722 |
|
| 723 |
+
# Sending messages
|
| 724 |
msg_input.submit(
|
| 725 |
fn=send_message,
|
| 726 |
inputs=[msg_input, chatbot, language_selector, analysis_state],
|
|
|
|
| 760 |
print("⚠️ GOOGLE_API_KEY not found - Image analysis disabled")
|
| 761 |
print("Get free key: https://aistudio.google.com/app/apikey\n")
|
| 762 |
else:
|
| 763 |
+
print("✅ Gemini enabled\n")
|
| 764 |
|
| 765 |
if not groq_key:
|
| 766 |
print("⚠️ GROQ_API_KEY not found - Chat disabled")
|
|
|
|
| 773 |
print("🚀 Launching AgriBot Pro...\n")
|
| 774 |
print("="*70 + "\n")
|
| 775 |
|
|
|
|
| 776 |
# Launch the Gradio app. Binding 0.0.0.0:7860 is the conventional address
# for a Hugging Face Space container; share=False since Spaces already
# exposes a public URL. ssr_mode=False disables Gradio's server-side
# rendering — presumably to avoid SSR issues in the hosting container
# (TODO confirm).
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,
    show_error=True,
    ssr_mode=False
)
|