diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..1c79e556ac4cc2ac6c4a036cb5627974d16928ca
Binary files /dev/null and b/.DS_Store differ
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..e92cf9b594e8e4608b60733d93926bdec544b5c1
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.pt filter=lfs diff=lfs merge=lfs -text
diff --git a/.ipynb_checkpoints/config-checkpoint.py b/.ipynb_checkpoints/config-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0ac883623c95f479b8e3f72798a737448912ca5
--- /dev/null
+++ b/.ipynb_checkpoints/config-checkpoint.py
@@ -0,0 +1,452 @@
+"""
+FarmEyes Configuration File
+===========================
+Central configuration for the FarmEyes crop disease detection application.
+Contains model paths, class mappings, device settings, and app configurations.
+
+Device: Apple Silicon M1 Pro with MPS (Metal Performance Shaders) acceleration
+"""
+
+import os
+from pathlib import Path
+from typing import Dict, List, Optional
+from dataclasses import dataclass, field
+
+
+# =============================================================================
+# PATH CONFIGURATIONS
+# =============================================================================
+
+# Base project directory - update this to your local path
+BASE_DIR = Path(__file__).parent.resolve()
+
+# Data directories
+DATA_DIR = BASE_DIR / "data"
+STATIC_DIR = BASE_DIR / "static"
+MODELS_DIR = BASE_DIR / "models"
+OUTPUTS_DIR = BASE_DIR / "outputs"
+
+# Create directories if they don't exist
+for directory in [DATA_DIR, STATIC_DIR, MODELS_DIR, OUTPUTS_DIR]:
+ directory.mkdir(parents=True, exist_ok=True)
+
+# Knowledge base and UI translations paths
+KNOWLEDGE_BASE_PATH = DATA_DIR / "knowledge_base.json"
+UI_TRANSLATIONS_PATH = STATIC_DIR / "ui_translations.json"
+
+
+# =============================================================================
+# MODEL CONFIGURATIONS
+# =============================================================================
+
+@dataclass
+class YOLOConfig:
+ """Configuration for YOLOv11 disease detection model"""
+
+ # Path to trained YOLOv11 model weights (.pt file)
+ # Update this path once your model training is complete
+ model_path: Path = MODELS_DIR / "farmeyes_yolov11.pt"
+
+ # Confidence threshold for detections (0.0 - 1.0)
+ confidence_threshold: float = 0.5
+
+ # IoU threshold for non-maximum suppression
+ iou_threshold: float = 0.45
+
+ # Input image size (YOLOv11 default)
+ input_size: int = 640
+
+ # Maximum number of detections per image
+ max_detections: int = 10
+
+ # Device for inference ('mps' for Apple Silicon, 'cuda' for NVIDIA, 'cpu' for CPU)
+ device: str = "mps"
+
+
+@dataclass
+class NATLaSConfig:
+ """Configuration for N-ATLaS language model (GGUF format)"""
+
+ # Hugging Face model repository
+ hf_repo: str = "tosinamuda/N-ATLaS-GGUF"
+
+ # GGUF model filename (16-bit quantized version)
+ model_filename: str = "N-ATLaS-8B-Instruct-v2.2-F16.gguf"
+
+ # Local path where model will be downloaded/cached
+ model_path: Path = MODELS_DIR / "natlas"
+
+ # Full path to the GGUF file
+ @property
+ def gguf_path(self) -> Path:
+ return self.model_path / self.model_filename
+
+ # Context window size (tokens)
+ context_length: int = 4096
+
+ # Maximum tokens to generate in response
+ max_tokens: int = 1024
+
+ # Temperature for text generation (0.0 = deterministic, 1.0 = creative)
+ temperature: float = 0.7
+
+ # Top-p (nucleus) sampling
+ top_p: float = 0.9
+
+ # Number of GPU layers to offload (for MPS acceleration)
+ # Set to -1 to offload all layers, 0 for CPU only
+ n_gpu_layers: int = -1
+
+ # Number of threads for CPU computation
+ n_threads: int = 8
+
+ # Batch size for prompt processing
+ n_batch: int = 512
+
+ # Device for inference
+ device: str = "mps"
+
+
+# =============================================================================
+# DISEASE CLASS MAPPINGS
+# =============================================================================
+
+# YOLOv11 class index to disease key mapping
+# These match the class indices from our trained model
+CLASS_INDEX_TO_KEY: Dict[int, str] = {
+ 0: "cassava_bacterial_blight",
+ 1: "cassava_healthy",
+ 2: "cassava_mosaic_disease",
+ 3: "cocoa_healthy",
+ 4: "cocoa_monilia_disease",
+ 5: "cocoa_phytophthora_disease",
+ 6: "tomato_gray_mold",
+ 7: "tomato_healthy",
+ 8: "tomato_viral_disease",
+ 9: "tomato_wilt_disease"
+}
+
+# Reverse mapping: disease key to class index
+KEY_TO_CLASS_INDEX: Dict[str, int] = {v: k for k, v in CLASS_INDEX_TO_KEY.items()}
+
+# Class names as they appear in YOLO training (matches our data.yaml file)
+CLASS_NAMES: List[str] = [
+ "Cassava Bacteria Blight",
+ "Cassava Healthy Leaf",
+ "Cassava Mosaic Disease",
+ "Cocoa Healthy Leaf",
+ "Cocoa Monilia Disease",
+ "Cocoa Phytophthora Disease",
+ "Tomato Gray Mold Disease",
+ "Tomato Healthy Leaf",
+ "Tomato Viral Disease",
+ "Tomato Wilt Disease"
+]
+
+# Healthy class indices (for quick identification)
+HEALTHY_CLASS_INDICES: List[int] = [1, 3, 7] # cassava_healthy, cocoa_healthy, tomato_healthy
+
+# Disease class indices (excluding healthy)
+DISEASE_CLASS_INDICES: List[int] = [0, 2, 4, 5, 6, 8, 9]
+
+# Crop type mapping
+CROP_TYPES: Dict[str, List[int]] = {
+ "cassava": [0, 1, 2],
+ "cocoa": [3, 4, 5],
+ "tomato": [6, 7, 8, 9]
+}
+
+# Reverse mapping: class index to crop type
+CLASS_TO_CROP: Dict[int, str] = {}
+for crop, indices in CROP_TYPES.items():
+ for idx in indices:
+ CLASS_TO_CROP[idx] = crop
+
+
+# =============================================================================
+# LANGUAGE CONFIGURATIONS
+# =============================================================================
+
+@dataclass
+class LanguageConfig:
+ """Configuration for supported languages"""
+
+ # Supported language codes
+ supported_languages: List[str] = field(default_factory=lambda: ["en", "ha", "yo", "ig"])
+
+ # Default language
+ default_language: str = "en"
+
+ # Language display names
+ language_names: Dict[str, str] = field(default_factory=lambda: {
+ "en": "English",
+ "ha": "Hausa",
+ "yo": "Yorùbá",
+ "ig": "Igbo"
+ })
+
+ # Language codes for N-ATLaS prompts
+ language_full_names: Dict[str, str] = field(default_factory=lambda: {
+ "en": "English",
+ "ha": "Hausa",
+ "yo": "Yoruba",
+ "ig": "Igbo"
+ })
+
+
+# =============================================================================
+# APPLICATION CONFIGURATIONS
+# =============================================================================
+
+@dataclass
+class AppConfig:
+ """General application configuration"""
+
+ # App information
+ app_name: str = "FarmEyes"
+ app_version: str = "1.0.0"
+ app_tagline: str = "AI-Powered Crop Health Assistant"
+
+ # Gradio server settings
+ server_host: str = "0.0.0.0"
+ server_port: int = 7860
+ share: bool = False # Set to True for public Gradio link
+
+ # Debug mode
+ debug: bool = True
+
+ # Maximum image file size (in bytes) - 10MB
+ max_image_size: int = 10 * 1024 * 1024
+
+ # Supported image formats
+ supported_image_formats: List[str] = field(default_factory=lambda: [
+ ".jpg", ".jpeg", ".png", ".webp", ".bmp"
+ ])
+
+ # Confidence thresholds for user feedback
+ high_confidence_threshold: float = 0.85
+ medium_confidence_threshold: float = 0.60
+ low_confidence_threshold: float = 0.40
+
+ # Enable/disable features
+ enable_voice_input: bool = False # Future feature
+ enable_offline_mode: bool = False # Future feature
+ enable_history: bool = True
+
+
+# =============================================================================
+# DEVICE CONFIGURATION (Apple Silicon Specific)
+# =============================================================================
+
+@dataclass
+class DeviceConfig:
+ """Device and hardware configuration for Apple Silicon M1 Pro"""
+
+ # Primary compute device
+ # Options: 'mps' (Apple Silicon GPU), 'cuda' (NVIDIA GPU), 'cpu'
+ compute_device: str = "mps"
+
+ # Fallback device if primary is unavailable
+ fallback_device: str = "cpu"
+
+ # Enable MPS (Metal Performance Shaders) for PyTorch
+ use_mps: bool = True
+
+ # Memory management
+ # Set to True to clear GPU cache after each inference
+ clear_cache_after_inference: bool = True
+
+ @staticmethod
+ def get_device() -> str:
+ """
+ Determine the best available device for computation.
+ Returns 'mps' for Apple Silicon, 'cuda' for NVIDIA, or 'cpu'.
+ """
+ import torch
+
+ # Check for Apple Silicon MPS
+ if torch.backends.mps.is_available():
+ return "mps"
+ # Check for NVIDIA CUDA
+ elif torch.cuda.is_available():
+ return "cuda"
+ # Fallback to CPU
+ else:
+ return "cpu"
+
+ @staticmethod
+ def get_device_info() -> Dict[str, str]:
+ """Get information about the current compute device."""
+ import torch
+ import platform
+
+ info = {
+ "platform": platform.system(),
+ "processor": platform.processor(),
+ "python_version": platform.python_version(),
+ "pytorch_version": torch.__version__,
+ "device": DeviceConfig.get_device()
+ }
+
+ if torch.backends.mps.is_available():
+ info["mps_available"] = "Yes"
+ info["mps_built"] = str(torch.backends.mps.is_built())
+
+ return info
+
+
+# =============================================================================
+# PROMPT TEMPLATES CONFIGURATION
+# =============================================================================
+
+@dataclass
+class PromptConfig:
+ """Configuration for N-ATLaS prompt templates"""
+
+ # System prompt for the N-ATLaS model
+ system_prompt: str = """You are FarmEyes, an AI agricultural assistant helping Nigerian farmers.
+You provide advice about crop diseases and treatments in a clear, simple, and helpful manner.
+Always be respectful and use language that farmers can easily understand.
+When providing treatment costs, use Nigerian Naira (₦).
+Focus on practical advice that farmers can implement."""
+
+ # Maximum length for translated text
+ max_translation_length: int = 500
+
+ # Temperature for different tasks
+ translation_temperature: float = 0.3 # Lower for more accurate translations
+ diagnosis_temperature: float = 0.7 # Higher for more natural explanations
+
+
+# =============================================================================
+# LOGGING CONFIGURATION
+# =============================================================================
+
+@dataclass
+class LogConfig:
+ """Logging configuration"""
+
+ # Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL
+ log_level: str = "INFO"
+
+ # Log file path
+ log_file: Path = BASE_DIR / "logs" / "farmeyes.log"
+
+ # Log format
+ log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+
+ # Enable console logging
+ console_logging: bool = True
+
+ # Enable file logging
+ file_logging: bool = True
+
+
+# =============================================================================
+# INSTANTIATE DEFAULT CONFIGURATIONS
+# =============================================================================
+
+# Create default configuration instances
+yolo_config = YOLOConfig()
+natlas_config = NATLaSConfig()
+language_config = LanguageConfig()
+app_config = AppConfig()
+device_config = DeviceConfig()
+prompt_config = PromptConfig()
+log_config = LogConfig()
+
+
+# =============================================================================
+# UTILITY FUNCTIONS
+# =============================================================================
+
+def get_disease_key(class_index: int) -> Optional[str]:
+ """Get disease key from class index."""
+ return CLASS_INDEX_TO_KEY.get(class_index)
+
+
+def get_class_index(disease_key: str) -> Optional[int]:
+ """Get class index from disease key."""
+ return KEY_TO_CLASS_INDEX.get(disease_key)
+
+
+def get_crop_type(class_index: int) -> Optional[str]:
+ """Get crop type from class index."""
+ return CLASS_TO_CROP.get(class_index)
+
+
+def is_healthy(class_index: int) -> bool:
+ """Check if class index represents a healthy plant."""
+ return class_index in HEALTHY_CLASS_INDICES
+
+
+def validate_config() -> Dict[str, bool]:
+ """
+ Validate that all required configuration files and paths exist.
+ Returns a dictionary with validation results.
+ """
+ validations = {
+ "knowledge_base_exists": KNOWLEDGE_BASE_PATH.exists(),
+ "ui_translations_exists": UI_TRANSLATIONS_PATH.exists(),
+ "models_dir_exists": MODELS_DIR.exists(),
+ "yolo_model_exists": yolo_config.model_path.exists(),
+ "natlas_model_exists": natlas_config.gguf_path.exists(),
+ }
+ return validations
+
+
+def print_config_summary():
+ """Print a summary of the current configuration."""
+ print("=" * 60)
+ print("FarmEyes Configuration Summary")
+ print("=" * 60)
+ print(f"\n📁 Paths:")
+ print(f" Base Directory: {BASE_DIR}")
+ print(f" Knowledge Base: {KNOWLEDGE_BASE_PATH}")
+ print(f" UI Translations: {UI_TRANSLATIONS_PATH}")
+ print(f" Models Directory: {MODELS_DIR}")
+
+ print(f"\n🤖 YOLOv11 Model:")
+ print(f" Model Path: {yolo_config.model_path}")
+ print(f" Confidence Threshold: {yolo_config.confidence_threshold}")
+ print(f" Device: {yolo_config.device}")
+
+ print(f"\n🗣️ N-ATLaS Model:")
+ print(f" HuggingFace Repo: {natlas_config.hf_repo}")
+ print(f" Model File: {natlas_config.model_filename}")
+ print(f" Context Length: {natlas_config.context_length}")
+ print(f" GPU Layers: {natlas_config.n_gpu_layers}")
+
+ print(f"\n🌍 Languages:")
+ print(f" Supported: {', '.join(language_config.supported_languages)}")
+ print(f" Default: {language_config.default_language}")
+
+ print(f"\n📱 Application:")
+ print(f" Name: {app_config.app_name} v{app_config.app_version}")
+ print(f" Server: {app_config.server_host}:{app_config.server_port}")
+ print(f" Debug Mode: {app_config.debug}")
+
+ print(f"\n💻 Device:")
+ device_info = device_config.get_device_info()
+ print(f" Platform: {device_info.get('platform', 'Unknown')}")
+ print(f" Compute Device: {device_info.get('device', 'Unknown')}")
+ print(f" PyTorch Version: {device_info.get('pytorch_version', 'Unknown')}")
+
+ print("\n" + "=" * 60)
+
+ # Validation
+ print("\n🔍 Configuration Validation:")
+ validations = validate_config()
+ for key, value in validations.items():
+ status = "✅" if value else "❌"
+ print(f" {status} {key.replace('_', ' ').title()}")
+
+ print("\n" + "=" * 60)
+
+
+# =============================================================================
+# MAIN - Run configuration check
+# =============================================================================
+
+if __name__ == "__main__":
+ print_config_summary()
diff --git a/.ipynb_checkpoints/test-checkpoint.ipynb b/.ipynb_checkpoints/test-checkpoint.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..363fcab7ed6e9634e198cf5555ceb88932c9a245
--- /dev/null
+++ b/.ipynb_checkpoints/test-checkpoint.ipynb
@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..08bed49d87e8deb249b8e086af50fc403c7b68e1
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,70 @@
+# =============================================================================
+# FarmEyes - HuggingFace Spaces Dockerfile
+# =============================================================================
+# AI-Powered Crop Disease Detection for African Farmers
+#
+# This Dockerfile is optimized for HuggingFace Spaces free tier:
+# - Uses Python 3.10 slim image
+# - Installs llama-cpp-python for CPU inference
+# - Downloads N-ATLaS GGUF model at runtime (~4.92GB)
+# - Runs on port 7860 (HF Spaces default)
+# =============================================================================
+
+FROM python:3.10-slim
+
+# Set working directory
+WORKDIR /app
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV HOST=0.0.0.0
+ENV PORT=7860
+
+# Install system dependencies
+# - ffmpeg: for audio processing (Whisper)
+# - libsm6, libxext6, libgl1: for OpenCV (image processing)
+# - build-essential, cmake: for compiling llama-cpp-python
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ ffmpeg \
+ libsm6 \
+ libxext6 \
+ libgl1-mesa-glx \
+ build-essential \
+ cmake \
+ git \
+ && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements first (for Docker cache optimization)
+COPY requirements.txt .
+
+# Upgrade pip
+RUN pip install --no-cache-dir --upgrade pip
+
+# Install Python dependencies
+# Note: llama-cpp-python is compiled for CPU (no CUDA on free tier)
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Install llama-cpp-python for CPU
+# This enables GGUF model inference
+RUN pip install --no-cache-dir llama-cpp-python
+
+# Copy all application code
+COPY . .
+
+# Create necessary directories
+RUN mkdir -p /app/uploads /app/temp
+
+# Expose port 7860 (HuggingFace Spaces default)
+EXPOSE 7860
+
+# Health check (NOTE: this uses curl, which is NOT installed by the apt-get step above — add "curl" to the package list or this check will always fail)
+HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
+ CMD curl -f http://localhost:7860/api/health || exit 1
+
+# Run the application
+# The app will:
+# 1. Start FastAPI server
+# 2. Download N-ATLaS GGUF model on first request (~5-15 min)
+# 3. Serve the web interface
+CMD ["python", "main.py"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..763abc3592051a78e89081a9a34c1e1e7bf09f2d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,143 @@
+---
+title: FarmEyes
+emoji: 🌱
+colorFrom: green
+colorTo: yellow
+sdk: docker
+app_port: 7860
+pinned: false
+suggested_hardware: cpu-basic
+---
+
+# 🌱 FarmEyes
+
+**AI-Powered Crop Disease Detection for African Farmers**
+
+[](https://awarri.com)
+[](https://huggingface.co/NCAIR1/N-ATLaS)
+
+---
+
+## 🎯 What is FarmEyes?
+
+FarmEyes is an AI application that helps African farmers identify crop diseases and get treatment recommendations in their native languages. Simply upload a photo of your crop, and FarmEyes will:
+
+1. **Detect** the disease using computer vision (YOLOv11)
+2. **Diagnose** the condition with severity assessment
+3. **Translate** all information to your preferred language
+4. **Chat** with an AI assistant for follow-up questions
+
+---
+
+## 🌍 Supported Languages
+
+| Language | Native Name |
+|----------|-------------|
+| 🇬🇧 English | English |
+| 🇳🇬 Hausa | Yaren Hausa |
+| 🇳🇬 Yoruba | Èdè Yorùbá |
+| 🇳🇬 Igbo | Asụsụ Igbo |
+
+---
+
+## 🦠 Detectable Diseases
+
+| Crop | Diseases |
+|------|----------|
+| 🌿 **Cassava** | Bacterial Blight, Mosaic Virus |
+| 🍫 **Cocoa** | Monilia Disease, Phytophthora Disease |
+| 🍅 **Tomato** | Gray Mold Disease, Viral Disease, Wilt Disease |
+
+---
+
+## 🚀 How to Use
+
+### Step 1: Select Language
+Choose your preferred language from the welcome screen.
+
+### Step 2: Upload Image
+Take a photo of the affected crop leaf and upload it.
+
+### Step 3: View Results
+- Disease name and confidence score
+- Severity level (Low/Moderate/High/Critical)
+- Treatment recommendations
+- Cost estimates in Nigerian Naira (₦)
+
+### Step 4: Ask Questions
+Use the chat feature to ask follow-up questions about the diagnosis.
+
+---
+
+## 🔧 Technology Stack
+
+| Component | Technology |
+|-----------|------------|
+| **Disease Detection** | YOLOv11 (trained on African crops) |
+| **Language Model** | N-ATLaS (Nigerian multilingual AI) |
+| **Speech-to-Text** | OpenAI Whisper |
+| **Backend** | FastAPI |
+| **Frontend** | Custom HTML/CSS/JS |
+
+---
+
+## 📱 Features
+
+- ✅ **Image Upload** - Drag & drop or click to upload
+- ✅ **Real-time Detection** - Results in seconds
+- ✅ **Multilingual Support** - 4 Nigerian languages
+- ✅ **Voice Input** - Speak your questions
+- ✅ **Text-to-Speech** - Listen to responses
+- ✅ **Treatment Advice** - Practical farming guidance
+- ✅ **Cost Estimates** - In Nigerian Naira
+
+---
+
+## ⚠️ First Startup Notice
+
+**Please be patient on first use!**
+
+The N-ATLaS language model (~4.92GB) is downloaded automatically on first startup. This may take **5-15 minutes** depending on connection speed. Subsequent uses will be much faster.
+
+---
+
+## 🏆 About
+
+FarmEyes was built for the **Awarri Developer Challenge 2025** to address the critical need for accessible agricultural AI in Africa.
+
+**The Problem:**
+- 20-80% crop losses annually due to diseases
+- Only 1 extension worker per 10,000 farmers (FAO recommends 1:1,000)
+- Agricultural knowledge locked in English
+
+**Our Solution:**
+- AI-powered disease detection accessible via smartphone
+- Native language support through N-ATLaS
+- Practical, localized treatment recommendations
+
+---
+
+## 👨‍💻 Developer
+
+**Fola-AI**
+
+- 🤗 HuggingFace: [@Fola-AI](https://huggingface.co/Fola-AI)
+
+---
+
+## 📄 License
+
+Apache 2.0
+
+---
+
+## 🙏 Acknowledgments
+
+- [NCAIR](https://ncair.nitda.gov.ng/) for N-ATLaS model
+- [Ultralytics](https://ultralytics.com/) for YOLOv11
+- [HuggingFace](https://huggingface.co/) for hosting
+- [Awarri](https://awarri.com/) for the challenge opportunity
+
+---
+
+*Built with ❤️ for African Farmers*
diff --git a/api/.DS_Store b/api/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..39cb7c1e572a6f82613a427726097001c1277245
Binary files /dev/null and b/api/.DS_Store differ
diff --git a/api/__init__.py b/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ef2772a36a8c374b5fb7f8935a40921fd67b997
--- /dev/null
+++ b/api/__init__.py
@@ -0,0 +1,22 @@
+"""
+FarmEyes API Routes Package
+===========================
+REST API endpoint modules for the FarmEyes application.
+
+Endpoints:
+- /api/detect - Disease detection from images
+- /api/chat - Contextual chat with N-ATLaS
+- /api/transcribe - Voice-to-text with Whisper
+- /api/session - Session management
+- /api/translate - Text translation
+"""
+
+from api.routes.detection import router as detection_router
+from api.routes.chat import router as chat_router
+from api.routes.transcribe import router as transcribe_router
+
+__all__ = [
+ "detection_router",
+ "chat_router",
+ "transcribe_router"
+]
diff --git a/api/__pycache__/__init__.cpython-310.pyc b/api/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47e1c065b66b729a9a9a9fd2a72421f88002a516
Binary files /dev/null and b/api/__pycache__/__init__.cpython-310.pyc differ
diff --git a/api/__pycache__/__init__.cpython-312.pyc b/api/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ef71b9f1864b2b905a3fdbd64fa6289ff92ff06
Binary files /dev/null and b/api/__pycache__/__init__.cpython-312.pyc differ
diff --git a/api/routes/.DS_Store b/api/routes/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..49e3ebb2ee1d4ac6a14707db7d0a30e5023bf6ab
Binary files /dev/null and b/api/routes/.DS_Store differ
diff --git a/api/routes/__init__.py b/api/routes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..db372824ac9c774a1ebc5f7fea6f681aa72bbfc8
--- /dev/null
+++ b/api/routes/__init__.py
@@ -0,0 +1,15 @@
+"""
+FarmEyes API Routes
+===================
+Individual route modules for REST API endpoints.
+"""
+
+from api.routes.detection import router as detection_router
+from api.routes.chat import router as chat_router
+from api.routes.transcribe import router as transcribe_router
+
+__all__ = [
+ "detection_router",
+ "chat_router",
+ "transcribe_router"
+]
diff --git a/api/routes/__pycache__/__init__.cpython-310.pyc b/api/routes/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66b9216567de8a9da69a5ba3f815ce8f572e2517
Binary files /dev/null and b/api/routes/__pycache__/__init__.cpython-310.pyc differ
diff --git a/api/routes/__pycache__/__init__.cpython-312.pyc b/api/routes/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c87ea874d3019e130039dbc9366aa37a33ee4938
Binary files /dev/null and b/api/routes/__pycache__/__init__.cpython-312.pyc differ
diff --git a/api/routes/__pycache__/chat.cpython-310.pyc b/api/routes/__pycache__/chat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ecf69f888c367ae9d064ef3975bee02bc4f21e0
Binary files /dev/null and b/api/routes/__pycache__/chat.cpython-310.pyc differ
diff --git a/api/routes/__pycache__/chat.cpython-312.pyc b/api/routes/__pycache__/chat.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8f36a93ee2b2f2270abaccd9f6aabf47ded3c38
Binary files /dev/null and b/api/routes/__pycache__/chat.cpython-312.pyc differ
diff --git a/api/routes/__pycache__/detection.cpython-310.pyc b/api/routes/__pycache__/detection.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af3320ab54701e2d6dd8025bcf250dfb826cb90a
Binary files /dev/null and b/api/routes/__pycache__/detection.cpython-310.pyc differ
diff --git a/api/routes/__pycache__/detection.cpython-312.pyc b/api/routes/__pycache__/detection.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38cae35931a41a7d5ea5f25d693019df861c674a
Binary files /dev/null and b/api/routes/__pycache__/detection.cpython-312.pyc differ
diff --git a/api/routes/__pycache__/transcribe.cpython-310.pyc b/api/routes/__pycache__/transcribe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06a1417ad1eebe98b1a2e4a66c7843327137f457
Binary files /dev/null and b/api/routes/__pycache__/transcribe.cpython-310.pyc differ
diff --git a/api/routes/__pycache__/transcribe.cpython-312.pyc b/api/routes/__pycache__/transcribe.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23436c47ae632dba8071f6ee149ee8434ca3c689
Binary files /dev/null and b/api/routes/__pycache__/transcribe.cpython-312.pyc differ
diff --git a/api/routes/__pycache__/tts.cpython-310.pyc b/api/routes/__pycache__/tts.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4e7820ddd1f4be457df3ae5b0b903f5baaf2c16
Binary files /dev/null and b/api/routes/__pycache__/tts.cpython-310.pyc differ
diff --git a/api/routes/chat.py b/api/routes/chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c08e40a898f3b514ddf5f9a568d6d6cf37e19c4
--- /dev/null
+++ b/api/routes/chat.py
@@ -0,0 +1,340 @@
+"""
+FarmEyes Chat API Routes
+========================
+REST API endpoints for contextual agricultural chat.
+
+Endpoints:
+- POST /api/chat - Send message and get response
+- GET /api/chat/welcome - Get welcome message for chat page
+- GET /api/chat/history - Get chat history for session
+- DELETE /api/chat/history - Clear chat history
+"""
+
+import sys
+from pathlib import Path
+from typing import Optional, List
+from datetime import datetime
+import logging
+
+from fastapi import APIRouter, HTTPException, Query
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Create router
+router = APIRouter(prefix="/api/chat", tags=["Chat"])
+
+
+# =============================================================================
+# REQUEST/RESPONSE MODELS
+# =============================================================================
+
+class ChatRequest(BaseModel):
+ """Request model for chat message."""
+ session_id: str = Field(..., description="Session ID")
+ message: str = Field(..., min_length=1, max_length=2000, description="User message")
+ language: str = Field(default="en", description="Response language (en, ha, yo, ig)")
+
+
+class ChatResponse(BaseModel):
+ """Response model for chat message."""
+ success: bool
+ response: str
+ session_id: str
+ language: str
+ is_redirect: bool = False
+ context: Optional[dict] = None
+ timestamp: str
+
+
+class WelcomeResponse(BaseModel):
+ """Response model for welcome message."""
+ success: bool
+ response: str
+ session_id: str
+ language: str
+ context: Optional[dict] = None
+ is_welcome: bool = True
+
+
+class HistoryResponse(BaseModel):
+ """Response model for chat history."""
+ success: bool
+ session_id: str
+ messages: List[dict]
+ total_messages: int
+
+
+# =============================================================================
+# ENDPOINTS
+# =============================================================================
+
+@router.post("/", response_model=ChatResponse)
+async def send_chat_message(request: ChatRequest):
+ """
+ Send a chat message and get AI response.
+
+ The assistant will:
+ - Answer questions about the diagnosed disease
+ - Provide related agricultural advice
+ - Respond in the user's preferred language
+ - Redirect off-topic questions politely
+
+ Requires an active session with a diagnosis.
+ """
+ try:
+ # Validate language
+ valid_languages = ["en", "ha", "yo", "ig"]
+ language = request.language if request.language in valid_languages else "en"
+
+ # Validate message
+ message = request.message.strip()
+ if not message:
+ raise HTTPException(status_code=400, detail="Message cannot be empty")
+
+ if len(message) > 2000:
+ raise HTTPException(status_code=400, detail="Message too long (max 2000 characters)")
+
+ # Import chat service
+ from services.chat_service import get_chat_service
+
+ chat_service = get_chat_service()
+
+ # Get response
+ logger.info(f"Chat request from session {request.session_id[:8]}...")
+ result = chat_service.chat(
+ session_id=request.session_id,
+ message=message,
+ language=language
+ )
+
+ if not result.get("success", False):
+ # Handle specific error cases
+ error_type = result.get("error", "unknown")
+
+ if error_type == "no_diagnosis":
+ raise HTTPException(
+ status_code=400,
+ detail=result.get("response", "Please analyze an image first")
+ )
+ else:
+ raise HTTPException(
+ status_code=500,
+ detail=result.get("response", "Failed to generate response")
+ )
+
+ # Build response
+ response_data = {
+ "success": True,
+ "response": result.get("response", ""),
+ "session_id": result.get("session_id", request.session_id),
+ "language": result.get("language", language),
+ "is_redirect": result.get("is_redirect", False),
+ "context": result.get("context"),
+ "timestamp": datetime.now().isoformat()
+ }
+
+ return JSONResponse(content=response_data)
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Chat failed: {e}")
+ raise HTTPException(status_code=500, detail=f"Chat failed: {str(e)}")
+
+
+@router.get("/welcome", response_model=WelcomeResponse)
+async def get_welcome_message(
+ session_id: str = Query(..., description="Session ID"),
+ language: str = Query(default="en", description="Language code")
+):
+ """
+ Get welcome message for chat page.
+
+ Returns a personalized welcome message based on the
+ current diagnosis in the session. Should be called
+ when user navigates to the chat page.
+ """
+ try:
+ # Validate language
+ valid_languages = ["en", "ha", "yo", "ig"]
+ language = language if language in valid_languages else "en"
+
+ # Import chat service
+ from services.chat_service import get_chat_service
+
+ chat_service = get_chat_service()
+
+ # Get welcome message
+ result = chat_service.get_welcome_message(session_id, language)
+
+ if not result.get("success", False):
+ error_type = result.get("error", "unknown")
+
+ if error_type == "no_diagnosis":
+ raise HTTPException(
+ status_code=400,
+ detail=result.get("response", "Please analyze an image first")
+ )
+ else:
+ raise HTTPException(
+ status_code=500,
+ detail="Failed to generate welcome message"
+ )
+
+ response_data = {
+ "success": True,
+ "response": result.get("response", ""),
+ "session_id": result.get("session_id", session_id),
+ "language": result.get("language", language),
+ "context": result.get("context"),
+ "is_welcome": True
+ }
+
+ return JSONResponse(content=response_data)
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Get welcome failed: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get("/history", response_model=HistoryResponse)
+async def get_chat_history(
+ session_id: str = Query(..., description="Session ID"),
+ limit: int = Query(default=50, ge=1, le=100, description="Maximum messages to return")
+):
+ """
+ Get chat history for a session.
+
+ Returns all messages in the current chat session,
+ useful for restoring chat state when user returns
+ to the chat page.
+ """
+ try:
+ from services.chat_service import get_chat_service
+
+ chat_service = get_chat_service()
+ messages = chat_service.get_history(session_id)
+
+ # Apply limit
+ if len(messages) > limit:
+ messages = messages[-limit:]
+
+ return JSONResponse(content={
+ "success": True,
+ "session_id": session_id,
+ "messages": messages,
+ "total_messages": len(messages)
+ })
+
+ except Exception as e:
+ logger.error(f"Get history failed: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.delete("/history")
+async def clear_chat_history(
+ session_id: str = Query(..., description="Session ID")
+):
+ """
+ Clear chat history for a session.
+
+ Removes all messages but keeps the diagnosis context,
+ allowing user to start a fresh conversation about
+ the same diagnosis.
+ """
+ try:
+ from services.chat_service import get_chat_service
+
+ chat_service = get_chat_service()
+ success = chat_service.clear_history(session_id)
+
+ if success:
+ return JSONResponse(content={
+ "success": True,
+ "message": "Chat history cleared",
+ "session_id": session_id
+ })
+ else:
+ raise HTTPException(status_code=404, detail="Session not found")
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Clear history failed: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.get("/context")
+async def get_diagnosis_context(
+ session_id: str = Query(..., description="Session ID")
+):
+ """
+ Get current diagnosis context for chat.
+
+ Returns the diagnosis information being used as
+ context for the chat assistant. Useful for displaying
+ context banner in chat UI.
+ """
+ try:
+ from services.session_manager import get_session_manager
+
+ session_manager = get_session_manager()
+ diagnosis = session_manager.get_diagnosis(session_id)
+
+ if not diagnosis or not diagnosis.is_valid():
+ raise HTTPException(
+ status_code=404,
+ detail="No diagnosis found for this session"
+ )
+
+ return JSONResponse(content={
+ "success": True,
+ "session_id": session_id,
+ "context": diagnosis.to_dict(),
+ "context_string": diagnosis.get_context_string()
+ })
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Get context failed: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.post("/voice")
+async def chat_with_voice(
+ session_id: str = Query(..., description="Session ID"),
+ language: str = Query(default="en", description="Language code"),
+ text: str = Query(..., description="Transcribed text from voice")
+):
+ """
+ Send chat message from voice input.
+
+ Convenience endpoint that accepts already-transcribed
+ text from the voice input system. The transcription
+ is done separately via /api/transcribe.
+
+ This is the final step in the voice chat pipeline:
+ Voice → Whisper → Text → This endpoint → Response
+ """
+ try:
+ # Create request and use main chat endpoint logic
+ request = ChatRequest(
+ session_id=session_id,
+ message=text,
+ language=language
+ )
+
+ return await send_chat_message(request)
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Voice chat failed: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
diff --git a/api/routes/detection.py b/api/routes/detection.py
new file mode 100644
index 0000000000000000000000000000000000000000..15c63fa42959f23cabb309defad423885bc521b2
--- /dev/null
+++ b/api/routes/detection.py
@@ -0,0 +1,381 @@
+"""
+FarmEyes Detection API Routes
+=============================
+REST API endpoints for crop disease detection.
+
+Endpoints:
+- POST /api/detect - Analyze crop image for diseases
+- GET /api/detect/status - Check model status
+- GET /api/detect/classes - Get supported disease classes
+"""
+
+import sys
+import io
+import base64
+from pathlib import Path
+from typing import Optional
+from datetime import datetime
+import logging
+
+from fastapi import APIRouter, File, UploadFile, Form, HTTPException, Query
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field
+
# Configure logging
# NOTE(review): logging.basicConfig at import time configures the root logger
# as a side effect of importing this routes module; the application entry
# point is usually a better place for this — confirm intent.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Create router; every endpoint below is mounted under /api/detect
router = APIRouter(prefix="/api/detect", tags=["Detection"])
+
+
+# =============================================================================
+# REQUEST/RESPONSE MODELS
+# =============================================================================
+
class DetectionRequest(BaseModel):
    """Request model for detection with a base64 image (POST /api/detect/base64)."""
    image_base64: str = Field(..., description="Base64 encoded image data")  # data-URL prefix is allowed and stripped
    language: str = Field(default="en", description="Language code (en, ha, yo, ig)")  # unknown codes fall back to "en"
    session_id: Optional[str] = Field(default=None, description="Session ID for context")  # None -> a new session is created
+
+
class DetectionResponse(BaseModel):
    """Response model for disease detection.

    NOTE(review): the detection endpoints return JSONResponse directly, so
    FastAPI does not validate responses against this model — it effectively
    serves as OpenAPI documentation only.
    """
    success: bool
    session_id: str
    detection: dict   # summary: disease_name, crop_type, confidence, severity_level, is_healthy
    diagnosis: dict   # full diagnosis report (report.to_dict())
    language: str     # language actually used (after fallback to "en")
    timestamp: str    # ISO-8601 timestamp of the analysis
+
+
class StatusResponse(BaseModel):
    """Response model for detection service status."""
    status: str                  # overall service state, e.g. "operational"
    yolo_loaded: bool            # YOLO detector weights loaded
    natlas_loaded: bool          # N-ATLaS language model loaded
    knowledge_base_loaded: bool  # disease knowledge base available
    supported_languages: list
    supported_crops: list
+
+
class ClassesResponse(BaseModel):
    """Response model for supported disease classes."""
    total_classes: int
    classes: list  # entries shaped {"index", "name", "key", "crop"}
    crops: dict    # crop metadata from config.CROP_TYPES — TODO confirm exact shape
+
+
+# =============================================================================
+# HELPER FUNCTIONS
+# =============================================================================
+
def decode_base64_image(base64_string: str) -> bytes:
    """
    Decode a base64 image string to raw bytes.

    Accepts both bare base64 payloads and data URLs
    ("data:image/png;base64,...."); the data-URL prefix is stripped
    before decoding.

    Args:
        base64_string: Base64 encoded image, optionally with a data-URL prefix

    Returns:
        Decoded image bytes

    Raises:
        ValueError: If the payload is not valid base64
    """
    # Remove data URL prefix if present; maxsplit=1 so that the payload
    # itself can never be truncated by a later comma.
    if "," in base64_string:
        base64_string = base64_string.split(",", 1)[1]

    try:
        # validate=True makes b64decode raise on non-alphabet characters
        # instead of silently discarding them and returning garbage bytes.
        # Harmless whitespace (line wrapping) is stripped first.
        return base64.b64decode("".join(base64_string.split()), validate=True)
    except Exception as e:
        raise ValueError(f"Invalid base64 image: {e}")
+
+
def validate_image_format(filename: str) -> bool:
    """
    Check whether a filename carries a supported image extension.

    Args:
        filename: Image filename to check

    Returns:
        True when the extension is one of the supported image formats
    """
    # Extension comparison is case-insensitive (".JPG" is accepted).
    extension = Path(filename).suffix.lower()
    return extension in {".jpg", ".jpeg", ".png", ".webp", ".bmp"}
+
+
+# =============================================================================
+# ENDPOINTS
+# =============================================================================
+
@router.post("/", response_model=DetectionResponse)
async def detect_disease(
    file: UploadFile = File(..., description="Crop image file"),
    language: str = Form(default="en", description="Language code"),
    session_id: Optional[str] = Form(default=None, description="Session ID")
):
    """
    Detect crop disease from uploaded image.

    Analyzes the image using YOLOv11 model and returns:
    - Disease detection results
    - Complete diagnosis with treatments
    - All content translated to selected language

    Supported formats: JPG, JPEG, PNG, WEBP, BMP
    Maximum file size: 10MB

    Raises:
        HTTPException 400: bad format, oversized file, or undecodable image
        HTTPException 500: unexpected failure in the detection pipeline
    """
    try:
        # Validate file format by extension before reading the payload
        if not file.filename or not validate_image_format(file.filename):
            raise HTTPException(
                status_code=400,
                detail="Invalid image format. Supported: JPG, JPEG, PNG, WEBP, BMP"
            )

        # Read file content
        contents = await file.read()

        # Validate file size (10MB max)
        max_size = 10 * 1024 * 1024
        if len(contents) > max_size:
            raise HTTPException(
                status_code=400,
                detail=f"File too large. Maximum size: {max_size // (1024*1024)}MB"
            )

        # Validate language; unknown codes silently fall back to English
        valid_languages = ["en", "ha", "yo", "ig"]
        if language not in valid_languages:
            language = "en"

        # Import services lazily so this module can be imported without the
        # heavyweight model stack being available.
        from services.session_manager import get_session_manager, DiagnosisContext
        from services.diagnosis_generator import generate_diagnosis_with_image
        from PIL import Image

        # Get or create session
        session_manager = get_session_manager()
        session = session_manager.get_or_create_session(session_id, language)

        # Convert bytes to PIL Image. Decode eagerly (load()) so a truncated
        # or corrupt upload surfaces here as a 400 client error rather than
        # as a 500 from deep inside the inference pipeline.
        try:
            image = Image.open(io.BytesIO(contents))
            image.load()
        except Exception:
            raise HTTPException(
                status_code=400,
                detail="Invalid or corrupted image file"
            )

        # Generate diagnosis (the annotated image is currently unused by this
        # endpoint; only the textual report is returned)
        logger.info(f"Processing detection for session {session.session_id[:8]}...")
        report, annotated_image = generate_diagnosis_with_image(image, language)

        # Update session with diagnosis context so the chat assistant can use it
        diagnosis_context = DiagnosisContext.from_diagnosis_report(report)
        session_manager.update_diagnosis(session.session_id, diagnosis_context)

        # Build response
        response_data = {
            "success": True,
            "session_id": session.session_id,
            "detection": {
                "disease_name": report.disease_name,
                "crop_type": report.crop_type,
                "confidence": report.confidence,
                "confidence_percent": round(report.confidence * 100, 1),
                "severity_level": report.severity_level,
                "is_healthy": report.is_healthy
            },
            "diagnosis": report.to_dict(),
            "language": language,
            "timestamp": datetime.now().isoformat()
        }

        logger.info(f"Detection complete: {report.disease_name} ({report.confidence:.1%})")

        return JSONResponse(content=response_data)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Detection failed: {e}")
        raise HTTPException(status_code=500, detail=f"Detection failed: {str(e)}")
+
+
@router.post("/base64", response_model=DetectionResponse)
async def detect_disease_base64(request: DetectionRequest):
    """
    Detect crop disease from base64 encoded image.

    Alternative endpoint for clients that prefer sending
    images as base64 strings rather than file uploads.

    Raises:
        HTTPException 400: invalid base64, oversized payload, or undecodable image
        HTTPException 500: unexpected failure in the detection pipeline
    """
    try:
        # Decode base64 image; malformed payloads are a client error
        try:
            image_bytes = decode_base64_image(request.image_base64)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))

        # Validate size (10MB max, same limit as the upload endpoint)
        max_size = 10 * 1024 * 1024
        if len(image_bytes) > max_size:
            raise HTTPException(
                status_code=400,
                detail=f"Image too large. Maximum size: {max_size // (1024*1024)}MB"
            )

        # Validate language; unknown codes silently fall back to English
        valid_languages = ["en", "ha", "yo", "ig"]
        language = request.language if request.language in valid_languages else "en"

        # Import services lazily so this module can be imported without the
        # heavyweight model stack being available.
        from services.session_manager import get_session_manager, DiagnosisContext
        from services.diagnosis_generator import generate_diagnosis_with_image
        from PIL import Image

        # Get or create session
        session_manager = get_session_manager()
        session = session_manager.get_or_create_session(request.session_id, language)

        # Convert bytes to PIL Image. Decode eagerly (load()) so corrupt
        # data is a 400 here instead of a 500 deep inside the pipeline.
        try:
            image = Image.open(io.BytesIO(image_bytes))
            image.load()
        except Exception:
            raise HTTPException(
                status_code=400,
                detail="Invalid or corrupted image data"
            )

        # Generate diagnosis (annotated image is not returned by this endpoint)
        logger.info(f"Processing base64 detection for session {session.session_id[:8]}...")
        report, annotated_image = generate_diagnosis_with_image(image, language)

        # Update session so the chat assistant can reference this diagnosis
        diagnosis_context = DiagnosisContext.from_diagnosis_report(report)
        session_manager.update_diagnosis(session.session_id, diagnosis_context)

        # Build response
        response_data = {
            "success": True,
            "session_id": session.session_id,
            "detection": {
                "disease_name": report.disease_name,
                "crop_type": report.crop_type,
                "confidence": report.confidence,
                "confidence_percent": round(report.confidence * 100, 1),
                "severity_level": report.severity_level,
                "is_healthy": report.is_healthy
            },
            "diagnosis": report.to_dict(),
            "language": language,
            "timestamp": datetime.now().isoformat()
        }

        return JSONResponse(content=response_data)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Base64 detection failed: {e}")
        raise HTTPException(status_code=500, detail=f"Detection failed: {str(e)}")
+
+
@router.get("/status", response_model=StatusResponse)
async def get_detection_status():
    """
    Get status of detection service.

    Returns information about:
    - Model loading status
    - Supported languages
    - Supported crops

    Probe failures are non-fatal: if a sub-service cannot be inspected,
    its flag simply stays False and a warning is logged.
    """
    try:
        # Start from pessimistic defaults; individual flags are flipped
        # below only when the corresponding service can be probed.
        status_info = {
            "status": "operational",
            "yolo_loaded": False,
            "natlas_loaded": False,
            "knowledge_base_loaded": False,
            "supported_languages": ["en", "ha", "yo", "ig"],
            "supported_crops": ["cassava", "cocoa", "tomato"]
        }

        try:
            from services.disease_detector import get_disease_detector
            detector = get_disease_detector()
            # NOTE(review): this reaches into private attributes of the
            # detector (_knowledge_base, _yolo_model._is_loaded) — consider
            # exposing a public status accessor on the service instead.
            status_info["knowledge_base_loaded"] = detector._knowledge_base is not None
            status_info["yolo_loaded"] = (
                detector._yolo_model is not None and
                detector._yolo_model._is_loaded
            )
        except Exception as e:
            logger.warning(f"Could not get detector status: {e}")

        try:
            from models.natlas_model import get_natlas_model
            natlas = get_natlas_model()
            status_info["natlas_loaded"] = natlas.is_loaded
        except Exception as e:
            logger.warning(f"Could not get N-ATLaS status: {e}")

        return JSONResponse(content=status_info)

    except Exception as e:
        logger.error(f"Status check failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.get("/classes", response_model=ClassesResponse)
async def get_supported_classes():
    """
    Get the list of supported disease classes.

    Returns:
        - Total number of classes
        - Class names with indices
        - Mapping of crops to class indices
    """
    try:
        from config import CLASS_NAMES, CROP_TYPES, CLASS_TO_CROP, CLASS_INDEX_TO_KEY

        # One entry per class index, enriched with its key and owning crop.
        classes_list = [
            {
                "index": idx,
                "name": name,
                "key": CLASS_INDEX_TO_KEY.get(idx, ""),
                "crop": CLASS_TO_CROP.get(idx, "unknown")
            }
            for idx, name in enumerate(CLASS_NAMES)
        ]

        return JSONResponse(content={
            "total_classes": len(CLASS_NAMES),
            "classes": classes_list,
            "crops": CROP_TYPES
        })

    except Exception as e:
        logger.error(f"Get classes failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.delete("/session/{session_id}")
async def clear_session_diagnosis(session_id: str):
    """
    Clear diagnosis data for a session.

    Resets the current diagnosis and chat history so the user
    can start fresh with a new image.
    """
    try:
        from services.session_manager import get_session_manager

        cleared = get_session_manager().clear_diagnosis(session_id)

        # Unknown session -> 404; otherwise confirm the reset.
        if not cleared:
            raise HTTPException(status_code=404, detail="Session not found")

        return JSONResponse(content={
            "success": True,
            "message": "Diagnosis cleared",
            "session_id": session_id
        })

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Clear session failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
diff --git a/api/routes/transcribe.py b/api/routes/transcribe.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f119b398e7a6d14fe49392da74bf0403fbd92af
--- /dev/null
+++ b/api/routes/transcribe.py
@@ -0,0 +1,418 @@
+"""
+FarmEyes Transcribe API Routes
+==============================
+REST API endpoints for speech-to-text transcription.
+
+Endpoints:
+- POST /api/transcribe - Transcribe audio to text
+- GET /api/transcribe/status - Check Whisper model status
+- GET /api/transcribe/formats - Get supported audio formats
+"""
+
+import sys
+import io
+import base64
+from pathlib import Path
+from typing import Optional
+from datetime import datetime
+import logging
+
+from fastapi import APIRouter, File, UploadFile, Form, HTTPException, Query
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field
+
# Configure logging
# NOTE(review): logging.basicConfig at import time configures the root logger
# as a side effect of importing this routes module; the application entry
# point is usually a better place for this — confirm intent.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Create router; every endpoint below is mounted under /api/transcribe
router = APIRouter(prefix="/api/transcribe", tags=["Transcription"])
+
+
+# =============================================================================
+# REQUEST/RESPONSE MODELS
+# =============================================================================
+
class TranscribeRequest(BaseModel):
    """Request model for base64 audio transcription (POST /api/transcribe/base64)."""
    audio_base64: str = Field(..., description="Base64 encoded audio data")  # data-URL prefix is allowed and stripped
    filename: str = Field(default="audio.wav", description="Original filename for format detection")
    language_hint: Optional[str] = Field(default=None, description="Language hint (en, ha, yo, ig)")  # None -> auto-detect
+
+
class TranscribeResponse(BaseModel):
    """Response model for transcription."""
    success: bool
    text: str                                # transcribed text ("" when nothing was recognized)
    language: Optional[str] = None           # detected/used language code
    confidence: float = 0.0                  # transcription confidence — presumably 0.0-1.0; confirm scale
    duration: float = 0.0                    # audio duration — presumably seconds; confirm units
    processing_time: Optional[float] = None  # time spent transcribing, if reported by the service
+
+
class StatusResponse(BaseModel):
    """Response model for transcription service status."""
    status: str             # "operational" or "model_not_loaded"
    model_loaded: bool      # whether the Whisper model is in memory
    model_size: str         # e.g. "base"
    device: str             # e.g. "cpu"
    supported_formats: list
+
+
class FormatsResponse(BaseModel):
    """Response model for supported formats."""
    formats: list              # supported audio formats/extensions
    max_file_size_mb: int      # upload size limit in megabytes
    max_duration_seconds: int  # maximum audio clip length in seconds
+
+
+# =============================================================================
+# HELPER FUNCTIONS
+# =============================================================================
+
def decode_base64_audio(base64_string: str) -> bytes:
    """
    Decode a base64 audio string to raw bytes.

    Accepts both bare base64 payloads and data URLs
    ("data:audio/wav;base64,...."); the data-URL prefix is stripped
    before decoding.

    Args:
        base64_string: Base64 encoded audio, optionally with a data-URL prefix

    Returns:
        Decoded audio bytes

    Raises:
        ValueError: If the payload is not valid base64
    """
    # Remove data URL prefix if present; maxsplit=1 so that the payload
    # itself can never be truncated by a later comma.
    if "," in base64_string:
        base64_string = base64_string.split(",", 1)[1]

    try:
        # validate=True makes b64decode raise on non-alphabet characters
        # instead of silently discarding them and decoding garbage bytes.
        # Harmless whitespace (line wrapping) is stripped first.
        return base64.b64decode("".join(base64_string.split()), validate=True)
    except Exception as e:
        raise ValueError(f"Invalid base64 audio: {e}")
+
+
def validate_audio_format(filename: str) -> bool:
    """
    Check whether a filename carries a supported audio extension.

    Args:
        filename: Audio filename to check

    Returns:
        True when the extension is one of the supported audio formats
    """
    # Extension comparison is case-insensitive (".WAV" is accepted).
    extension = Path(filename).suffix.lower()
    return extension in {".wav", ".mp3", ".m4a", ".ogg", ".flac", ".webm"}
+
+
+# =============================================================================
+# ENDPOINTS
+# =============================================================================
+
@router.post("/", response_model=TranscribeResponse)
async def transcribe_audio(
    file: UploadFile = File(..., description="Audio file"),
    language_hint: Optional[str] = Form(default=None, description="Language hint (en, ha, yo, ig)")
):
    """
    Transcribe audio file to text.

    Uses OpenAI Whisper model for accurate speech-to-text,
    with special optimization for Nigerian languages.

    Supported formats: WAV, MP3, M4A, OGG, FLAC, WEBM
    Maximum file size: 5MB
    Maximum duration: 30 seconds

    Language hints improve accuracy:
    - en: English
    - ha: Hausa
    - yo: Yoruba
    - ig: Igbo

    Raises:
        HTTPException 400: missing filename, bad format, or oversized file
        HTTPException 500: the Whisper service reported a failure
    """
    try:
        # Validate file format
        if not file.filename:
            raise HTTPException(status_code=400, detail="No filename provided")

        if not validate_audio_format(file.filename):
            raise HTTPException(
                status_code=400,
                detail="Invalid audio format. Supported: WAV, MP3, M4A, OGG, FLAC, WEBM"
            )

        # Read file content
        contents = await file.read()

        # Validate file size (5MB max)
        # NOTE(review): the 30s duration limit stated above is presumably
        # enforced inside the Whisper service, not here — confirm.
        max_size = 5 * 1024 * 1024
        if len(contents) > max_size:
            raise HTTPException(
                status_code=400,
                detail=f"File too large. Maximum size: {max_size // (1024*1024)}MB"
            )

        # Validate language hint; unknown codes are dropped so the
        # service falls back to automatic language detection
        valid_languages = ["en", "ha", "yo", "ig"]
        if language_hint and language_hint not in valid_languages:
            language_hint = None

        # Import Whisper service lazily to keep module import lightweight
        from services.whisper_service import get_whisper_service

        whisper_service = get_whisper_service()

        # Transcribe
        logger.info(f"Transcribing audio: {file.filename}")
        result = whisper_service.transcribe_bytes(
            audio_bytes=contents,
            filename=file.filename,
            language_hint=language_hint
        )

        # Service reports failure via its result dict rather than raising
        if not result.get("success", False):
            error_msg = result.get("error", "Transcription failed")
            raise HTTPException(status_code=500, detail=error_msg)

        # Build response
        response_data = {
            "success": True,
            "text": result.get("text", ""),
            "language": result.get("language"),
            "confidence": result.get("confidence", 0.0),
            "duration": result.get("duration", 0.0),
            "processing_time": result.get("processing_time")
        }

        logger.info(f"Transcription complete: {len(response_data['text'])} chars")

        return JSONResponse(content=response_data)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Transcription failed: {e}")
        raise HTTPException(status_code=500, detail=f"Transcription failed: {str(e)}")
+
+
@router.post("/base64", response_model=TranscribeResponse)
async def transcribe_audio_base64(request: TranscribeRequest):
    """
    Transcribe base64 encoded audio to text.

    Alternative endpoint for clients that prefer sending
    audio as base64 strings (e.g., from web recordings).

    Raises:
        HTTPException 400: invalid base64, oversized payload, or bad format
        HTTPException 500: the Whisper service reported a failure
    """
    try:
        # Decode base64 audio; malformed payloads are a client error
        try:
            audio_bytes = decode_base64_audio(request.audio_base64)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))

        # Validate size (5MB max, same limit as the upload endpoint)
        max_size = 5 * 1024 * 1024
        if len(audio_bytes) > max_size:
            raise HTTPException(
                status_code=400,
                detail=f"Audio too large. Maximum size: {max_size // (1024*1024)}MB"
            )

        # Validate format from the client-supplied filename (the service
        # presumably uses the extension to pick a decoder — confirm)
        if not validate_audio_format(request.filename):
            raise HTTPException(
                status_code=400,
                detail="Invalid audio format. Supported: WAV, MP3, M4A, OGG, FLAC, WEBM"
            )

        # Validate language hint; unknown codes fall back to auto-detection
        valid_languages = ["en", "ha", "yo", "ig"]
        language_hint = request.language_hint
        if language_hint and language_hint not in valid_languages:
            language_hint = None

        # Import Whisper service lazily to keep module import lightweight
        from services.whisper_service import get_whisper_service

        whisper_service = get_whisper_service()

        # Transcribe
        logger.info(f"Transcribing base64 audio: {request.filename}")
        result = whisper_service.transcribe_bytes(
            audio_bytes=audio_bytes,
            filename=request.filename,
            language_hint=language_hint
        )

        # Service reports failure via its result dict rather than raising
        if not result.get("success", False):
            error_msg = result.get("error", "Transcription failed")
            raise HTTPException(status_code=500, detail=error_msg)

        response_data = {
            "success": True,
            "text": result.get("text", ""),
            "language": result.get("language"),
            "confidence": result.get("confidence", 0.0),
            "duration": result.get("duration", 0.0),
            "processing_time": result.get("processing_time")
        }

        return JSONResponse(content=response_data)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Base64 transcription failed: {e}")
        raise HTTPException(status_code=500, detail=f"Transcription failed: {str(e)}")
+
+
@router.get("/status", response_model=StatusResponse)
async def get_transcription_status():
    """
    Get status of transcription service.

    Reports the Whisper model loading state, its size and
    device, and the audio formats it accepts.
    """
    try:
        from services.whisper_service import get_whisper_service

        info = get_whisper_service().get_model_info()

        payload = {
            "status": "operational" if info.get("is_loaded") else "model_not_loaded",
            "model_loaded": info.get("is_loaded", False),
            "model_size": info.get("model_size", "base"),
            "device": info.get("device", "cpu"),
            "supported_formats": info.get("supported_formats", [])
        }
        return JSONResponse(content=payload)

    except Exception as e:
        logger.error(f"Status check failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.get("/formats", response_model=FormatsResponse)
async def get_supported_formats():
    """
    Get supported audio formats and upload limits.

    Returns the accepted audio formats together with the
    maximum file size (MB) and maximum clip duration (seconds).
    """
    try:
        from services.whisper_service import AudioProcessor

        payload = {
            "formats": list(AudioProcessor.SUPPORTED_FORMATS),
            "max_file_size_mb": 5,
            "max_duration_seconds": AudioProcessor.MAX_DURATION
        }
        return JSONResponse(content=payload)

    except Exception as e:
        logger.error(f"Get formats failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.post("/detect-language")
async def detect_audio_language(
    file: UploadFile = File(..., description="Audio file")
):
    """
    Detect language in audio file.

    Uses Whisper's language detection to identify
    the spoken language without full transcription.
    Faster than full transcription for language detection.

    Raises:
        HTTPException 400: bad format or oversized file
        HTTPException 500: the detection service reported a failure
    """
    try:
        # Validate file name and extension
        if not file.filename or not validate_audio_format(file.filename):
            raise HTTPException(
                status_code=400,
                detail="Invalid audio format"
            )

        # Read content
        contents = await file.read()

        # Validate size (same 5MB limit as transcription)
        max_size = 5 * 1024 * 1024
        if len(contents) > max_size:
            raise HTTPException(status_code=400, detail="File too large")

        # Import service lazily to keep module import lightweight
        from services.whisper_service import get_whisper_service

        whisper_service = get_whisper_service()

        # Detect language; the service reports failure via its result dict
        result = whisper_service.detect_language(contents)

        if not result.get("success", False):
            raise HTTPException(
                status_code=500,
                detail=result.get("error", "Language detection failed")
            )

        return JSONResponse(content={
            "success": True,
            "language": result.get("language"),
            "confidence": result.get("confidence", 0.0),
            "top_languages": result.get("all_probabilities", {})
        })

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Language detection failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.post("/load-model")
async def load_whisper_model():
    """
    Explicitly load the Whisper model.

    Pre-warms the model so the first voice interaction is fast.
    The model would otherwise load lazily on first use; calling
    this endpoint ahead of time improves UX.
    """
    try:
        from services.whisper_service import get_whisper_service

        whisper_service = get_whisper_service()

        # Already warm: report success without reloading.
        if whisper_service.is_loaded:
            return JSONResponse(content={
                "success": True,
                "message": "Model already loaded",
                "model_size": whisper_service.model_size
            })

        # Load model
        logger.info("Pre-loading Whisper model...")
        if not whisper_service.load_model():
            raise HTTPException(
                status_code=500,
                detail="Failed to load Whisper model"
            )

        return JSONResponse(content={
            "success": True,
            "message": "Model loaded successfully",
            "model_size": whisper_service.model_size
        })

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Load model failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
diff --git a/api/routes/tts.py b/api/routes/tts.py
new file mode 100644
index 0000000000000000000000000000000000000000..73a7b42b72d6436627ae40254356b34f18aa95b7
--- /dev/null
+++ b/api/routes/tts.py
@@ -0,0 +1,182 @@
+"""
+FarmEyes TTS API Routes
+=======================
+REST API endpoints for text-to-speech synthesis.
+
+Endpoints:
+- POST /api/tts - Synthesize text to speech
+- GET /api/tts/languages - Get supported languages
+- GET /api/tts/status - Check TTS service status
+"""
+
+import sys
+from pathlib import Path
+from typing import Optional
+from datetime import datetime
+import logging
+
+from fastapi import APIRouter, HTTPException, Query
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field
+
# Configure logging
# NOTE(review): logging.basicConfig at import time configures the root logger
# as a side effect of importing this routes module; the application entry
# point is usually a better place for this — confirm intent.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Create router; every endpoint below is mounted under /api/tts
router = APIRouter(prefix="/api/tts", tags=["Text-to-Speech"])
+
+
+# =============================================================================
+# REQUEST/RESPONSE MODELS
+# =============================================================================
+
class TTSRequest(BaseModel):
    """Request model for TTS synthesis (POST /api/tts)."""
    text: str = Field(..., description="Text to convert to speech", max_length=2000)  # length capped by schema validation
    language: str = Field(default="en", description="Language code (en, ha, yo, ig)")
+
+
class TTSResponse(BaseModel):
    """Response model for TTS synthesis.

    On synthesis failure the endpoint returns success=False with `error`
    set (still HTTP 200) so clients can fall back gracefully.
    """
    success: bool
    audio_base64: Optional[str] = None       # encoded audio payload, present on success
    content_type: str = "audio/flac"
    duration: float = 0.0                    # audio duration — presumably seconds; confirm
    language: str = "en"
    text_length: int = 0                     # number of characters synthesized
    processing_time: Optional[float] = None  # synthesis time, if reported by the service
    error: Optional[str] = None              # populated only when success is False
+
+
class LanguagesResponse(BaseModel):
    """Response model for supported languages."""
    success: bool
    languages: dict  # language code -> display name (from the TTS service)
+
+
class StatusResponse(BaseModel):
    """Response model for TTS service status."""
    success: bool
    status: str      # "ready", "no_token", or "error"
    has_token: bool  # whether a HuggingFace API token is configured
    supported_languages: list
+
+
+# =============================================================================
+# ENDPOINTS
+# =============================================================================
+
# Registered for both "" and "/" so clients hitting /api/tts and /api/tts/
# both resolve without a redirect.
@router.post("", response_model=TTSResponse)
@router.post("/", response_model=TTSResponse)
async def synthesize_speech(request: TTSRequest):
    """
    Synthesize text to speech.

    Converts the provided text to audio using Meta MMS-TTS.
    Returns base64 encoded audio data.

    Supported languages:
    - en: English
    - ha: Hausa
    - yo: Yoruba
    - ig: Igbo

    Raises:
        HTTPException 400: unsupported language
        HTTPException 500: unexpected failure (a service-reported synthesis
            failure is instead returned as success=False with HTTP 200)
    """
    try:
        from services.tts_service import get_tts_service

        logger.info(f"TTS request: lang={request.language}, text_len={len(request.text)}")

        # Get TTS service
        tts_service = get_tts_service()

        # Check language support before doing any work
        if not tts_service.is_language_supported(request.language):
            raise HTTPException(
                status_code=400,
                detail=f"Language '{request.language}' is not supported. Use: en, ha, yo, ig"
            )

        # Synthesize
        result = tts_service.synthesize(request.text, request.language)

        if result["success"]:
            return TTSResponse(
                success=True,
                audio_base64=result["audio_base64"],
                content_type=result.get("content_type", "audio/flac"),
                duration=result.get("duration", 0.0),
                language=result["language"],
                text_length=result.get("text_length", len(request.text)),
                processing_time=result.get("processing_time")
            )
        else:
            # Return error but don't raise exception (for fallback handling)
            return TTSResponse(
                success=False,
                language=request.language,
                text_length=len(request.text),
                error=result.get("error", "TTS synthesis failed")
            )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"TTS synthesis failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.get("/languages", response_model=LanguagesResponse)
async def get_supported_languages():
    """
    Get list of supported TTS languages.

    Returns language codes mapped to their display names.
    """
    try:
        from services.tts_service import get_tts_service

        return LanguagesResponse(
            success=True,
            languages=get_tts_service().get_supported_languages()
        )

    except Exception as e:
        logger.error(f"Get languages failed: {e}")
        raise HTTPException(status_code=500, detail=str(e))
+
+
@router.get("/status", response_model=StatusResponse)
async def get_tts_status():
    """
    Get TTS service status.

    Reports whether an API token is configured and which languages
    are available. Probe failures are reported as an error-status
    payload rather than as an HTTP error.
    """
    try:
        from services.tts_service import get_tts_service

        service = get_tts_service()
        token_present = bool(service.hf_token)

        return StatusResponse(
            success=True,
            status="ready" if token_present else "no_token",
            has_token=token_present,
            supported_languages=list(service.get_supported_languages().keys())
        )

    except Exception as e:
        logger.error(f"Get status failed: {e}")
        return StatusResponse(
            success=False,
            status="error",
            has_token=False,
            supported_languages=[]
        )
diff --git a/config.py b/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..249b1afb922d864d3c079bb5b67578c25bd07f98
--- /dev/null
+++ b/config.py
@@ -0,0 +1,635 @@
+"""
+FarmEyes Configuration File
+===========================
+Central configuration for the FarmEyes crop disease detection application.
+Contains model paths, class mappings, device settings, API configurations,
+session management, and Whisper speech-to-text settings.
+
+Device: Apple Silicon M1 Pro with MPS (Metal Performance Shaders) acceleration
+Deployment: Local development + HuggingFace Spaces
+
+Model: Custom trained YOLOv11 for 6 disease classes
+Crops: Cassava, Cocoa, Tomato
+"""
+
+import os
+from pathlib import Path
+from typing import Dict, List, Optional
+from dataclasses import dataclass, field
+
+
+# =============================================================================
+# PATH CONFIGURATIONS
+# =============================================================================
+
# Base project directory (directory containing this file)
BASE_DIR = Path(__file__).parent.resolve()

# Project subdirectories
DATA_DIR = BASE_DIR / "data"
STATIC_DIR = BASE_DIR / "static"
MODELS_DIR = BASE_DIR / "models"
OUTPUTS_DIR = BASE_DIR / "outputs"
FRONTEND_DIR = BASE_DIR / "frontend"
UPLOADS_DIR = BASE_DIR / "uploads"

# Ensure writable directories exist at import time (FRONTEND_DIR is
# expected to ship with the code, so it is not created here).
for _dir in (DATA_DIR, STATIC_DIR, MODELS_DIR, OUTPUTS_DIR, UPLOADS_DIR):
    _dir.mkdir(parents=True, exist_ok=True)

# Data files consumed by the application
KNOWLEDGE_BASE_PATH = DATA_DIR / "knowledge_base.json"
UI_TRANSLATIONS_PATH = STATIC_DIR / "ui_translations.json"
+
+
+# =============================================================================
+# API CONFIGURATION
+# =============================================================================
+
@dataclass
class APIConfig:
    """Settings for the FastAPI backend.

    Covers bind address, API metadata, CORS, request limits, naive rate
    limiting, and environment detection for HuggingFace Spaces.
    """

    # Network binding
    host: str = "0.0.0.0"
    port: int = 7860  # HuggingFace Spaces serves apps on 7860

    # Metadata shown in OpenAPI docs
    title: str = "FarmEyes API"
    description: str = "AI-Powered Crop Disease Detection for Nigerian Farmers"
    version: str = "2.0.0"

    # Origins accepted by the CORS middleware
    cors_origins: List[str] = field(default_factory=lambda: [
        "http://localhost:7860",
        "http://127.0.0.1:7860",
        "https://*.hf.space",  # HuggingFace Spaces
        "*"  # Allow all for development - restrict in production
    ])

    # Request limits
    max_upload_size: int = 10 * 1024 * 1024  # 10MB max image upload
    request_timeout: int = 60  # seconds

    # Basic rate limiting knobs
    rate_limit_requests: int = 100  # requests per minute
    rate_limit_window: int = 60  # seconds

    # Debug flag, driven by the DEBUG environment variable
    debug: bool = os.environ.get("DEBUG", "false").lower() == "true"

    @property
    def is_huggingface(self) -> bool:
        """True when the SPACE_ID env var indicates HuggingFace Spaces."""
        return os.environ.get("SPACE_ID") is not None

    @property
    def base_url(self) -> str:
        """Public base URL appropriate for the current environment."""
        if not self.is_huggingface:
            return f"http://{self.host}:{self.port}"
        # HF Spaces URLs replace the '/' in "owner/space" with '-'.
        space_id = os.environ.get("SPACE_ID", "")
        return f"https://{space_id.replace('/', '-')}.hf.space"
+
+
+# =============================================================================
+# SESSION CONFIGURATION
+# =============================================================================
+
@dataclass
class SessionConfig:
    """Settings governing in-memory user sessions and chat history."""

    # Session lifecycle
    session_lifetime: int = 3600     # seconds a session stays valid (1 hour)
    max_sessions: int = 1000         # cap on concurrent sessions

    # Chat history bounds
    max_chat_history: int = 50       # messages retained per session
    max_message_length: int = 2000   # characters allowed per message

    # Keep the diagnosis context attached to the session
    retain_diagnosis: bool = True

    # Seconds between sweeps for expired sessions (5 minutes)
    cleanup_interval: int = 300
+
+
+# =============================================================================
+# WHISPER CONFIGURATION
+# =============================================================================
+
@dataclass
class WhisperConfig:
    """Settings for Whisper speech-to-text transcription."""

    # Checkpoint to load. Approximate memory needs per size:
    #   tiny ~1GB (fastest, least accurate), base ~1GB (chosen balance),
    #   small ~2GB, medium ~5GB, large ~10GB (best accuracy).
    model_size: str = "base"

    # CPU is used for broad compatibility; on Apple Silicon the CPU path
    # performs well. Set "cuda" for NVIDIA GPUs, or consider mlx-whisper
    # on Apple Silicon.
    device: str = "cpu"

    # Audio handling
    sample_rate: int = 16000       # Whisper expects 16kHz input
    max_audio_duration: int = 30   # cap on seconds of audio processed

    # Optional hints per UI language code (Whisper also auto-detects)
    language_hints: Dict[str, str] = field(default_factory=lambda: {
        "en": "english",
        "ha": "hausa",
        "yo": "yoruba",
        "ig": "igbo"
    })

    # "transcribe" keeps the spoken language; "translate" outputs English
    task: str = "transcribe"

    # FP32 keeps CPU inference compatible (FP16 is accelerator-oriented)
    fp16: bool = False

    # Accepted audio upload formats
    supported_formats: List[str] = field(default_factory=lambda: [
        ".wav", ".mp3", ".m4a", ".ogg", ".flac", ".webm"
    ])

    # Reject audio files larger than 5MB
    max_file_size: int = 5 * 1024 * 1024
+
+
+# =============================================================================
+# MODEL CONFIGURATIONS
+# =============================================================================
+
@dataclass
class YOLOConfig:
    """Settings for the YOLOv11 disease-detection model."""

    # Trained YOLOv11 weights (.pt file)
    model_path: Path = MODELS_DIR / "farmeyes_yolov11.pt"

    # Inference thresholds
    confidence_threshold: float = 0.5  # minimum detection confidence (0.0-1.0)
    iou_threshold: float = 0.45        # IoU cutoff for non-maximum suppression

    # YOLOv11 default input resolution (square)
    input_size: int = 640

    # Cap on detections returned per image
    max_detections: int = 10

    # 'mps' (Apple Silicon), 'cuda' (NVIDIA), or 'cpu'
    device: str = "mps"
+
+
@dataclass
class NATLaSConfig:
    """Settings for the N-ATLaS language model (GGUF format)."""

    # Hugging Face repository and quantized artifact (Q4_K_M: smaller/faster)
    hf_repo: str = "tosinamuda/N-ATLaS-GGUF"
    model_filename: str = "N-ATLaS-GGUF-Q4_K_M.gguf"

    # Local directory for downloads/cache
    model_path: Path = MODELS_DIR / "natlas"

    @property
    def gguf_path(self) -> Path:
        """Full path to the local GGUF file."""
        return self.model_path / self.model_filename

    # Token budgets
    context_length: int = 4096   # context window size (tokens)
    max_tokens: int = 1024       # max tokens per generated response
    chat_max_tokens: int = 512   # shorter limit keeps chat responsive

    # Sampling parameters
    temperature: float = 0.7       # 0.0 deterministic .. 1.0 creative
    chat_temperature: float = 0.6  # slightly lower for focused chat replies
    top_p: float = 0.9             # nucleus sampling

    # Hardware tuning: -1 offloads all layers (MPS acceleration), 0 = CPU only
    n_gpu_layers: int = -1
    n_threads: int = 8   # CPU threads for computation
    n_batch: int = 512   # prompt-processing batch size

    # Inference device
    device: str = "mps"
+
+
+# =============================================================================
+# DISEASE CLASS MAPPINGS (6 CLASSES - NO HEALTHY CLASSES)
+# =============================================================================
+
+# YOLOv11 class index to disease key mapping
# YOLOv11 class index to disease key mapping — the canonical source for all
# derived mappings below.
CLASS_INDEX_TO_KEY: Dict[int, str] = {
    0: "cassava_bacterial_blight",
    1: "cassava_mosaic_virus",
    2: "cocoa_monilia_disease",
    3: "cocoa_phytophthora_disease",
    4: "tomato_gray_mold",
    5: "tomato_wilt_disease"
}

# Reverse mapping: disease key to class index
KEY_TO_CLASS_INDEX: Dict[str, int] = {v: k for k, v in CLASS_INDEX_TO_KEY.items()}

# Class names as they appear in YOLO training (6 classes); order matches
# the indices in CLASS_INDEX_TO_KEY.
CLASS_NAMES: List[str] = [
    "Cassava Bacteria Blight",      # Index 0
    "Cassava Mosaic Virus",         # Index 1
    "Cocoa Monilia Disease",        # Index 2
    "Cocoa Phytophthora Disease",   # Index 3
    "Tomato Gray Mold Disease",     # Index 4
    "Tomato Wilt Disease"           # Index 5
]

# No healthy class indices in the 6-class (disease-only) model
HEALTHY_CLASS_INDICES: List[int] = []

# All class indices are disease classes; derived from the canonical mapping
# instead of hard-coding, so the two cannot drift apart.
DISEASE_CLASS_INDICES: List[int] = list(CLASS_INDEX_TO_KEY.keys())

# Crop type mapping (6 classes only)
CROP_TYPES: Dict[str, List[int]] = {
    "cassava": [0, 1],
    "cocoa": [2, 3],
    "tomato": [4, 5]
}

# Reverse mapping: class index to crop type. A comprehension (rather than
# the previous module-level for-loop) avoids leaking the loop variables
# `crop`, `indices`, and `idx` into the module namespace.
CLASS_TO_CROP: Dict[int, str] = {
    idx: crop for crop, indices in CROP_TYPES.items() for idx in indices
}
+
+
+# =============================================================================
+# LANGUAGE CONFIGURATIONS
+# =============================================================================
+
@dataclass
class LanguageConfig:
    """Supported application languages and their display names."""

    # ISO-style codes the app accepts, and the fallback
    supported_languages: List[str] = field(default_factory=lambda: ["en", "ha", "yo", "ig"])
    default_language: str = "en"

    # Display names used in the UI
    language_names: Dict[str, str] = field(default_factory=lambda: {
        "en": "English",
        "ha": "Hausa",
        "yo": "Yorùbá",
        "ig": "Igbo"
    })

    # Plain names used when constructing N-ATLaS prompts
    language_full_names: Dict[str, str] = field(default_factory=lambda: {
        "en": "English",
        "ha": "Hausa",
        "yo": "Yoruba",
        "ig": "Igbo"
    })

    # Endonyms shown in the language selector
    native_names: Dict[str, str] = field(default_factory=lambda: {
        "en": "English",
        "ha": "Hausa",
        "yo": "Yorùbá",
        "ig": "Asụsụ Igbo"
    })
+
+
+# =============================================================================
+# CHAT CONFIGURATION
+# =============================================================================
+
@dataclass
class ChatConfig:
    """Configuration for the contextual chatbot.

    Holds the system prompt, the diagnosis-context template injected into
    each conversation, a keyword allow-list for topic restriction, and the
    welcome message shown after a diagnosis.
    """

    # System prompt for agricultural chat (sent verbatim to the model)
    system_prompt: str = """You are FarmEyes, an AI agricultural assistant helping Nigerian farmers.
You are currently discussing a specific crop disease diagnosis with the farmer.
Your role is to:
1. Answer questions ONLY about the diagnosed disease and related agricultural topics
2. Provide practical, actionable advice in simple language
3. Use local context (Nigerian farming practices, costs in Naira)
4. Be respectful, patient, and supportive
5. If asked about unrelated topics, politely redirect to agricultural matters

IMPORTANT: Stay focused on the diagnosis context provided. Do not make up information.
If you don't know something, say so honestly and suggest consulting a local agricultural extension officer."""

    # Context template for chat. Placeholders filled via str.format:
    # crop_type, disease_name, confidence, severity, symptoms, treatment_summary.
    context_template: str = """CURRENT DIAGNOSIS CONTEXT:
- Crop: {crop_type}
- Disease: {disease_name}
- Confidence: {confidence}%
- Severity: {severity}
- Key symptoms: {symptoms}
- Recommended treatment: {treatment_summary}

The farmer may ask follow-up questions about this diagnosis."""

    # Allowed topic keywords (for moderate context restriction).
    # Messages are presumably matched against these to keep chat on-topic —
    # TODO confirm how the caller applies this list.
    allowed_topics: List[str] = field(default_factory=lambda: [
        # Disease-related
        "disease", "infection", "symptom", "treatment", "cure", "prevention",
        "spread", "cause", "severity", "recovery",
        # Crop-related
        "crop", "plant", "leaf", "stem", "root", "fruit", "harvest", "yield",
        "cassava", "cocoa", "tomato", "farming", "agriculture",
        # Treatment-related
        "medicine", "chemical", "organic", "traditional", "spray", "apply",
        "fungicide", "pesticide", "fertilizer", "cost", "price", "naira",
        # General farming
        "farm", "field", "soil", "water", "weather", "season", "planting",
        "seed", "variety", "resistant", "healthy"
    ])

    # Cap on tokens generated per chat response
    max_response_tokens: int = 400

    # Welcome message template. Placeholders: crop_type, disease_name, confidence.
    welcome_template: str = """Hello! I'm your FarmEyes assistant. I've analyzed your {crop_type} plant and detected {disease_name} with {confidence}% confidence.

I can help you understand:
• More about this disease and its symptoms
• Treatment options and costs
• Prevention methods
• When to seek expert help

What would you like to know?"""
+
+
+# =============================================================================
+# APPLICATION CONFIGURATIONS
+# =============================================================================
+
@dataclass
class AppConfig:
    """General application configuration."""

    # App identity
    app_name: str = "FarmEyes"
    app_version: str = "2.0.0"
    app_tagline: str = "AI-Powered Crop Disease Detection for Nigerian Farmers"

    # Server settings (legacy Gradio support)
    server_host: str = "0.0.0.0"
    server_port: int = 7860
    share: bool = False

    # Debug mode. Driven by the DEBUG environment variable for consistency
    # with APIConfig.debug — previously hard-coded to True, which is unsafe
    # for the HuggingFace Spaces deployment this file targets.
    debug: bool = os.environ.get("DEBUG", "false").lower() == "true"

    # Maximum image file size (in bytes) - 10MB
    max_image_size: int = 10 * 1024 * 1024

    # Supported image formats (lowercase extensions, including the dot)
    supported_image_formats: List[str] = field(default_factory=lambda: [
        ".jpg", ".jpeg", ".png", ".webp", ".bmp"
    ])

    # Confidence thresholds used when phrasing feedback to the user
    high_confidence_threshold: float = 0.85
    medium_confidence_threshold: float = 0.60
    low_confidence_threshold: float = 0.40

    # Feature flags
    enable_voice_input: bool = True  # Voice input with Whisper
    enable_chat: bool = True         # Contextual chat
    enable_history: bool = True      # Session history
+
+
+# =============================================================================
+# DEVICE CONFIGURATION (Apple Silicon Specific)
+# =============================================================================
+
@dataclass
class DeviceConfig:
    """Device and hardware configuration for Apple Silicon M1 Pro."""

    compute_device: str = "mps"   # preferred device for inference
    fallback_device: str = "cpu"  # used when the preferred device is unavailable
    use_mps: bool = True          # enable Metal Performance Shaders in PyTorch
    clear_cache_after_inference: bool = True  # memory-management flag

    @staticmethod
    def get_device() -> str:
        """Pick the best available torch device: mps > cuda > cpu."""
        import torch

        if torch.backends.mps.is_available():
            return "mps"
        if torch.cuda.is_available():
            return "cuda"
        return "cpu"

    @staticmethod
    def get_device_info() -> Dict[str, str]:
        """Collect platform and PyTorch details about the compute device."""
        import platform

        import torch

        details: Dict[str, str] = {
            "platform": platform.system(),
            "processor": platform.processor(),
            "python_version": platform.python_version(),
            "pytorch_version": torch.__version__,
            "device": DeviceConfig.get_device(),
        }

        # Only report MPS details when the backend is actually available.
        if torch.backends.mps.is_available():
            details["mps_available"] = "Yes"
            details["mps_built"] = str(torch.backends.mps.is_built())

        return details
+
+
+# =============================================================================
+# PROMPT TEMPLATES CONFIGURATION
+# =============================================================================
+
@dataclass
class PromptConfig:
    """Prompt template and sampling temperatures for N-ATLaS tasks."""

    # System prompt sent to the N-ATLaS model
    system_prompt: str = """You are FarmEyes, an AI agricultural assistant helping Nigerian farmers.
You provide advice about crop diseases and treatments in a clear, simple, and helpful manner.
Always be respectful and use language that farmers can easily understand.
When providing treatment costs, use Nigerian Naira (₦).
Focus on practical advice that farmers can implement."""

    # Cap on translated-text length
    max_translation_length: int = 500

    # Per-task sampling temperatures
    translation_temperature: float = 0.3
    diagnosis_temperature: float = 0.7
    chat_temperature: float = 0.6
+
+
+# =============================================================================
+# LOGGING CONFIGURATION
+# =============================================================================
+
@dataclass
class LogConfig:
    """Logging configuration."""

    # One of DEBUG / INFO / WARNING / ERROR / CRITICAL
    log_level: str = "INFO"

    # Destination for file logging.
    # NOTE(review): the logs/ directory is not auto-created here — confirm
    # the logging setup code creates it before attaching a file handler.
    log_file: Path = BASE_DIR / "logs" / "farmeyes.log"

    # Format string passed to logging.Formatter
    log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

    # Output toggles
    console_logging: bool = True  # enable console handler
    file_logging: bool = True     # enable file handler
+
+
+# =============================================================================
+# INSTANTIATE DEFAULT CONFIGURATIONS
+# =============================================================================
+
# Create default configuration instances.
# These module-level singletons are the intended way to consume settings:
# import them (e.g. `from config import yolo_config`) rather than
# instantiating the config classes directly, so every module shares one set.
api_config = APIConfig()
session_config = SessionConfig()
whisper_config = WhisperConfig()
yolo_config = YOLOConfig()
natlas_config = NATLaSConfig()
language_config = LanguageConfig()
chat_config = ChatConfig()
app_config = AppConfig()
device_config = DeviceConfig()
prompt_config = PromptConfig()
log_config = LogConfig()
+
+
+# =============================================================================
+# UTILITY FUNCTIONS
+# =============================================================================
+
def get_disease_key(class_index: int) -> Optional[str]:
    """Return the disease key for a YOLO class index, or None if unknown."""
    try:
        return CLASS_INDEX_TO_KEY[class_index]
    except KeyError:
        return None
+
+
def get_class_index(disease_key: str) -> Optional[int]:
    """Return the YOLO class index for a disease key, or None if unknown."""
    try:
        return KEY_TO_CLASS_INDEX[disease_key]
    except KeyError:
        return None
+
+
def get_crop_type(class_index: int) -> Optional[str]:
    """Return the crop ('cassava', 'cocoa', 'tomato') for a class index, or None."""
    try:
        return CLASS_TO_CROP[class_index]
    except KeyError:
        return None
+
+
def is_healthy(class_index: int) -> bool:
    """Report whether the index is a healthy-plant class.

    Always False for the 6-class disease-only model, because
    HEALTHY_CLASS_INDICES is empty.
    """
    return any(healthy == class_index for healthy in HEALTHY_CLASS_INDICES)
+
+
def validate_config() -> Dict[str, bool]:
    """Check that required configuration files and paths exist on disk.

    Returns a mapping from check name to a boolean existence result.
    """
    required = (
        ("knowledge_base_exists", KNOWLEDGE_BASE_PATH),
        ("ui_translations_exists", UI_TRANSLATIONS_PATH),
        ("models_dir_exists", MODELS_DIR),
        ("yolo_model_exists", yolo_config.model_path),
        ("natlas_model_exists", natlas_config.gguf_path),
        ("frontend_dir_exists", FRONTEND_DIR),
    )
    return {name: path.exists() for name, path in required}
+
+
def print_config_summary():
    """Print a summary of the current configuration.

    Reads the module-level config singletons (api_config, yolo_config,
    natlas_config, whisper_config, app_config, language_config) and the
    module path constants; writes a human-readable report to stdout.
    """
    # Banner
    print("=" * 60)
    print("FarmEyes Configuration Summary v2.0")
    print("=" * 60)

    # Filesystem locations
    print(f"\n📁 Paths:")
    print(f"   Base Directory: {BASE_DIR}")
    print(f"   Knowledge Base: {KNOWLEDGE_BASE_PATH}")
    print(f"   Frontend: {FRONTEND_DIR}")

    # Backend server settings
    print(f"\n🌐 API Configuration:")
    print(f"   Host: {api_config.host}:{api_config.port}")
    print(f"   Debug: {api_config.debug}")
    print(f"   HuggingFace: {api_config.is_huggingface}")

    # Vision model
    print(f"\n🤖 YOLOv11 Model:")
    print(f"   Model Path: {yolo_config.model_path}")
    print(f"   Confidence: {yolo_config.confidence_threshold}")
    print(f"   Classes: {len(CLASS_NAMES)}")

    # Language model
    print(f"\n🗣️ N-ATLaS Model:")
    print(f"   HF Repo: {natlas_config.hf_repo}")
    print(f"   Chat Max Tokens: {natlas_config.chat_max_tokens}")

    # Speech-to-text
    print(f"\n🎤 Whisper (Voice):")
    print(f"   Model Size: {whisper_config.model_size}")
    print(f"   Device: {whisper_config.device}")

    # Feature flags
    print(f"\n💬 Chat:")
    print(f"   Enabled: {app_config.enable_chat}")
    print(f"   Voice Input: {app_config.enable_voice_input}")

    # Localization
    print(f"\n🌍 Languages:")
    print(f"   Supported: {', '.join(language_config.supported_languages)}")

    print("\n" + "=" * 60)
+
+
+# =============================================================================
+# MAIN - Run configuration check
+# =============================================================================
+
if __name__ == "__main__":
    # Allow running `python config.py` directly to sanity-check the
    # configuration without starting the application.
    print_config_summary()
diff --git a/data/knowledge_base.json b/data/knowledge_base.json
new file mode 100644
index 0000000000000000000000000000000000000000..8dd61644505a60a972ee839233cea62e60590fd2
--- /dev/null
+++ b/data/knowledge_base.json
@@ -0,0 +1,1115 @@
+{
+ "_metadata": {
+ "version": "1.0.0",
+ "created": "2025-12-13",
+ "description": "FarmEyes Disease Knowledge Base - 6 disease classes for Nigerian farmers. All content in English - N-ATLaS handles runtime translation to Hausa, Yoruba, Igbo.",
+ "crops_covered": [
+ "cassava",
+ "cocoa",
+ "tomato"
+ ],
+ "total_classes": 6,
+ "currency": "NGN",
+ "last_updated": "2025-12-13",
+ "note": "6-class model (diseases only, no healthy classes). N-ATLaS model performs all translations to local languages during app usage."
+ },
+ "diseases": {
+ "cassava_bacterial_blight": {
+ "id": "CBB_001",
+ "class_name": "Cassava Bacteria Blight",
+ "display_name": "Cassava Bacterial Blight",
+ "scientific_name": "Xanthomonas axonopodis pv. manihotis",
+ "crop": "cassava",
+ "category": "bacterial",
+ "is_disease": true,
+ "severity": {
+ "level": "high",
+ "scale": 4,
+ "max_scale": 5,
+ "description": "Severe bacterial disease that can cause significant yield losses, especially during rainy season"
+ },
+ "symptoms": [
+ "Angular leaf spots that appear water-soaked",
+ "Leaf wilting and yellowing starting from the edges",
+ "Gum exudation (bacterial ooze) from stems - sticky yellowish substance",
+ "Dieback of shoot tips and young branches",
+ "Blighting and death of leaves",
+ "Vascular discoloration (brown streaks) when stem is cut",
+ "Canker formation on stems in severe cases"
+ ],
+ "how_it_spreads": [
+ "Infected planting materials (stem cuttings) - most common source",
+ "Rain splash spreading bacteria between plants",
+ "Contaminated cutting tools and farm equipment",
+ "Wind-driven rain carrying bacteria",
+ "Workers' hands and clothing after touching infected plants"
+ ],
+ "favorable_conditions": {
+ "temperature": "25-30°C",
+ "humidity": "Above 80%",
+ "season": "Rainy season (May-October in Nigeria)",
+ "other": "Waterlogged soils, poor drainage, dense plant spacing"
+ },
+ "yield_loss": {
+ "min_percent": 20,
+ "max_percent": 100,
+ "average_percent": 50,
+ "description": "Can cause 20-100% yield loss depending on severity, variety susceptibility, and time of infection"
+ },
+ "treatments": {
+ "cultural": [
+ {
+ "method": "Use disease-free planting materials",
+ "description": "Select healthy stems from certified disease-free fields. Inspect stems carefully before planting.",
+ "effectiveness": "high",
+ "cost_ngn": 0,
+ "timing": "Before planting"
+ },
+ {
+ "method": "Roguing (remove infected plants)",
+ "description": "Immediately remove and burn all infected plants. Do not leave debris in field.",
+ "effectiveness": "high",
+ "cost_ngn_per_hectare": 5000,
+ "timing": "As soon as symptoms appear"
+ },
+ {
+ "method": "Crop rotation",
+ "description": "Plant non-host crops like maize, sorghum, or legumes for 2-3 years before returning cassava to the field.",
+ "effectiveness": "medium",
+ "cost_ngn": 0,
+ "timing": "Seasonal planning"
+ },
+ {
+ "method": "Tool sanitation",
+ "description": "Disinfect cutting tools with 10% bleach solution between plants and fields.",
+ "effectiveness": "high",
+ "cost_ngn": 500,
+ "timing": "During all field operations"
+ }
+ ],
+ "chemical": [
+ {
+ "product_name": "Copper-based bactericide",
+ "active_ingredient": "Copper hydroxide",
+ "local_brands": [
+ "Kocide 101",
+ "Nordox 75",
+ "Funguran-OH",
+ "Champion WP"
+ ],
+ "cost_ngn_min": 8000,
+ "cost_ngn_max": 15000,
+ "cost_unit": "per hectare per application",
+ "dosage": "2-3 kg per hectare in 400-500L water",
+ "frequency": "Every 2-3 weeks during rainy season",
+ "application_method": "Spray thoroughly on leaves and stems, especially undersides of leaves",
+ "effectiveness": "medium",
+ "safety_precautions": [
+ "Wear protective clothing, gloves, and face mask",
+ "Do not spray on windy days",
+ "Avoid contact with skin and eyes",
+ "Wait 7 days before harvest"
+ ]
+ }
+ ],
+ "resistant_varieties": [
+ {
+ "variety_name": "TMS 30572",
+ "resistance_level": "high",
+ "source": "IITA Ibadan, NRCRI Umudike",
+ "cost_ngn_per_bundle": 15000,
+ "notes": "Widely available, good yield potential"
+ },
+ {
+ "variety_name": "TMS 4(2)1425",
+ "resistance_level": "high",
+ "source": "IITA Ibadan",
+ "cost_ngn_per_bundle": 18000,
+ "notes": "High yielding with good disease resistance"
+ },
+ {
+ "variety_name": "NR 8083",
+ "resistance_level": "medium",
+ "source": "NRCRI Umudike",
+ "cost_ngn_per_bundle": 15000,
+ "notes": "Good for multiple disease resistance"
+ }
+ ],
+ "traditional": [
+ {
+ "method": "Wood ash application",
+ "description": "Apply wood ash around plant base after rain. Creates alkaline environment less favorable for bacteria.",
+ "effectiveness": "low",
+ "cost_ngn": 0
+ }
+ ]
+ },
+ "total_treatment_cost": {
+ "min_ngn": 8000,
+ "max_ngn": 25000,
+ "per": "hectare",
+ "notes": "Using resistant varieties is the most cost-effective long-term solution"
+ },
+ "prevention": [
+ "Use certified disease-free planting materials from reputable sources (IITA, NRCRI, accredited seed companies)",
+ "Plant resistant varieties (TMS 30572, TMS 4(2)1425, NR 8083)",
+ "Practice field sanitation - remove all crop debris after harvest",
+ "Avoid working in fields when plants are wet from rain or dew",
+ "Disinfect cutting tools with 10% bleach solution between plants",
+ "Implement 2-3 year crop rotation with non-host crops (maize, legumes)",
+ "Ensure proper drainage to reduce humidity around plants",
+ "Maintain recommended plant spacing (1m x 1m) for good air circulation",
+ "Scout fields regularly for early disease detection"
+ ],
+ "health_projection": {
+ "early_detection": {
+ "recovery_chance_percent": 80,
+ "message": "If treated within 2 weeks of first symptoms appearing, approximately 80% of your field can be saved. Remove infected plants immediately and apply copper spray to remaining plants."
+ },
+ "moderate_infection": {
+ "recovery_chance_percent": 50,
+ "message": "With moderate infection (less than 30% of plants affected), expect 50% yield recovery with immediate treatment. Focus on protecting healthy plants."
+ },
+ "severe_infection": {
+ "recovery_chance_percent": 20,
+ "message": "Severe infection requires removing all affected plants. Only 20% may be salvageable. Consider replanting with resistant varieties next season."
+ }
+ },
+ "expert_contact": {
+ "institution": "National Root Crops Research Institute (NRCRI)",
+ "location": "Umudike, Abia State, Nigeria",
+ "services": "Disease diagnosis, resistant variety seeds, extension services"
+ }
+ },
+ "cassava_mosaic_virus": {
+ "id": "CMD_001",
+ "class_name": "Cassava Mosaic Virus",
+ "display_name": "Cassava Mosaic Virus",
+ "scientific_name": "Cassava mosaic geminiviruses (CMGs) - African cassava mosaic virus, East African cassava mosaic virus",
+ "crop": "cassava",
+ "category": "viral",
+ "is_disease": true,
+ "severity": {
+ "level": "very_high",
+ "scale": 5,
+ "max_scale": 5,
+ "description": "Most devastating cassava disease in Africa. Can cause complete crop failure in susceptible varieties."
+ },
+ "symptoms": [
+ "Mosaic pattern of yellow/light green and dark green patches on leaves",
+ "Leaf curling, twisting, and distortion",
+ "Reduced leaf size compared to healthy plants",
+ "Severely stunted plant growth",
+ "Misshapen, small, or no tuber formation",
+ "Chlorosis (yellowing) along leaf veins",
+ "Leaves may become completely yellow in severe cases"
+ ],
+ "how_it_spreads": [
+ "Whiteflies (Bemisia tabaci) - primary vector, transmit virus while feeding",
+ "Infected stem cuttings used for planting - carries virus to new fields",
+ "Mechanical transmission through contaminated tools (minor)",
+ "NOT spread by contact between plants or through soil"
+ ],
+ "favorable_conditions": {
+ "temperature": "25-35°C",
+ "humidity": "Variable - disease occurs in all humidity levels",
+ "season": "Year-round, but more severe in dry season when whitefly populations peak",
+ "other": "High whitefly populations, planting infected cuttings, presence of infected plants nearby"
+ },
+ "yield_loss": {
+ "min_percent": 30,
+ "max_percent": 95,
+ "average_percent": 50,
+ "description": "Can cause 30-95% yield loss. Severely infected plants may produce no usable tubers at all."
+ },
+ "treatments": {
+ "cultural": [
+ {
+ "method": "Use virus-free planting materials",
+ "description": "Source stem cuttings only from certified disease-free multiplication sites. Never take cuttings from infected plants.",
+ "effectiveness": "very_high",
+ "cost_ngn": 0,
+ "timing": "Before planting"
+ },
+ {
+ "method": "Roguing infected plants",
+ "description": "Remove and destroy infected plants as soon as symptoms appear. This prevents whiteflies from spreading virus to healthy plants.",
+ "effectiveness": "high",
+ "cost_ngn_per_hectare": 5000,
+ "timing": "Weekly scouting and removal"
+ },
+ {
+ "method": "Early planting",
+ "description": "Plant at onset of rains when whitefly populations are lower. This gives plants time to establish before peak whitefly season.",
+ "effectiveness": "medium",
+ "cost_ngn": 0,
+ "timing": "Start of rainy season"
+ },
+ {
+ "method": "Remove volunteer plants",
+ "description": "Remove any cassava plants that grow from previous season's debris. These can harbor virus.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare": 3000,
+ "timing": "Before and during planting"
+ }
+ ],
+ "chemical": [
+ {
+ "product_name": "Imidacloprid (for whitefly control)",
+ "active_ingredient": "Imidacloprid",
+ "local_brands": [
+ "Confidor",
+ "Gaucho",
+ "Admire",
+ "Kohinor"
+ ],
+ "cost_ngn_min": 5000,
+ "cost_ngn_max": 12000,
+ "cost_unit": "per hectare per application",
+ "dosage": "Follow label instructions - typically 100-200ml per hectare",
+ "frequency": "Every 2-3 weeks when whitefly pressure is high",
+ "application_method": "Spray on leaves, targeting undersides where whiteflies feed",
+ "effectiveness": "medium",
+ "important_note": "This controls whiteflies but does NOT cure already infected plants. Infected plants must be removed.",
+ "safety_precautions": [
+ "Highly toxic to bees - apply in evening when bees are not active",
+ "Wear protective clothing and gloves",
+ "Do not spray near water sources",
+ "Follow pre-harvest interval on label"
+ ]
+ }
+ ],
+ "resistant_varieties": [
+ {
+ "variety_name": "TME 419",
+ "resistance_level": "very_high",
+ "source": "IITA Ibadan, state ADPs",
+ "cost_ngn_per_bundle": 20000,
+ "notes": "Most widely recommended CMD-resistant variety. High yield."
+ },
+ {
+ "variety_name": "UMUCASS 36 (TMS 01/1368)",
+ "resistance_level": "very_high",
+ "source": "NRCRI Umudike",
+ "cost_ngn_per_bundle": 22000,
+ "notes": "Excellent CMD resistance with high dry matter content"
+ },
+ {
+ "variety_name": "UMUCASS 37 (TMS 01/1412)",
+ "resistance_level": "very_high",
+ "source": "NRCRI Umudike",
+ "cost_ngn_per_bundle": 22000,
+ "notes": "Good for garri processing"
+ },
+ {
+ "variety_name": "UMUCASS 38 (TMS 01/1371)",
+ "resistance_level": "high",
+ "source": "NRCRI Umudike",
+ "cost_ngn_per_bundle": 20000,
+ "notes": "Multiple disease resistance"
+ },
+ {
+ "variety_name": "TMS 98/0581",
+ "resistance_level": "high",
+ "source": "IITA Ibadan",
+ "cost_ngn_per_bundle": 18000,
+ "notes": "Good yield with CMD tolerance"
+ }
+ ],
+ "traditional": [
+ {
+ "method": "Neem leaf extract spray",
+ "description": "Crush 1kg fresh neem leaves, soak in 5 liters of water overnight, strain and spray. Repels whiteflies.",
+ "effectiveness": "low",
+ "cost_ngn": 2000
+ }
+ ]
+ },
+ "total_treatment_cost": {
+ "min_ngn": 5000,
+ "max_ngn": 35000,
+ "per": "hectare",
+ "notes": "IMPORTANT: There is NO CURE for viral diseases. The best investment is planting resistant varieties. Infected plants cannot be cured and must be removed."
+ },
+ "prevention": [
+ "Plant CMD-resistant varieties (TME 419, UMUCASS 36, 37, 38) - MOST IMPORTANT",
+ "Source planting materials only from certified disease-free sources",
+ "Never take cuttings from plants showing any mosaic symptoms",
+ "Control whitefly populations with insecticides or neem extracts",
+ "Remove and burn all volunteer cassava plants from previous seasons",
+ "Practice thorough field sanitation after harvest",
+ "Avoid planting new cassava fields adjacent to infected fields",
+ "Inspect plants weekly and remove infected ones immediately",
+ "Do not transport cuttings from areas with high CMD incidence"
+ ],
+ "health_projection": {
+ "early_detection": {
+ "recovery_chance_percent": 70,
+ "message": "If you detect CMD early and immediately remove infected plants, you can protect approximately 70% of your yield. The key is stopping spread to healthy plants."
+ },
+ "moderate_infection": {
+ "recovery_chance_percent": 40,
+ "message": "With moderate infection across the field, focus on removing all infected plants and protecting the remaining healthy ones. Expected yield recovery is about 40%."
+ },
+ "severe_infection": {
+ "recovery_chance_percent": 10,
+ "message": "Severe CMD infection has spread widely. This season's harvest will be significantly reduced. Plan to replant next season using resistant varieties only."
+ }
+ },
+ "expert_contact": {
+ "institution": "International Institute of Tropical Agriculture (IITA)",
+ "location": "Ibadan, Oyo State, Nigeria",
+ "services": "CMD-resistant varieties, disease diagnosis, training on CMD management"
+ }
+ },
+ "cocoa_monilia_disease": {
+ "id": "CMN_001",
+ "class_name": "Cocoa Monilia Disease",
+ "display_name": "Frosty Pod Rot (Monilia Disease)",
+ "scientific_name": "Moniliophthora roreri",
+ "crop": "cocoa",
+ "category": "fungal",
+ "is_disease": true,
+ "severity": {
+ "level": "high",
+ "scale": 4,
+ "max_scale": 5,
+ "description": "Serious fungal disease that can destroy entire pod harvests. The white 'frosty' spore covering produces millions of spores that spread rapidly."
+ },
+ "symptoms": [
+ "White or cream-colored powdery coating on pods giving a 'frosty' appearance",
+ "Brown spots that enlarge rapidly on pod surface",
+ "Irregular swelling or lumps on pods before external symptoms appear",
+ "Internal pod rot with liquefied, foul-smelling pulp",
+ "Premature ripening or blackening of pods",
+ "Beans inside become sticky, clumped together, and unusable",
+ "Strong unpleasant odor from infected pods"
+ ],
+ "how_it_spreads": [
+ "Wind dispersal of spores from infected pods - can travel several kilometers",
+ "Rain splash spreading spores to nearby pods",
+ "Contact with infected pods during harvesting",
+ "Contaminated harvesting tools (machetes, baskets)",
+ "Leaving infected pods on trees or ground provides continuous spore source"
+ ],
+ "favorable_conditions": {
+ "temperature": "20-28°C (optimal around 25°C)",
+ "humidity": "Above 85%",
+ "season": "Peak during rainy season, especially with prolonged wet periods",
+ "other": "Poor air circulation, excessive shade, leaving infected pods in field"
+ },
+ "yield_loss": {
+ "min_percent": 25,
+ "max_percent": 90,
+ "average_percent": 50,
+ "description": "Can cause 25-90% pod losses in favorable conditions. Unmanaged outbreaks can destroy nearly entire harvests."
+ },
+ "treatments": {
+ "cultural": [
+ {
+ "method": "Weekly removal and destruction of infected pods",
+ "description": "Inspect all trees weekly. Remove any pod showing symptoms. Bury pods 30cm deep or burn them. Never leave infected pods on ground.",
+ "effectiveness": "high",
+ "cost_ngn_per_hectare": 10000,
+ "cost_frequency": "per month (labor)"
+ },
+ {
+ "method": "Shade management",
+ "description": "Reduce shade canopy to 50% to improve air circulation and reduce humidity in the canopy.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare": 15000,
+ "cost_frequency": "one-time pruning cost"
+ },
+ {
+ "method": "Tree pruning",
+ "description": "Regular pruning to open up tree canopy, improve air flow, and make pods more accessible for inspection and harvesting.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare": 20000,
+ "cost_frequency": "annually"
+ },
+ {
+ "method": "Prompt harvesting",
+ "description": "Harvest mature pods immediately. Overripe pods are more susceptible to infection.",
+ "effectiveness": "medium",
+ "cost_ngn": 0
+ }
+ ],
+ "chemical": [
+ {
+ "product_name": "Copper-based fungicide",
+ "active_ingredient": "Copper hydroxide or Copper oxychloride",
+ "local_brands": [
+ "Kocide 101",
+ "Nordox 75",
+ "Koka Blue 50 WG",
+ "Funguran-OH"
+ ],
+ "cost_ngn_min": 15000,
+ "cost_ngn_max": 25000,
+ "cost_unit": "per hectare per application",
+ "dosage": "2.5-3 kg per hectare in 500L water",
+ "frequency": "Monthly during pod development season",
+ "application_method": "Spray pods thoroughly, especially young developing pods. Focus on lower trunk where pods form.",
+ "effectiveness": "medium",
+ "safety_precautions": [
+ "Wear protective clothing and mask",
+ "Apply in calm weather conditions",
+ "Avoid spraying during rain"
+ ]
+ },
+ {
+ "product_name": "Metalaxyl + Mancozeb combination",
+ "active_ingredient": "Metalaxyl 12% + Mancozeb 60%",
+ "local_brands": [
+ "Ridomil Gold MZ",
+ "Agro-laxyl 63.5 WP"
+ ],
+ "cost_ngn_min": 20000,
+ "cost_ngn_max": 35000,
+ "cost_unit": "per hectare per application",
+ "dosage": "2-2.5 kg per hectare",
+ "frequency": "Every 3-4 weeks during critical period (peak rainy season)",
+ "application_method": "Apply as preventive treatment before disease onset for best results",
+ "effectiveness": "high",
+ "safety_precautions": [
+ "Wear full protective equipment",
+ "Do not apply within 14 days of harvest",
+ "Store away from food items"
+ ]
+ }
+ ],
+ "biological": [
+ {
+ "method": "Trichoderma-based biocontrol",
+ "description": "Beneficial fungi that compete with and suppress disease fungi. Spray on pods and trunk.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare_min": 12000,
+ "cost_ngn_per_hectare_max": 20000,
+ "source": "Available from CRIN (Cocoa Research Institute of Nigeria) and some agro-dealers",
+ "notes": "Best used as part of integrated management, not as sole treatment"
+ }
+ ],
+ "traditional": [
+ {
+ "method": "Palm oil coating on pods",
+ "description": "Mix palm oil with water, spray on young pods. Creates a physical barrier against spore infection.",
+ "effectiveness": "low",
+ "cost_ngn": 5000,
+ "notes": "Traditional method with limited scientific validation"
+ }
+ ]
+ },
+ "total_treatment_cost": {
+ "min_ngn": 15000,
+ "max_ngn": 50000,
+ "per": "hectare per season",
+ "notes": "Combination of regular pod removal (sanitation) with fungicide application gives best results"
+ },
+ "prevention": [
+ "Inspect trees and remove infected pods at least weekly",
+ "Maintain shade canopy at 50% for good air circulation",
+ "Prune trees regularly to reduce humidity in canopy",
+ "Harvest mature pods promptly - do not leave overripe pods",
+ "Never leave infected or rotting pods on trees or ground",
+ "Bury removed pods at least 30cm deep or burn them",
+ "Clean harvesting tools between trees using soap solution",
+ "Apply preventive fungicide sprays before peak disease season",
+ "Maintain good drainage in plantation"
+ ],
+ "health_projection": {
+ "early_detection": {
+ "recovery_chance_percent": 75,
+ "message": "Early detection with immediate pod removal can save approximately 75% of your harvest. Start weekly inspections now and remove every infected pod."
+ },
+ "moderate_infection": {
+ "recovery_chance_percent": 50,
+ "message": "Moderate infection requires intensive sanitation combined with fungicide application. With immediate action, expect to save about 50% of remaining pods."
+ },
+ "severe_infection": {
+ "recovery_chance_percent": 25,
+ "message": "Severe frosty pod rot outbreak. Remove all infected pods, apply fungicide to protect remaining healthy pods. Focus on protecting next season's production."
+ }
+ },
+ "expert_contact": {
+ "institution": "Cocoa Research Institute of Nigeria (CRIN)",
+ "location": "Ibadan, Oyo State, Nigeria",
+ "services": "Disease diagnosis, fungicide recommendations, resistant varieties, extension services"
+ }
+ },
+ "cocoa_phytophthora_disease": {
+ "id": "CPH_001",
+ "class_name": "Cocoa Phytophthora Disease",
+ "display_name": "Black Pod Disease",
+ "scientific_name": "Phytophthora palmivora and Phytophthora megakarya",
+ "crop": "cocoa",
+ "category": "oomycete",
+ "is_disease": true,
+ "severity": {
+ "level": "very_high",
+ "scale": 5,
+ "max_scale": 5,
+ "description": "Most serious cocoa disease in West Africa. P. megakarya (found in Nigeria) is more aggressive than P. palmivora and can destroy 60-100% of pods in severe outbreaks."
+ },
+ "symptoms": [
+ "Dark brown to black lesions starting at any point on the pod",
+ "Lesions spread very rapidly, covering entire pod within 10-14 days",
+ "White or grayish mold growth on pod surface in humid conditions",
+ "Firm pod becomes soft as internal rot progresses",
+ "Beans inside become shriveled, stuck together, and turn black",
+ "Canker lesions on stem bark with reddish-brown gum exudation",
+ "Wilting of leaves and dieback of branches in severe trunk infections"
+ ],
+ "how_it_spreads": [
+ "Rain splash from infected pods - most important method",
+ "Infected pods on ground serve as continuous source of spores",
+ "Ants (especially Crematogaster striatula) carry spores between pods",
+ "Wind-driven rain spreading spores",
+ "Contaminated harvesting tools",
+ "Spores can survive in soil and plant debris"
+ ],
+ "favorable_conditions": {
+ "temperature": "20-30°C (optimal around 25°C)",
+ "humidity": "Above 85%",
+ "season": "Peak during rainy season (May-October in southern Nigeria)",
+ "other": "High rainfall, poor drainage, excessive shade, infected pods left in field"
+ },
+ "yield_loss": {
+ "min_percent": 30,
+ "max_percent": 90,
+ "average_percent": 60,
+ "description": "Causes 30-90% pod losses. P. megakarya infections are faster and more destructive than P. palmivora. Annual losses estimated at over $700 million globally."
+ },
+ "treatments": {
+ "cultural": [
+ {
+ "method": "Frequent pod removal",
+ "description": "Remove all infected pods every 5-7 days. Bury pods at least 30cm deep or burn them. Never leave on ground surface.",
+ "effectiveness": "high",
+ "cost_ngn_per_hectare": 15000,
+ "cost_frequency": "per month (labor)"
+ },
+ {
+ "method": "Improve drainage",
+ "description": "Create drainage channels to prevent waterlogging. Remove stagnant water from around trees.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare": 30000,
+ "cost_frequency": "one-time installation"
+ },
+ {
+ "method": "Shade and canopy management",
+ "description": "Maintain 50% shade, prune lower branches, and thin canopy to improve air circulation and reduce humidity.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare": 25000,
+ "cost_frequency": "annually"
+ },
+ {
+ "method": "Ant control",
+ "description": "Destroy ant nests around trees. Ants spread disease spores between pods.",
+ "effectiveness": "low",
+ "cost_ngn_per_hectare": 5000
+ }
+ ],
+ "chemical": [
+ {
+ "product_name": "Metalaxyl + Copper combination",
+ "active_ingredient": "Metalaxyl 12% + Copper-1-oxide 60%",
+ "local_brands": [
+ "Ridomil 72 Plus",
+ "Foko",
+ "Ridomil Gold Plus"
+ ],
+ "cost_ngn_min": 25000,
+ "cost_ngn_max": 40000,
+ "cost_unit": "per hectare per application",
+ "dosage": "2.5-3 kg per hectare in 500L water",
+ "frequency": "3-4 applications during peak season (June, August, September, October)",
+ "application_method": "Spray on all pods and lower trunk. Most effective when applied before disease onset.",
+ "effectiveness": "very_high",
+ "safety_precautions": [
+ "Wear full protective equipment",
+ "Apply in calm weather",
+ "Follow label directions exactly",
+ "Observe pre-harvest interval"
+ ]
+ },
+ {
+ "product_name": "Copper fungicide",
+ "active_ingredient": "Copper hydroxide",
+ "local_brands": [
+ "Kocide 101",
+ "Nordox 75",
+ "Blue Shield",
+ "Funguran-OH"
+ ],
+ "cost_ngn_min": 15000,
+ "cost_ngn_max": 25000,
+ "cost_unit": "per hectare per application",
+ "dosage": "2-3 kg per hectare",
+ "frequency": "Every 3-4 weeks during rainy season",
+ "application_method": "Thorough coverage of all pods. Contact fungicide - must cover pod surface to protect.",
+ "effectiveness": "medium"
+ },
+ {
+ "product_name": "Phosphonate (trunk injection)",
+ "active_ingredient": "Fosetyl-Al or Phosphorous acid",
+ "local_brands": [
+ "Foli-R-Fos 400",
+ "Aliette"
+ ],
+ "cost_ngn_min": 35000,
+ "cost_ngn_max": 50000,
+ "cost_unit": "per hectare per application",
+ "dosage": "As per label - injected into trunk",
+ "frequency": "1-2 times per season",
+ "application_method": "Inject directly into main trunk. Provides systemic protection throughout tree.",
+ "effectiveness": "very_high",
+ "notes": "Requires training for proper application technique"
+ }
+ ],
+ "biological": [
+ {
+ "method": "Trichoderma asperellum",
+ "description": "Beneficial fungus that parasitizes Phytophthora. Applied as spray to pods and trunk.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare_min": 15000,
+ "cost_ngn_per_hectare_max": 25000,
+ "source": "Research stage in Nigeria - contact CRIN for availability",
+ "notes": "Reduces disease but not as effective as chemical fungicides in severe outbreaks"
+ }
+ ],
+ "traditional": [
+ {
+ "method": "Ash application",
+ "description": "Apply wood ash around base of trees. May help reduce soil moisture and spore survival.",
+ "effectiveness": "low",
+ "cost_ngn": 2000
+ }
+ ]
+ },
+ "total_treatment_cost": {
+ "min_ngn": 25000,
+ "max_ngn": 80000,
+ "per": "hectare per season",
+ "notes": "6-8 fungicide applications may be needed in severe areas. Combining sanitation with fewer fungicide sprays is most cost-effective."
+ },
+ "prevention": [
+ "Remove and destroy infected pods every 5-7 days - most important practice",
+ "Maintain proper tree spacing and reduce shade to 50%",
+ "Improve drainage in waterlogged areas",
+ "Harvest pods as soon as they mature - do not leave overripe",
+ "Control ant populations that spread spores",
+ "Apply preventive fungicide sprays before rainy season peak",
+ "Remove all pods from ground surface",
+ "Prune lower branches to reduce humidity near pods",
+ "Clean tools between trees with soap solution",
+ "Remove mummified pods from previous seasons"
+ ],
+ "health_projection": {
+ "early_detection": {
+ "recovery_chance_percent": 80,
+ "message": "With immediate treatment and sanitation, approximately 80% of remaining healthy pods can be saved. Begin fungicide application and twice-weekly pod removal immediately."
+ },
+ "moderate_infection": {
+ "recovery_chance_percent": 50,
+ "message": "Apply fungicide immediately and intensify pod removal to every 5 days. With aggressive management, expect to save about 50% of your crop."
+ },
+ "severe_infection": {
+ "recovery_chance_percent": 20,
+ "message": "Severe black pod outbreak. This season's harvest is significantly compromised. Focus sanitation and fungicide efforts on protecting next season's production."
+ }
+ },
+ "expert_contact": {
+ "institution": "Cocoa Research Institute of Nigeria (CRIN)",
+ "location": "Ibadan, Oyo State, Nigeria",
+ "services": "Disease diagnosis, fungicide recommendations, integrated management training, tolerant varieties"
+ }
+ },
+ "tomato_gray_mold": {
+ "id": "TGM_001",
+ "class_name": "Tomato Gray Mold Disease",
+ "display_name": "Gray Mold (Botrytis Blight)",
+ "scientific_name": "Botrytis cinerea",
+ "crop": "tomato",
+ "category": "fungal",
+ "is_disease": true,
+ "severity": {
+ "level": "high",
+ "scale": 4,
+ "max_scale": 5,
+ "description": "Common and destructive fungal disease especially in humid conditions. Can affect all above-ground plant parts and cause significant post-harvest losses."
+ },
+ "symptoms": [
+ "Soft, water-soaked spots on leaves, stems, and fruits",
+ "Distinctive gray fuzzy mold growth (spores) on infected areas",
+ "Brown to tan lesions on stems, often at pruning wounds or leaf scars",
+ "Blossom blight - flowers turn brown, wither, and fall off",
+ "Ghost spots on fruits - pale rings with darker centers",
+ "Fruit rot starting from stem end, wounds, or where fruit touches ground",
+ "Stem cankers that can girdle and kill plant"
+ ],
+ "how_it_spreads": [
+ "Airborne spores (conidia) - primary spread method, released in clouds when disturbed",
+ "Splashing water from rain or overhead irrigation",
+ "Contaminated hands, tools, and clothing",
+ "Infected plant debris in soil - fungus survives as sclerotia",
+ "Entry through wounds, pruning cuts, flower scars, or senescent tissue"
+ ],
+ "favorable_conditions": {
+ "temperature": "15-25°C (optimal around 20°C)",
+ "humidity": "Above 93% for at least 8-12 hours",
+ "season": "Cool, cloudy, humid weather conditions",
+ "other": "Poor air circulation, overhead irrigation, wounded plants, dense plant canopy"
+ },
+ "yield_loss": {
+ "min_percent": 15,
+ "max_percent": 50,
+ "average_percent": 25,
+ "description": "Can cause 15-50% losses in greenhouses. Field losses typically lower but can be severe in prolonged wet weather."
+ },
+ "treatments": {
+ "cultural": [
+ {
+ "method": "Improve air circulation",
+ "description": "Increase plant spacing, stake plants properly, prune lower leaves, and ensure good ventilation in greenhouses.",
+ "effectiveness": "high",
+ "cost_ngn": 0
+ },
+ {
+ "method": "Remove infected plant parts",
+ "description": "Immediately remove and destroy (burn or bury) any infected leaves, stems, flowers, or fruits. Do not compost.",
+ "effectiveness": "high",
+ "cost_ngn_per_week": 3000
+ },
+ {
+ "method": "Avoid overhead irrigation",
+ "description": "Use drip irrigation to keep foliage dry. Water early in day so plants dry before evening.",
+ "effectiveness": "high",
+ "cost_ngn_per_hectare": 50000,
+ "notes": "One-time drip system installation cost"
+ },
+ {
+ "method": "Prune lower leaves",
+ "description": "Remove leaves touching the ground and lower leaves to improve air flow around plants.",
+ "effectiveness": "medium",
+ "cost_ngn_per_week": 5000
+ },
+ {
+ "method": "Reduce humidity in greenhouse",
+ "description": "Ventilate greenhouse, especially in evening. Heat and vent to reduce humidity below 85%.",
+ "effectiveness": "high",
+ "cost_ngn": 0
+ }
+ ],
+ "chemical": [
+ {
+ "product_name": "Fludioxonil",
+ "active_ingredient": "Fludioxonil",
+ "local_brands": [
+ "Scholar",
+ "Medallion",
+ "Geoxe"
+ ],
+ "cost_ngn_min": 12000,
+ "cost_ngn_max": 20000,
+ "cost_unit": "per hectare per application",
+ "dosage": "Follow label directions",
+ "frequency": "Every 7-10 days during humid periods",
+ "application_method": "Spray to thorough coverage of all plant parts",
+ "effectiveness": "very_high",
+ "notes": "One of the most effective fungicides for gray mold"
+ },
+ {
+ "product_name": "Chlorothalonil",
+ "active_ingredient": "Chlorothalonil",
+ "local_brands": [
+ "Daconil",
+ "Bravo",
+ "Echo"
+ ],
+ "cost_ngn_min": 8000,
+ "cost_ngn_max": 15000,
+ "cost_unit": "per hectare per application",
+ "dosage": "2-2.5 L per hectare",
+ "frequency": "Every 7-14 days",
+ "application_method": "Apply as preventive spray before disease onset",
+ "effectiveness": "medium",
+ "safety_precautions": [
+ "Wait 7 days between last spray and harvest",
+ "Wear protective equipment",
+ "Do not apply in extreme heat"
+ ]
+ },
+ {
+ "product_name": "Iprodione",
+ "active_ingredient": "Iprodione",
+ "local_brands": [
+ "Rovral",
+ "Chipco"
+ ],
+ "cost_ngn_min": 10000,
+ "cost_ngn_max": 18000,
+ "cost_unit": "per hectare per application",
+ "dosage": "1-1.5 kg per hectare",
+ "frequency": "Every 10-14 days",
+ "application_method": "Spray on foliage and stems",
+ "effectiveness": "high",
+ "notes": "Rotate with other fungicide classes to prevent resistance"
+ }
+ ],
+ "biological": [
+ {
+ "method": "Bacillus subtilis biofungicide",
+ "description": "Biological fungicide that colonizes plant surfaces and competes with disease fungi.",
+ "product_names": [
+ "Serenade",
+ "Cease"
+ ],
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare_min": 10000,
+ "cost_ngn_per_hectare_max": 18000,
+ "notes": "Best used preventively. Approved for organic production."
+ }
+ ],
+ "traditional": [
+ {
+ "method": "Neem oil spray",
+ "description": "Mix 5ml neem oil per liter of water with small amount of liquid soap. Spray weekly as preventive.",
+ "effectiveness": "low",
+ "cost_ngn": 5000
+ },
+ {
+ "method": "Garlic extract spray",
+ "description": "Crush 100g garlic, soak in 1 liter water for 24 hours, strain and spray. Has some antifungal properties.",
+ "effectiveness": "low",
+ "cost_ngn": 3000
+ }
+ ]
+ },
+ "total_treatment_cost": {
+ "min_ngn": 8000,
+ "max_ngn": 25000,
+ "per": "hectare per application",
+ "notes": "Prevention through cultural practices (spacing, irrigation method, pruning) is most cost-effective approach"
+ },
+ "prevention": [
+ "Maintain good air circulation between plants with proper spacing",
+ "Use drip irrigation instead of overhead watering",
+ "Remove plant debris and fallen leaves promptly",
+ "Prune lower leaves to improve air flow at plant base",
+ "Avoid working with plants when foliage is wet",
+ "Sanitize pruning tools with 10% bleach solution between plants",
+ "Ventilate greenhouses to reduce humidity, especially at night",
+ "Apply preventive fungicides during cool, humid weather forecasts",
+ "Avoid excessive nitrogen fertilization which creates dense, soft growth",
+ "Remove crop debris thoroughly at end of season"
+ ],
+ "health_projection": {
+ "early_detection": {
+ "recovery_chance_percent": 85,
+ "message": "Early treatment with fungicide and good sanitation can protect approximately 85% of your crop. Remove all infected parts immediately and improve air circulation."
+ },
+ "moderate_infection": {
+ "recovery_chance_percent": 60,
+ "message": "Remove all infected plant parts immediately, apply fungicide, and reduce humidity. With aggressive management, about 60% of crop can be saved."
+ },
+ "severe_infection": {
+ "recovery_chance_percent": 30,
+ "message": "Severe gray mold outbreak requires intensive fungicide program and complete removal of infected plants. Expect significant yield reduction this season."
+ }
+ }
+ },
+ "tomato_wilt_disease": {
+ "id": "TWD_001",
+ "class_name": "Tomato Wilt Disease",
+ "display_name": "Tomato Wilt Disease",
+ "scientific_name": "Fusarium oxysporum f. sp. lycopersici (Fusarium wilt) or Ralstonia solanacearum (Bacterial wilt)",
+ "crop": "tomato",
+ "category": "fungal_or_bacterial",
+ "is_disease": true,
+ "severity": {
+ "level": "very_high",
+ "scale": 5,
+ "max_scale": 5,
+ "description": "Devastating soil-borne diseases that block water transport in plants. Bacterial wilt (common in tropical Nigeria) can kill plants within days and has no chemical cure."
+ },
+ "symptoms": [
+ "Wilting of leaves and stems, often starting on one side of plant",
+ "Yellowing of lower leaves, progressing upward",
+ "Wilting during hottest part of day, partial recovery at night (early stage)",
+ "Permanent wilting that does not recover even with watering",
+ "Brown discoloration of vascular tissue (cut stem to see brown streaks)",
+ "Bacterial wilt: milky white bacterial ooze when cut stem is placed in water",
+ "Stunted growth and eventual plant death",
+ "Fusarium wilt: symptoms may appear on one side of plant or leaf first"
+ ],
+ "how_it_spreads": [
+ "Contaminated soil - pathogens survive in soil for many years",
+ "Infected transplants from nurseries",
+ "Contaminated water (bacterial wilt spreads easily in irrigation water)",
+ "Tools and equipment that moved soil between fields",
+ "Root-to-root contact between plants",
+ "Nematode damage to roots facilitates infection",
+ "Workers' boots and clothing carrying contaminated soil"
+ ],
+ "favorable_conditions": {
+ "temperature": "Fusarium: 27-28°C optimal; Bacterial wilt: 30-35°C optimal",
+ "humidity": "High soil moisture favors bacterial wilt",
+ "season": "Year-round in Nigeria, worse during rainy season",
+ "other": "Poor drainage, root damage from nematodes, acidic soil (for Fusarium), continuous cropping"
+ },
+ "yield_loss": {
+ "min_percent": 30,
+ "max_percent": 100,
+ "average_percent": 60,
+ "description": "Can cause 30-100% losses. Bacterial wilt can destroy an entire field within 2-3 weeks in favorable conditions."
+ },
+ "treatments": {
+ "cultural": [
+ {
+ "method": "Remove and destroy infected plants",
+ "description": "Immediately remove wilted plants including roots. Burn or bury at least 1 meter deep away from field. Do not compost.",
+ "effectiveness": "medium",
+ "cost_ngn_per_plant": 100
+ },
+ {
+ "method": "Long crop rotation",
+ "description": "Rotate away from tomatoes and related crops (pepper, eggplant, potato) for 4-5 years minimum.",
+ "effectiveness": "medium",
+ "cost_ngn": 0,
+ "notes": "Pathogens can survive in soil for many years"
+ },
+ {
+ "method": "Improve drainage",
+ "description": "Plant on raised beds or ridges. Ensure good soil drainage. Avoid waterlogging.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare": 20000
+ },
+ {
+ "method": "Soil solarization",
+ "description": "Cover moist soil with clear plastic during hottest months for 4-6 weeks. Heat kills pathogens in top soil layer.",
+ "effectiveness": "medium",
+ "cost_ngn_per_hectare": 30000
+ },
+ {
+ "method": "Use disease-free transplants",
+ "description": "Purchase transplants only from certified disease-free nurseries. Inspect roots before planting.",
+ "effectiveness": "high",
+ "cost_ngn_premium": 5000
+ },
+ {
+ "method": "Grafting onto resistant rootstocks",
+ "description": "Graft susceptible varieties onto wilt-resistant rootstocks. Provides excellent protection.",
+ "effectiveness": "very_high",
+ "cost_ngn_per_plant": 150,
+ "notes": "Labor intensive but very effective"
+ }
+ ],
+ "chemical": [
+ {
+ "product_name": "Soil fumigant (for Fusarium)",
+ "active_ingredient": "Metam sodium or Dazomet",
+ "local_brands": [
+ "Vapam",
+ "Basamid"
+ ],
+ "cost_ngn_min": 80000,
+ "cost_ngn_max": 150000,
+ "cost_unit": "per hectare",
+ "dosage": "Follow label directions carefully",
+ "application_method": "Apply to soil before planting, cover with plastic, wait 2-3 weeks before planting",
+ "effectiveness": "medium",
+ "important_note": "Expensive and may harm beneficial soil organisms. Not effective against bacterial wilt.",
+ "safety_precautions": [
+ "Highly toxic - requires professional application",
+ "Keep people and animals away during treatment",
+ "Follow all waiting periods before planting"
+ ]
+ },
+ {
+ "product_name": "Note on bacterial wilt",
+ "description": "There are NO effective chemical treatments for bacterial wilt. Focus entirely on prevention and resistant varieties.",
+ "effectiveness": "none"
+ }
+ ],
+ "resistant_varieties": [
+ {
+ "resistance_type": "Fusarium wilt resistant (F, F2, F3)",
+ "description": "Many commercial varieties have resistance to Fusarium races. Look for F, F2, or F3 on seed packets.",
+ "effectiveness": "high",
+ "notes": "Different races exist - variety may resist some but not all"
+ },
+ {
+ "resistance_type": "Bacterial wilt resistant",
+ "description": "Few varieties have true resistance. Some have tolerance. Grafting onto resistant rootstocks is most effective.",
+ "effectiveness": "medium",
+ "notes": "Check with local extension for recommended varieties"
+ }
+ ],
+ "biological": [
+ {
+ "method": "Trichoderma application",
+ "description": "Apply Trichoderma-based products to soil before planting. Colonizes roots and provides some protection.",
+ "effectiveness": "low_to_medium",
+ "cost_ngn_per_hectare": 15000,
+ "notes": "Best as part of integrated management, not sole treatment"
+ },
+ {
+ "method": "Mycorrhizal inoculants",
+ "description": "Apply mycorrhizal fungi to transplant roots. Improves root health and provides some disease suppression.",
+ "effectiveness": "low",
+ "cost_ngn_per_hectare": 10000
+ }
+ ],
+ "traditional": [
+ {
+ "method": "Organic matter addition",
+ "description": "Add well-composted organic matter to soil. Improves soil health and beneficial microbe populations.",
+ "effectiveness": "low",
+ "cost_ngn_per_hectare": 20000
+ },
+ {
+ "method": "Lime application (for Fusarium)",
+ "description": "Raise soil pH to 6.5-7.0 with agricultural lime. Fusarium prefers acidic soils.",
+ "effectiveness": "low",
+ "cost_ngn_per_hectare": 15000
+ }
+ ]
+ },
+ "total_treatment_cost": {
+ "min_ngn": 20000,
+ "max_ngn": 150000,
+ "per": "hectare",
+ "notes": "Prevention is far more cost-effective than treatment. Once wilt pathogens are in soil, they persist for years. Invest in resistant varieties and grafted transplants."
+ },
+ "prevention": [
+ "Plant resistant varieties - most important for Fusarium wilt",
+ "Use grafted plants with resistant rootstocks - best for bacterial wilt",
+ "Purchase transplants only from certified disease-free nurseries",
+ "Practice long crop rotation (4-5 years) away from solanaceous crops",
+ "Improve soil drainage - plant on raised beds in wet areas",
+ "Sanitize tools and boots when moving between fields",
+ "Avoid introducing contaminated soil to clean fields",
+ "Control root-knot nematodes that facilitate infection",
+ "Use clean irrigation water - bacterial wilt spreads in water",
+ "Add organic matter to support beneficial soil microorganisms",
+ "Solarize soil in heavily infected areas before replanting",
+ "Never plant in fields with known wilt history without taking precautions"
+ ],
+ "health_projection": {
+ "early_detection": {
+ "recovery_chance_percent": 50,
+ "message": "Remove wilted plants immediately to prevent spread. Remaining plants have about 50% chance if action is taken quickly. Do not replant in same location this season."
+ },
+ "moderate_infection": {
+ "recovery_chance_percent": 25,
+ "message": "Multiple plants affected indicates pathogen is established in soil. Remove all affected plants. Remaining healthy plants are at high risk. Consider abandoning field for this season."
+ },
+ "severe_infection": {
+ "recovery_chance_percent": 5,
+ "message": "Severe wilt outbreak means soil is heavily contaminated. This season's crop is lost. Do not plant tomatoes or related crops in this field for at least 4-5 years. Consider soil solarization before future use."
+ }
+ },
+ "diagnostic_tip": "To distinguish between Fusarium and Bacterial wilt: Cut a stem and place cut end in clear glass of water. If milky white bacterial streaming appears within minutes, it is Bacterial wilt. Fusarium wilt shows brown vascular discoloration but no bacterial ooze."
+ }
+ }
+}
\ No newline at end of file
diff --git a/frontend/.DS_Store b/frontend/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..26d5aeb4f5ea617008bce41e51f467bf9c37fd4c
Binary files /dev/null and b/frontend/.DS_Store differ
diff --git a/frontend/css/.DS_Store b/frontend/css/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..6e03d204b7c23b422dc298ae9a1676999d7547ae
Binary files /dev/null and b/frontend/css/.DS_Store differ
diff --git a/frontend/css/main.css b/frontend/css/main.css
new file mode 100644
index 0000000000000000000000000000000000000000..79b76e34a494dbda89da762008d898338a5c7cd2
--- /dev/null
+++ b/frontend/css/main.css
@@ -0,0 +1,1451 @@
+/**
+ * FarmEyes Main Stylesheet
+ * Modern/Classy design for Diagnosis, ChatGPT-style for Chat only
+ */
+
+/* ==========================================================================
+ CSS VARIABLES
+ ========================================================================== */
+:root {
+ --bg-dark: #0D0D0D;
+ --bg-card: #1A1A1A;
+ --bg-elevated: #252525;
+ --bg-hover: #2D2D2D;
+
+ --text-primary: #FFFFFF;
+ --text-secondary: #B0B0B0;
+ --text-muted: #707070;
+
+ --accent: #10B981;
+ --accent-hover: #34D399;
+ --accent-muted: rgba(16, 185, 129, 0.15);
+
+ --border: #333333;
+ --border-light: #2A2A2A;
+
+ --success: #10B981;
+ --warning: #F59E0B;
+ --error: #EF4444;
+ --info: #3B82F6;
+
+ --severity-low: #10B981;
+ --severity-medium: #F59E0B;
+ --severity-high: #F97316;
+ --severity-very-high: #EF4444;
+
+ --radius-sm: 6px;
+ --radius-md: 10px;
+ --radius-lg: 14px;
+ --radius-xl: 20px;
+
+ --shadow: 0 4px 20px rgba(0,0,0,0.3);
+ --transition: 200ms ease;
+}
+
+/* ==========================================================================
+ RESET & BASE
+ ========================================================================== */
+*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
+
+html {
+ font-size: 16px;
+ -webkit-font-smoothing: antialiased;
+}
+
+body {
+ font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
+ background: var(--bg-dark);
+ color: var(--text-primary);
+ line-height: 1.5;
+ min-height: 100vh;
+}
+
+button { font-family: inherit; cursor: pointer; border: none; background: none; }
+input, textarea { font-family: inherit; }
+
+/* ==========================================================================
+ LAYOUT
+ ========================================================================== */
+.app-container { width: 100%; min-height: 100vh; }
+.page { display: none; min-height: 100vh; }
+.page.active { display: flex; flex-direction: column; }
+.hidden { display: none !important; }
+
+/* ==========================================================================
+ PAGE 1: LANGUAGE SELECTOR
+ ========================================================================== */
+.language-page {
+ min-height: 100vh;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ padding: 24px;
+ text-align: center;
+ background: linear-gradient(180deg, #0D0D0D 0%, #1A1A1A 100%);
+}
+
+.language-content { max-width: 500px; }
+
+.logo-large {
+ font-size: 72px;
+ margin-bottom: 16px;
+ animation: float 3s ease-in-out infinite;
+}
+
+@keyframes float {
+ 0%, 100% { transform: translateY(0); }
+ 50% { transform: translateY(-8px); }
+}
+
+.app-title {
+ font-size: 42px;
+ font-weight: 700;
+ color: var(--accent);
+ margin-bottom: 8px;
+}
+
+.app-tagline {
+ font-size: 16px;
+ color: var(--text-secondary);
+ margin-bottom: 40px;
+}
+
+.language-selection { margin-bottom: 20px; }
+
+.selection-title {
+ font-size: 22px;
+ font-weight: 600;
+ margin-bottom: 8px;
+}
+
+.selection-subtitle {
+ font-size: 14px;
+ color: var(--text-muted);
+ margin-bottom: 24px;
+}
+
+.language-grid {
+ display: grid;
+ grid-template-columns: repeat(2, 1fr);
+ gap: 12px;
+ margin-bottom: 28px;
+}
+
+.language-btn {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ gap: 8px;
+ padding: 20px 16px;
+ background: var(--bg-card);
+ border: 2px solid var(--border);
+ border-radius: var(--radius-lg);
+ transition: all var(--transition);
+}
+
+.language-btn:hover {
+ border-color: var(--accent);
+ background: var(--accent-muted);
+}
+
+.language-btn.selected {
+ border-color: var(--accent);
+ background: var(--accent-muted);
+ box-shadow: 0 0 20px rgba(16, 185, 129, 0.2);
+}
+
+.lang-flag { font-size: 28px; }
+.lang-name { font-size: 15px; font-weight: 600; color: var(--text-primary); }
+
+.btn-continue {
+ width: 100%;
+ max-width: 280px;
+ padding: 14px 24px;
+ font-size: 16px;
+ font-weight: 600;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 8px;
+}
+
+/* Footer pinned to the bottom of the language-selection screen.
+   NOTE(review): this uses `position: absolute`, but its parent
+   `.language-page` declares no `position: relative`, so the footer anchors
+   to the nearest positioned ancestor — in practice the initial containing
+   block (viewport-sized root). That looks correct while the page exactly
+   fills the viewport; confirm it still sits at the visual bottom if the
+   language page ever becomes taller than the viewport and scrolls. */
+.page-footer {
+  position: absolute;
+  bottom: 20px;
+  left: 0;
+  right: 0;
+  text-align: center;
+}
+
+.page-footer p {
+  font-size: 12px;
+  color: var(--text-muted);
+}
+
+/* ==========================================================================
+ PAGE 2: DIAGNOSIS (Modern/Classy)
+ ========================================================================== */
+.diagnosis-page {
+ min-height: 100vh;
+ display: flex;
+ flex-direction: column;
+ background: var(--bg-dark);
+}
+
+/* Header - STICKY */
+.main-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ padding: 16px 20px;
+ background: var(--bg-card);
+ border-bottom: 1px solid var(--border);
+ position: sticky;
+ top: 0;
+ z-index: 100;
+}
+
+.header-brand {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+}
+
+.header-logo { font-size: 36px; }
+
+.header-title {
+ font-size: 28px;
+ font-weight: 800;
+ color: var(--accent);
+ margin: 0;
+ letter-spacing: -0.5px;
+}
+
+.header-subtitle {
+ font-size: 14px;
+ color: var(--text-secondary);
+ margin: 4px 0 0 0;
+}
+
+.header-actions { position: relative; }
+
+.btn-language {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ padding: 12px 18px;
+ background: var(--accent);
+ border-radius: var(--radius-md);
+ color: #FFFFFF;
+ font-size: 15px;
+ font-weight: 700;
+ border: 2px solid var(--accent);
+ transition: var(--transition);
+}
+
+.btn-language:hover {
+ background: var(--accent-hover);
+ border-color: var(--accent-hover);
+ transform: scale(1.02);
+}
+
+.dropdown-menu {
+ position: absolute;
+ top: 100%;
+ right: 0;
+ margin-top: 4px;
+ background: var(--bg-elevated);
+ border: 1px solid var(--border);
+ border-radius: var(--radius-md);
+ overflow: hidden;
+ min-width: 120px;
+ z-index: 100;
+}
+
+.dropdown-item {
+ display: block;
+ width: 100%;
+ padding: 10px 14px;
+ text-align: left;
+ color: var(--text-primary);
+ font-size: 13px;
+}
+
+.dropdown-item:hover { background: var(--bg-hover); }
+.dropdown-item.active { color: var(--accent); }
+
+/* Main Content */
+.diagnosis-main {
+ flex: 1;
+ padding: 20px;
+ max-width: 900px;
+ margin: 0 auto;
+ width: 100%;
+}
+
+/* Upload Section */
+.upload-section { margin-bottom: 20px; }
+
+.upload-card {
+ background: var(--bg-card);
+ border-radius: var(--radius-lg);
+ padding: 24px;
+ text-align: center;
+}
+
+.upload-header {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 10px;
+ margin-bottom: 8px;
+}
+
+.upload-icon-small { font-size: 24px; }
+
+.upload-header h2 {
+ font-size: 20px;
+ font-weight: 600;
+}
+
+.upload-desc {
+ font-size: 14px;
+ color: var(--text-secondary);
+ margin-bottom: 20px;
+}
+
+.upload-zone {
+ border: 2px dashed var(--border);
+ border-radius: var(--radius-md);
+ padding: 32px 20px;
+ cursor: pointer;
+ transition: all var(--transition);
+ margin-bottom: 16px;
+}
+
+.upload-zone:hover, .upload-zone.dragover {
+ border-color: var(--accent);
+ background: var(--accent-muted);
+}
+
+.upload-icon { margin-bottom: 12px; color: var(--text-muted); }
+
+.upload-text {
+ font-size: 15px;
+ font-weight: 500;
+ color: var(--text-primary);
+ margin-bottom: 4px;
+}
+
+.upload-formats {
+ font-size: 12px;
+ color: var(--text-muted);
+}
+
+.image-preview-container {
+ position: relative;
+ margin-bottom: 16px;
+ border-radius: var(--radius-md);
+ overflow: hidden;
+ background: var(--bg-elevated);
+}
+
+.image-preview {
+ width: 100%;
+ max-height: 250px;
+ object-fit: contain;
+}
+
+.btn-remove-image {
+ position: absolute;
+ top: 8px;
+ right: 8px;
+ width: 28px;
+ height: 28px;
+ background: rgba(0,0,0,0.7);
+ color: #fff;
+ border-radius: 50%;
+ font-size: 14px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+}
+
+.btn-remove-image:hover { background: var(--error); }
+
+.btn-analyze {
+ width: 100%;
+ padding: 14px;
+ font-size: 15px;
+ font-weight: 600;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 8px;
+ margin-bottom: 16px;
+}
+
+.analyzing-loader {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ gap: 12px;
+ padding: 20px;
+}
+
+.analyzing-loader p {
+ font-size: 14px;
+ color: var(--text-secondary);
+}
+
+.supported-crops {
+ display: flex;
+ justify-content: center;
+ gap: 12px;
+ flex-wrap: wrap;
+}
+
+.crop-tag {
+ font-size: 13px;
+ color: var(--text-secondary);
+ background: var(--bg-elevated);
+ padding: 6px 12px;
+ border-radius: 20px;
+}
+
+/* Results Section */
+.results-section { animation: fadeIn 0.3s ease; }
+
+@keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
+
+.results-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ margin-bottom: 16px;
+}
+
+.results-header h2 {
+ font-size: 18px;
+ font-weight: 600;
+}
+
+.btn-text {
+ font-size: 13px;
+ color: var(--accent);
+ font-weight: 500;
+}
+
+.btn-text:hover { text-decoration: underline; }
+
+/* Disease Card */
+.disease-card {
+ background: var(--bg-card);
+ border-radius: var(--radius-lg);
+ padding: 16px;
+ margin-bottom: 16px;
+}
+
+.disease-top {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+ margin-bottom: 14px;
+}
+
+.disease-icon {
+ font-size: 36px;
+ width: 50px;
+ height: 50px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: var(--bg-elevated);
+ border-radius: var(--radius-md);
+}
+
+.disease-info { flex: 1; }
+
+.disease-info h3 {
+ font-size: 17px;
+ font-weight: 600;
+ margin-bottom: 2px;
+}
+
+.crop-label {
+ font-size: 13px;
+ color: var(--text-secondary);
+ text-transform: capitalize;
+}
+
+.severity-badge {
+ padding: 5px 12px;
+ border-radius: 20px;
+ font-size: 12px;
+ font-weight: 600;
+ text-transform: capitalize;
+}
+
+.severity-badge.low { background: rgba(16,185,129,0.15); color: var(--severity-low); }
+.severity-badge.medium { background: rgba(245,158,11,0.15); color: var(--severity-medium); }
+.severity-badge.high { background: rgba(249,115,22,0.15); color: var(--severity-high); }
+.severity-badge.very-high, .severity-badge.very_high { background: rgba(239,68,68,0.15); color: var(--severity-very-high); }
+
+.disease-confidence {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+}
+
+.conf-label {
+ font-size: 13px;
+ color: var(--text-secondary);
+}
+
+.conf-bar-wrap {
+ flex: 1;
+ height: 8px;
+ background: var(--bg-elevated);
+ border-radius: 4px;
+ overflow: hidden;
+}
+
+.conf-bar {
+ height: 100%;
+ background: linear-gradient(90deg, var(--accent), var(--accent-hover));
+ border-radius: 4px;
+ transition: width 0.5s ease;
+}
+
+.conf-value {
+ font-size: 14px;
+ font-weight: 600;
+ color: var(--accent);
+ min-width: 40px;
+ text-align: right;
+}
+
+/* Info Card / Tabs */
+.info-card {
+ background: var(--bg-card);
+ border-radius: var(--radius-lg);
+ overflow: hidden;
+ margin-bottom: 16px;
+}
+
+.tabs {
+ display: flex;
+ background: var(--bg-elevated);
+ padding: 4px;
+ gap: 4px;
+}
+
+.tab-btn {
+ flex: 1;
+ padding: 10px;
+ font-size: 13px;
+ font-weight: 500;
+ color: var(--text-secondary);
+ border-radius: var(--radius-sm);
+ transition: all var(--transition);
+}
+
+.tab-btn:hover { color: var(--text-primary); }
+.tab-btn.active { background: var(--bg-card); color: var(--text-primary); }
+
+.tab-content { padding: 16px; }
+
+.info-list {
+ list-style: none;
+ margin: 0 0 16px 0;
+}
+
+.info-list li {
+ position: relative;
+ padding-left: 16px;
+ margin-bottom: 10px;
+ font-size: 14px;
+ color: var(--text-secondary);
+ line-height: 1.5;
+}
+
+.info-list li::before {
+ content: "";
+ position: absolute;
+ left: 0;
+ top: 7px;
+ width: 6px;
+ height: 6px;
+ background: var(--accent);
+ border-radius: 50%;
+}
+
+.info-block {
+ margin-bottom: 16px;
+}
+
+.info-block h4 {
+ font-size: 14px;
+ font-weight: 600;
+ margin-bottom: 8px;
+ color: var(--text-primary);
+}
+
+.info-block p {
+ font-size: 14px;
+ color: var(--text-secondary);
+}
+
+.recovery-block {
+ background: var(--bg-elevated);
+ padding: 12px;
+ border-radius: var(--radius-md);
+}
+
+.recovery-bar-wrap {
+ height: 10px;
+ background: var(--bg-dark);
+ border-radius: 5px;
+ overflow: hidden;
+ margin-bottom: 6px;
+}
+
+.recovery-bar {
+ height: 100%;
+ background: var(--success);
+ border-radius: 5px;
+ transition: width 0.5s ease;
+}
+
+.recovery-block span {
+ font-size: 13px;
+ color: var(--success);
+}
+
+.treatment-grid {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+.treatment-item {
+ background: var(--bg-elevated);
+ padding: 10px 12px;
+ border-radius: var(--radius-sm);
+}
+
+.treatment-item strong {
+ font-size: 13px;
+ display: block;
+ margin-bottom: 2px;
+}
+
+.treatment-item span {
+ font-size: 12px;
+ color: var(--text-secondary);
+}
+
+.cost-block {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ background: var(--accent-muted);
+ padding: 12px 16px;
+ border-radius: var(--radius-md);
+ margin-top: 12px;
+}
+
+.cost-label {
+ font-size: 14px;
+ color: var(--text-secondary);
+}
+
+.cost-value {
+ font-size: 18px;
+ font-weight: 700;
+ color: var(--accent);
+}
+
+/* Chat Button (Simple but visible) */
+.btn-chat {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 8px;
+ width: 100%;
+ padding: 14px;
+ background: var(--accent);
+ color: #fff;
+ font-size: 15px;
+ font-weight: 600;
+ border-radius: var(--radius-md);
+ transition: all var(--transition);
+}
+
+.btn-chat:hover {
+ background: var(--accent-hover);
+ transform: translateY(-1px);
+}
+
+/* Footer */
+.main-footer {
+ padding: 16px;
+ text-align: center;
+ border-top: 1px solid var(--border-light);
+}
+
+.main-footer p {
+ font-size: 12px;
+ color: var(--text-muted);
+}
+
+/* ==========================================================================
+ PAGE 3: CHAT (ChatGPT-Inspired)
+ ========================================================================== */
+.chat-page {
+ height: 100vh;
+ display: flex;
+ flex-direction: column;
+ background: #0D0D0D;
+}
+
+.chat-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ padding: 12px 16px;
+ background: #1A1A1A;
+ border-bottom: 1px solid #2D2D2D;
+}
+
+.btn-back {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ color: var(--text-secondary);
+ font-size: 14px;
+ padding: 6px 10px;
+ border-radius: var(--radius-sm);
+}
+
+.btn-back:hover { background: #2D2D2D; color: #fff; }
+
+.chat-title {
+ font-size: 15px;
+ font-weight: 600;
+ display: flex;
+ align-items: center;
+ gap: 6px;
+}
+
+.chat-lang {
+ font-size: 12px;
+ color: var(--text-muted);
+ background: #2D2D2D;
+ padding: 4px 10px;
+ border-radius: 12px;
+}
+
+.chat-context {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 10px 16px;
+ background: rgba(16,185,129,0.08);
+ font-size: 13px;
+ color: var(--text-secondary);
+ border-bottom: 1px solid #2D2D2D;
+ flex-wrap: wrap;
+}
+
+.chat-context strong { color: var(--accent); }
+
+.chat-messages {
+ flex: 1;
+ overflow-y: auto;
+ padding: 16px;
+}
+
+.chat-welcome {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ height: 100%;
+ text-align: center;
+ padding: 40px 20px;
+}
+
+.welcome-icon {
+ font-size: 48px;
+ margin-bottom: 16px;
+ opacity: 0.6;
+}
+
+.chat-welcome h3 {
+ font-size: 18px;
+ margin-bottom: 8px;
+}
+
+.chat-welcome p {
+ font-size: 14px;
+ color: var(--text-muted);
+ max-width: 300px;
+}
+
+/* Chat Messages */
+.message {
+ display: flex;
+ gap: 12px;
+ margin-bottom: 16px;
+ animation: slideUp 0.3s ease;
+}
+
+@keyframes slideUp {
+ from { opacity: 0; transform: translateY(10px); }
+ to { opacity: 1; transform: translateY(0); }
+}
+
+.message.user { flex-direction: row-reverse; }
+
+.message-avatar {
+ width: 32px;
+ height: 32px;
+ border-radius: 50%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-size: 16px;
+ flex-shrink: 0;
+}
+
+.message.assistant .message-avatar { background: var(--accent); }
+.message.user .message-avatar { background: #4B5563; }
+
+.message-content {
+ max-width: 80%;
+ padding: 12px 16px;
+ border-radius: 16px;
+ font-size: 14px;
+ line-height: 1.5;
+}
+
+/* Message content wrapper for Listen button */
+.message-content-wrapper {
+ display: flex;
+ flex-direction: column;
+ align-items: flex-start;
+ max-width: 80%;
+}
+
+.message.user .message-content-wrapper {
+ align-items: flex-end;
+}
+
+.message-content-wrapper .message-content {
+ max-width: 100%;
+}
+
+.message.assistant .message-content {
+ background: #2D2D2D;
+ border-bottom-left-radius: 4px;
+}
+
+.message.user .message-content {
+ background: var(--accent);
+ color: #fff;
+ border-bottom-right-radius: 4px;
+}
+
+.typing-indicator {
+ display: flex;
+ gap: 4px;
+ padding: 8px 0;
+}
+
+.typing-dot {
+ width: 8px;
+ height: 8px;
+ background: var(--text-muted);
+ border-radius: 50%;
+ animation: bounce 1.4s infinite;
+}
+
+.typing-dot:nth-child(2) { animation-delay: 0.2s; }
+.typing-dot:nth-child(3) { animation-delay: 0.4s; }
+
+@keyframes bounce {
+ 0%, 60%, 100% { transform: translateY(0); }
+ 30% { transform: translateY(-6px); }
+}
+
+/* Chat Input */
+.chat-input-wrap {
+ padding: 12px 16px;
+ background: #1A1A1A;
+ border-top: 1px solid #2D2D2D;
+}
+
+.chat-input-box {
+ display: flex;
+ align-items: flex-end;
+ gap: 8px;
+ background: #2D2D2D;
+ border-radius: 12px;
+ padding: 8px 12px;
+}
+
+.chat-input-box textarea {
+ flex: 1;
+ background: transparent;
+ border: none;
+ color: #fff;
+ font-size: 14px;
+ resize: none;
+ min-height: 24px;
+ max-height: 120px;
+ padding: 4px 0;
+}
+
+.chat-input-box textarea:focus { outline: none; }
+.chat-input-box textarea::placeholder { color: #6B6B6B; }
+
+.btn-icon {
+ width: 36px;
+ height: 36px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ border-radius: 8px;
+ color: var(--text-secondary);
+ transition: all var(--transition);
+}
+
+.btn-icon:hover { background: #3D3D3D; color: #fff; }
+
+/* NOTE(review): a duplicate `.btn-voice.recording` rule previously lived here
+   (same background/color plus `animation: pulse 1.5s infinite`). The later
+   "Voice button recording state" rule further down this file re-declares all
+   three properties at equal specificity — and deliberately sets
+   `animation: none` in favor of the inline listening indicator — so the rule
+   here was dead weight that the cascade always overrode. Removed; computed
+   styles are unchanged. */
+
+@keyframes pulse {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0.6; }
+}
+
+.btn-send {
+ background: var(--accent);
+ color: #fff;
+}
+
+.btn-send:disabled {
+ background: #3D3D3D;
+ color: #6B6B6B;
+ cursor: not-allowed;
+}
+
+.btn-send:not(:disabled):hover { background: var(--accent-hover); }
+
+.chat-note {
+ font-size: 11px;
+ color: var(--text-muted);
+ text-align: center;
+ margin-top: 8px;
+}
+
+/* Voice Overlay - LEGACY (hidden, replaced by inline indicator) */
+/* NOTE(review): despite the "hidden" label above, this rule itself sets
+   `display: flex` — presumably the element carries the `.hidden` utility
+   class (display:none !important) in the markup until JS removes it.
+   Confirm against index.html/js before relying on this staying invisible. */
+.voice-overlay {
+  position: fixed;
+  inset: 0;
+  background: rgba(0,0,0,0.9);
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  z-index: 200;
+}
+
+.voice-modal {
+ text-align: center;
+ padding: 40px;
+}
+
+.voice-anim {
+ display: flex;
+ justify-content: center;
+ gap: 8px;
+ margin-bottom: 20px;
+}
+
+.voice-anim span {
+ width: 12px;
+ height: 12px;
+ background: var(--accent);
+ border-radius: 50%;
+ animation: voicePulse 1.2s infinite;
+}
+
+.voice-anim span:nth-child(2) { animation-delay: 0.2s; }
+.voice-anim span:nth-child(3) { animation-delay: 0.4s; }
+
+@keyframes voicePulse {
+ 0%, 100% { transform: scale(1); opacity: 0.5; }
+ 50% { transform: scale(1.4); opacity: 1; }
+}
+
+.voice-modal p {
+ font-size: 18px;
+ margin-bottom: 20px;
+}
+
+/* ==========================================================================
+ INLINE LISTENING INDICATOR (New - replaces full-screen overlay)
+ ========================================================================== */
+.listening-indicator {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+ flex: 1;
+ padding: 8px 12px;
+ background: rgba(239, 68, 68, 0.15);
+ border: 1px solid var(--error);
+ border-radius: 8px;
+ animation: listenFadeIn 0.3s ease;
+}
+
+@keyframes listenFadeIn {
+ from { opacity: 0; transform: scale(0.95); }
+ to { opacity: 1; transform: scale(1); }
+}
+
+/* Pulsing red dot */
+.listening-pulse {
+ width: 12px;
+ height: 12px;
+ background: var(--error);
+ border-radius: 50%;
+ animation: listenPulse 1.5s ease-in-out infinite;
+ flex-shrink: 0;
+}
+
+@keyframes listenPulse {
+ 0%, 100% {
+ transform: scale(1);
+ opacity: 1;
+ box-shadow: 0 0 0 0 rgba(239, 68, 68, 0.7);
+ }
+ 50% {
+ transform: scale(1.1);
+ opacity: 0.8;
+ box-shadow: 0 0 0 8px rgba(239, 68, 68, 0);
+ }
+}
+
+/* Listening text */
+.listening-text {
+ font-size: 15px;
+ font-weight: 600;
+ color: var(--error);
+ flex: 1;
+}
+
+/* Timer display */
+.listening-timer {
+ font-size: 14px;
+ font-weight: 600;
+ color: var(--text-primary);
+ font-family: 'SF Mono', 'Monaco', 'Consolas', monospace;
+ background: rgba(255, 255, 255, 0.1);
+ padding: 4px 10px;
+ border-radius: 6px;
+ min-width: 50px;
+ text-align: center;
+}
+
+/* Inline stop button */
+.btn-stop-inline {
+ width: 36px;
+ height: 36px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: var(--error);
+ color: #fff;
+ border-radius: 8px;
+ transition: all var(--transition);
+ flex-shrink: 0;
+}
+
+.btn-stop-inline:hover {
+ background: #DC2626;
+ transform: scale(1.05);
+}
+
+/* Hide elements when recording */
+.chat-input-box textarea.hidden,
+.chat-input-box .btn-send.hidden {
+ display: none;
+}
+
+/* Voice button recording state */
+.btn-voice.recording {
+ background: var(--error);
+ color: #fff;
+ animation: none; /* Remove pulse, we have the indicator now */
+}
+
+/* ==========================================================================
+ COMMON COMPONENTS
+ ========================================================================== */
+
+/* ==========================================================================
+ TTS - LISTEN BUTTON & AUDIO PLAYER
+ ========================================================================== */
+
+/* Listen button on each assistant message */
+.btn-listen {
+ display: inline-flex;
+ align-items: center;
+ gap: 6px;
+ margin-top: 10px;
+ padding: 8px 14px;
+ background: var(--accent);
+ color: #fff;
+ font-size: 13px;
+ font-weight: 600;
+ border-radius: 20px;
+ cursor: pointer;
+ transition: all var(--transition);
+ border: none;
+}
+
+.btn-listen:hover {
+ background: var(--accent-hover);
+ transform: scale(1.02);
+}
+
+.btn-listen.loading {
+ background: var(--bg-elevated);
+ color: var(--text-secondary);
+ cursor: wait;
+}
+
+.btn-listen.loading::after {
+ content: '';
+ width: 12px;
+ height: 12px;
+ border: 2px solid var(--text-muted);
+ border-top-color: var(--accent);
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+ margin-left: 6px;
+}
+
+.btn-listen.playing {
+ background: var(--error);
+}
+
+.btn-listen-icon {
+ font-size: 14px;
+}
+
+/* Floating TTS Player */
+.tts-player {
+ position: fixed;
+ bottom: 100px;
+ left: 50%;
+ transform: translateX(-50%) translateY(100px);
+ width: 90%;
+ max-width: 400px;
+ background: var(--bg-elevated);
+ border: 1px solid var(--border);
+ border-radius: var(--radius-lg);
+ padding: 16px;
+ box-shadow: 0 8px 32px rgba(0, 0, 0, 0.4);
+ z-index: 150;
+ opacity: 0;
+ visibility: hidden;
+ transition: all 0.3s ease;
+}
+
+.tts-player.active {
+ transform: translateX(-50%) translateY(0);
+ opacity: 1;
+ visibility: visible;
+}
+
+/* Player header */
+.tts-player-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ margin-bottom: 12px;
+}
+
+.tts-player-title {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ font-size: 14px;
+ font-weight: 600;
+ color: var(--text-primary);
+}
+
+.tts-player-title-icon {
+ font-size: 16px;
+}
+
+.btn-tts-close {
+ width: 28px;
+ height: 28px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: var(--bg-hover);
+ color: var(--text-secondary);
+ border-radius: 6px;
+ font-size: 18px;
+ cursor: pointer;
+ transition: all var(--transition);
+}
+
+.btn-tts-close:hover {
+ background: var(--error);
+ color: #fff;
+}
+
+/* Progress bar */
+.tts-progress-container {
+ height: 6px;
+ background: var(--bg-hover);
+ border-radius: 3px;
+ overflow: hidden;
+ margin-bottom: 12px;
+ cursor: pointer;
+}
+
+.tts-progress-bar {
+ height: 100%;
+ background: var(--accent);
+ border-radius: 3px;
+ width: 0%;
+ transition: width 0.1s linear;
+}
+
+/* Controls row */
+.tts-controls {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 12px;
+}
+
+/* Playback controls */
+.tts-playback-controls {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+}
+
+.btn-tts-control {
+ width: 40px;
+ height: 40px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: var(--accent);
+ color: #fff;
+ border-radius: 50%;
+ cursor: pointer;
+ transition: all var(--transition);
+}
+
+.btn-tts-control:hover {
+ background: var(--accent-hover);
+ transform: scale(1.05);
+}
+
+.btn-tts-control.stop {
+ background: var(--bg-hover);
+ color: var(--text-secondary);
+ width: 36px;
+ height: 36px;
+}
+
+.btn-tts-control.stop:hover {
+ background: var(--error);
+ color: #fff;
+}
+
+/* Time display */
+.tts-time {
+ font-size: 12px;
+ color: var(--text-muted);
+ font-family: 'SF Mono', 'Monaco', 'Consolas', monospace;
+ min-width: 70px;
+ text-align: center;
+}
+
+/* Speed controls */
+.tts-speed-controls {
+ display: flex;
+ align-items: center;
+ gap: 4px;
+}
+
+.tts-speed-label {
+ font-size: 11px;
+ color: var(--text-muted);
+ margin-right: 4px;
+}
+
+.tts-speed-btn {
+ padding: 4px 8px;
+ font-size: 11px;
+ font-weight: 600;
+ color: var(--text-muted);
+ background: var(--bg-hover);
+ border-radius: 4px;
+ cursor: pointer;
+ transition: all var(--transition);
+}
+
+.tts-speed-btn:hover {
+ color: var(--text-primary);
+ background: var(--bg-card);
+}
+
+.tts-speed-btn.active {
+ color: #fff;
+ background: var(--accent);
+}
+
+/* Responsive adjustments */
+@media (max-width: 480px) {
+ .tts-player {
+ bottom: 80px;
+ width: 95%;
+ padding: 12px;
+ }
+
+ .tts-speed-label {
+ display: none;
+ }
+
+ .tts-time {
+ font-size: 11px;
+ min-width: 60px;
+ }
+}
+
+/* Buttons */
+.btn-primary {
+ background: var(--accent);
+ color: #fff;
+ border-radius: var(--radius-md);
+ font-weight: 600;
+ transition: all var(--transition);
+}
+
+.btn-primary:hover:not(:disabled) {
+ background: var(--accent-hover);
+}
+
+.btn-primary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-secondary {
+ background: #2D2D2D;
+ color: #fff;
+ padding: 12px 24px;
+ border-radius: var(--radius-md);
+ font-weight: 500;
+}
+
+.btn-secondary:hover { background: #3D3D3D; }
+
+/* Loader */
+.loader-spinner {
+ width: 24px;
+ height: 24px;
+ border: 3px solid #3D3D3D;
+ border-top-color: var(--accent);
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+}
+
+.loader-spinner.large { width: 40px; height: 40px; }
+
+@keyframes spin { to { transform: rotate(360deg); } }
+
+/* Loading Overlay */
+.loading-overlay {
+ position: fixed;
+ inset: 0;
+ background: rgba(13,13,13,0.95);
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ gap: 16px;
+ z-index: 300;
+}
+
+.loading-overlay p {
+ color: var(--text-secondary);
+ font-size: 14px;
+}
+
+/* Toast */
+.toast-container {
+ position: fixed;
+ bottom: 20px;
+ left: 50%;
+ transform: translateX(-50%);
+ z-index: 400;
+ width: 90%;
+ max-width: 360px;
+}
+
+.toast {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+ padding: 14px 16px;
+ background: #2D2D2D;
+ border-radius: var(--radius-md);
+ margin-bottom: 8px;
+ animation: slideUp 0.3s ease;
+}
+
+.toast.success { border-left: 4px solid var(--success); }
+.toast.error { border-left: 4px solid var(--error); }
+.toast.warning { border-left: 4px solid var(--warning); }
+
+.toast-message { flex: 1; font-size: 14px; }
+
+.toast-close {
+ color: var(--text-muted);
+ font-size: 18px;
+ cursor: pointer;
+}
+
+/* ==========================================================================
+ RESPONSIVE
+ ========================================================================== */
+@media (max-width: 480px) {
+ .header-subtitle { display: none; }
+ .header-title { font-size: 18px; }
+ .app-title { font-size: 36px; }
+ .language-grid { gap: 10px; }
+ .language-btn { padding: 16px 12px; }
+}
+
+@media (min-width: 768px) {
+ .diagnosis-main { max-width: 900px; padding: 30px; }
+ .upload-card { padding: 32px; }
+ .language-grid { grid-template-columns: repeat(4, 1fr); }
+}
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..274c2d38673023c096c7bd0e788d4597ca75199e
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,321 @@
+
+
+
+
+
+
+
+
+
+
+ FarmEyes - Crop Disease Detection
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
🌱
+
FarmEyes
+
AI-Powered Crop Disease Detection for African Farmers
+
+
+
Select Your Language
+
Choose your preferred language to continue
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Take a clear photo of the affected leaf or plant
+
+
+
+
+
Click or drag image here
+
JPG, PNG, WEBP (max 10MB)
+
+
+
+
![Preview]()
+
+
+
+
+
+
+
+
Analyzing your crop...
+
+
+
+ 🌿 Cassava
+ 🍫 Cocoa
+ 🍅 Tomato
+
+
+
+
+
+
+
+
+
+
+
+
🦠
+
+
Disease Name
+ Crop
+
+
--
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Estimated Cost:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Discussing:
+ Disease
+ •
+ 0%
+ •
+ Severity
+
+
+
+
+
🌱
+
FarmEyes Assistant
+
Ask me anything about your diagnosis, treatments, or prevention tips.
+
+
+
+
+
+
+
+
+
+
+
Listening...
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/frontend/js/api.js b/frontend/js/api.js
new file mode 100644
index 0000000000000000000000000000000000000000..58114675444f3ed223e8838537fe52ca0543385c
--- /dev/null
+++ b/frontend/js/api.js
@@ -0,0 +1,417 @@
+/**
+ * FarmEyes API Client
+ * ===================
+ * Handles all communication with the FastAPI backend.
+ * Provides clean async methods for detection, chat, and transcription.
+ */
+
+const FarmEyesAPI = {
+ // Base URL - auto-detect based on environment
+ baseUrl: window.location.origin,
+
+ // Current session ID
+ sessionId: null,
+
+ // Current language
+ language: 'en',
+
+ /**
+ * Initialize API client
+ */
+ async init() {
+ // Try to get existing session from storage
+ this.sessionId = localStorage.getItem('farmeyes_session');
+ this.language = localStorage.getItem('farmeyes_language') || 'en';
+
+ // Create new session if none exists
+ if (!this.sessionId) {
+ await this.createSession(this.language);
+ }
+
+ console.log('[API] Initialized with session:', this.sessionId?.substring(0, 8));
+ return this;
+ },
+
+ /**
+ * Make an API request
+ * @param {string} endpoint - API endpoint
+ * @param {object} options - Fetch options
+ * @returns {Promise