vivek12coder commited on
Commit
c8df794
·
verified ·
1 Parent(s): 86c71af

Upload 20960 files

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .dockerignore +57 -0
  2. .gitattributes +5 -0
  3. .gitignore +80 -0
  4. Dockerfile +36 -0
  5. README.md +539 -0
  6. api/Dockerfile +34 -0
  7. api/__init__.py +0 -0
  8. api/main.py +475 -0
  9. api/main_optimized.py +401 -0
  10. api/requirements.txt +13 -0
  11. data/dataset_stats.txt +110 -0
  12. data/processed/dataset_info.json +102 -0
  13. data/processed/test/Pepper__bell___Bacterial_spot/01940b6d-7dea-4889-a7b8-a35f4e9bba34___NREC_B.Spot 9120.JPG +0 -0
  14. data/processed/test/Pepper__bell___Bacterial_spot/0719e8e8-c1ae-4d5a-b29c-dbadc36d13f3___NREC_B.Spot 1947.JPG +0 -0
  15. data/processed/test/Pepper__bell___Bacterial_spot/080b107a-192f-40ce-8942-d8ccca8dfc52___NREC_B.Spot 1872.JPG +0 -0
  16. data/processed/test/Pepper__bell___Bacterial_spot/0915c9a9-25b0-4728-be01-86e5cecb57df___NREC_B.Spot 1816.JPG +0 -0
  17. data/processed/test/Pepper__bell___Bacterial_spot/09ae534a-e931-4f83-8545-cf330dfebae9___NREC_B.Spot 9210.JPG +0 -0
  18. data/processed/test/Pepper__bell___Bacterial_spot/0c99cb45-b4e0-4ade-bba5-fab3b678f0bb___JR_B.Spot 8912.JPG +0 -0
  19. data/processed/test/Pepper__bell___Bacterial_spot/0d2635e7-df23-4ceb-b3ba-3af50bb58357___NREC_B.Spot 1874.JPG +0 -0
  20. data/processed/test/Pepper__bell___Bacterial_spot/0fe8a42b-b943-43d6-88c4-78abdcbfe02d___NREC_B.Spot 9236.JPG +0 -0
  21. data/processed/test/Pepper__bell___Bacterial_spot/11e6ce0a-8511-485a-b22c-21b978d28e5e___JR_B.Spot 3383.JPG +0 -0
  22. data/processed/test/Pepper__bell___Bacterial_spot/12c953a0-bd3e-45d0-aaea-5139f5d63e01___JR_B.Spot 8863.JPG +0 -0
  23. data/processed/test/Pepper__bell___Bacterial_spot/12f47cff-1a75-47ec-99d2-01720786e478___NREC_B.Spot 1859.JPG +0 -0
  24. data/processed/test/Pepper__bell___Bacterial_spot/144ae14f-dbf4-4dfa-9d47-98fb33009a48___JR_B.Spot 3364.JPG +0 -0
  25. data/processed/test/Pepper__bell___Bacterial_spot/146d24cd-0c7e-458b-9f82-7b27525b04e4___JR_B.Spot 3329.JPG +0 -0
  26. data/processed/test/Pepper__bell___Bacterial_spot/168a11c9-159b-468c-a6d9-07d0b61c42c9___JR_B.Spot 3193.JPG +0 -0
  27. data/processed/test/Pepper__bell___Bacterial_spot/17557939-f9e2-435a-a4e7-f4d3cff8aa8b___JR_B.Spot 3107.JPG +0 -0
  28. data/processed/test/Pepper__bell___Bacterial_spot/176a9f0a-b815-4e4d-88d4-0960610f723b___NREC_B.Spot 1820.JPG +0 -0
  29. data/processed/test/Pepper__bell___Bacterial_spot/179067a6-1012-4a23-8f09-e413300e9f32___NREC_B.Spot 9085.JPG +0 -0
  30. data/processed/test/Pepper__bell___Bacterial_spot/188f102a-6f64-4180-9d38-f98b61aaec60___JR_B.Spot 9014.JPG +0 -0
  31. data/processed/test/Pepper__bell___Bacterial_spot/18df58d7-c6ac-48e5-8cb0-596b70252a8e___NREC_B.Spot 9153.JPG +0 -0
  32. data/processed/test/Pepper__bell___Bacterial_spot/1b8d3e98-43d9-441d-93ef-a359e6e9ddc2___NREC_B.Spot 9052.JPG +0 -0
  33. data/processed/test/Pepper__bell___Bacterial_spot/1f838b54-c372-4b51-b398-6988377b2218___JR_B.Spot 8977.JPG +0 -0
  34. data/processed/test/Pepper__bell___Bacterial_spot/2433614e-78d3-45ae-b719-59efb0397572___JR_B.Spot 8966.JPG +0 -0
  35. data/processed/test/Pepper__bell___Bacterial_spot/260e0075-466d-4aa2-8ad6-825cce898cdb___JR_B.Spot 9065.JPG +0 -0
  36. data/processed/test/Pepper__bell___Bacterial_spot/28c448aa-4d2c-4a96-baf0-e7ed99ae2495___JR_B.Spot 3385.JPG +0 -0
  37. data/processed/test/Pepper__bell___Bacterial_spot/29896da5-a228-4e67-8d23-930c40ebb03b___JR_B.Spot 8920.JPG +0 -0
  38. data/processed/test/Pepper__bell___Bacterial_spot/2b562d10-5ccf-4f20-aadc-2e1480bd303e___JR_B.Spot 3256.JPG +0 -0
  39. data/processed/test/Pepper__bell___Bacterial_spot/2b710a19-f4cd-4bcf-afbf-e48face96045___NREC_B.Spot 1931.JPG +0 -0
  40. data/processed/test/Pepper__bell___Bacterial_spot/2cdea224-6f10-4a3d-a3f1-1debd1a42640___JR_B.Spot 8926.JPG +0 -0
  41. data/processed/test/Pepper__bell___Bacterial_spot/3233e1db-99e9-4107-8711-c8b28a8bfda1___JR_B.Spot 3165.JPG +0 -0
  42. data/processed/test/Pepper__bell___Bacterial_spot/33763f26-1135-458f-8b2b-34897b8bb647___JR_B.Spot 3323.JPG +0 -0
  43. data/processed/test/Pepper__bell___Bacterial_spot/35b1d344-1bb9-4976-9ba2-de290dd167dd___NREC_B.Spot 9056.JPG +0 -0
  44. data/processed/test/Pepper__bell___Bacterial_spot/3610f357-8b9f-4f98-8e7f-c4297daf3b20___NREC_B.Spot 1971.JPG +0 -0
  45. data/processed/test/Pepper__bell___Bacterial_spot/36223df9-bc20-4672-8938-6f1f60fb4a0e___NREC_B.Spot 1873.JPG +0 -0
  46. data/processed/test/Pepper__bell___Bacterial_spot/3700475e-b0b1-4b8b-90bd-374be22dbfd0___NREC_B.Spot 1860.JPG +0 -0
  47. data/processed/test/Pepper__bell___Bacterial_spot/378ed86f-f435-44e8-93b3-3e05aa569492___JR_B.Spot 8864.JPG +0 -0
  48. data/processed/test/Pepper__bell___Bacterial_spot/37e9bf3a-85da-4114-8e69-185498b9a9af___JR_B.Spot 3337.JPG +0 -0
  49. data/processed/test/Pepper__bell___Bacterial_spot/37fbacb5-ed9d-48f2-bf19-37656ff7c317___JR_B.Spot 3307.JPG +0 -0
  50. data/processed/test/Pepper__bell___Bacterial_spot/389480c3-0209-45e3-b3ee-2447b22de68f___JR_B.Spot 9016.JPG +0 -0
.dockerignore ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Git
2
+ .git
3
+ .gitignore
4
+
5
+ # Documentation
6
+ *.md
7
+ !HF_SPACES_README.md
8
+ !DEPLOYMENT_README.md
9
+
10
+ # Python
11
+ __pycache__
12
+ *.pyc
13
+ *.pyo
14
+ *.pyd
15
+ .Python
16
+ env
17
+ .venv
18
+ venv/
19
+ .env
20
+
21
+ # Jupyter Notebooks
22
+ *.ipynb
23
+ notebooks/
24
+
25
+ # IDE
26
+ .vscode/
27
+ .idea/
28
+ *.swp
29
+ *.swo
30
+
31
+ # OS
32
+ .DS_Store
33
+ Thumbs.db
34
+
35
+ # Logs
36
+ *.log
37
+ logs/
38
+
39
+ # Test files
40
+ tests/
41
+ test_*.py
42
+ *_test.py
43
+
44
+ # Development files
45
+ uselessfiles/
46
+ *.bak
47
+ *.tmp
48
+
49
+ # Large data files (keep only necessary ones)
50
+ data/raw/
51
+ data/train/
52
+ data/test/
53
+ data/val/
54
+
55
+ # Keep only essential outputs
56
+ outputs/logs/
57
+ outputs/heatmaps/
.gitattributes ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ *.pth filter=lfs diff=lfs merge=lfs -text
2
+ crop_disease_v3_model.pth filter=lfs diff=lfs merge=lfs -text
3
+ data/processed/train/Pepper__bell___healthy/42f083e2-272d-4f83-ad9a-573ee90e50ec___Screen[[:space:]]Shot[[:space:]]2015-05-06[[:space:]]at[[:space:]]4.01.13[[:space:]]PM.png filter=lfs diff=lfs merge=lfs -text
4
+ outputs/comprehensive_training_analysis.png filter=lfs diff=lfs merge=lfs -text
5
+ outputs/training_curves.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+ MANIFEST
23
+
24
+ # PyTorch models (keep only the main model)
25
+ models/*.pth
26
+ !models/crop_disease_v3_model.pth
27
+ !models/crop_disease_v2_model.pth
28
+
29
+ # IDE
30
+ .vscode/
31
+ .idea/
32
+ *.swp
33
+ *.swo
34
+
35
+ # Environment
36
+ .env
37
+ .venv
38
+ venv/
39
+ ENV/
40
+ env/
41
+
42
+ # Logs
43
+ logs/
44
+ *.log
45
+
46
+ # Temporary files
47
+ temp/
48
+ tmp/
49
+ *.tmp
50
+
51
+ # OS
52
+ .DS_Store
53
+ Thumbs.db
54
+
55
+ # Data (keep structure but not large datasets)
56
+ data/raw/*
57
+ !data/raw/README.txt
58
+ data/train/*
59
+ !data/train/README.txt
60
+ data/val/*
61
+ !data/val/README.txt
62
+ data/test/*
63
+ !data/test/README.txt
64
+
65
+ # Outputs (keep structure)
66
+ outputs/*.png
67
+ outputs/*.jpg
68
+ outputs/*.json
69
+ outputs/heatmaps/*
70
+ !outputs/heatmaps/README.txt
71
+ outputs/logs/*
72
+ !outputs/logs/README.txt
73
+
74
+ # Jupyter notebook checkpoints
75
+ .ipynb_checkpoints/
76
+
77
+ # Testing artifacts
78
+ .pytest_cache/
79
+ .coverage
80
+ htmlcov/
Dockerfile ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use Python 3.10 slim image for better compatibility with HF Spaces
2
+ FROM python:3.10-slim
3
+
4
+ # Set working directory
5
+ WORKDIR /app
6
+
7
+ # Install system dependencies
8
+ RUN apt-get update && apt-get install -y \
9
+ libglib2.0-0 \
10
+ libsm6 \
11
+ libxext6 \
12
+ libxrender-dev \
13
+ libgomp1 \
14
+ libglib2.0-0 \
15
+ && rm -rf /var/lib/apt/lists/*
16
+
17
+ # Copy requirements first to leverage Docker cache
18
+ COPY requirements.txt .
19
+
20
+ # Install Python dependencies
21
+ RUN pip install --no-cache-dir -r requirements.txt
22
+
23
+ # Copy the entire project
24
+ COPY . .
25
+
26
+ # Create necessary directories
27
+ RUN mkdir -p models knowledge_base src data outputs
28
+
29
+ # Expose port 7860 for Hugging Face Spaces
30
+ EXPOSE 7860
31
+
32
+ # Set environment variable for Hugging Face Spaces
33
+ ENV GRADIO_SERVER_NAME="0.0.0.0"
34
+
35
+ # Run the FastAPI application with Uvicorn on port 7860
36
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Crop Disease Detection AI
3
+ emoji: 🌱
4
+ colorFrom: green
5
+ colorTo: yellow
6
+ sdk: docker
7
+ app_port: 7860
8
+ python_version: 3.10
9
+ suggested_hardware: cpu-basic
10
+ suggested_storage: small
11
+ license: apache-2.0
12
+ tags:
13
+ - computer-vision
14
+ - agriculture
15
+ - disease-detection
16
+ - fastapi
17
+ - pytorch
18
+ - gradcam
19
+ - ai
20
+ - deep-learning
21
+ - crop-monitoring
22
+ ---
23
+
24
+ # Crop Disease Detection AI 🌱🔍
25
+
26
+ > **Advanced Computer Vision System for Agricultural Disease Detection**
27
+
28
+ This folder contains a state-of-the-art PyTorch-based deep learning system for detecting diseases in crop images using ResNet50 architecture with comprehensive visual explanations and real-time risk assessment.
29
+
30
+ ## 🚀 Key Features
31
+
32
+ - **Multi-Crop Disease Detection**: Supports Pepper (Bell), Potato, and Tomato crops
33
+ - **15 Disease Classes**: Comprehensive coverage of common agricultural diseases
34
+ - **Visual AI Explanations**: Grad-CAM and LIME explanations for prediction transparency
35
+ - **FastAPI Backend**: High-performance RESTful API with real-time predictions
36
+ - **High Accuracy**: 90.09% test accuracy on validation dataset (v3.0 model)
37
+ - **Risk Assessment**: Automated severity scoring and treatment recommendations
38
+ - **Memory Optimized**: Multiple model variants for different deployment scenarios
39
+ - **Production Ready**: Docker support, comprehensive testing, and monitoring
40
+
41
+ ## 🧠 AI Model Architecture
42
+
43
+ ### Core Model: Enhanced ResNet50
44
+ - **Base Architecture**: Pre-trained ResNet50 on ImageNet with custom classifier head
45
+ - **Fine-tuning**: Specialized transfer learning for agricultural disease detection
46
+ - **Input Specifications**: 224x224 RGB images, normalized with ImageNet statistics
47
+ - **Output**: 15-class disease classification with confidence scores
48
+ - **Model Depth**: 50 layers with residual connections for stable training
49
+
50
+ ### Advanced Architecture Details
51
+ ```
52
+ ResNet50 Feature Extractor (frozen/unfrozen)
53
+ ├── Custom Classifier Head:
54
+ │ ├── Dropout(0.5)
55
+ │ ├── Linear(2048 → 1024) + BatchNorm + ReLU
56
+ │ ├── Dropout(0.3)
57
+ │ ├── Linear(1024 → 512) + BatchNorm + ReLU
58
+ │ ├── Dropout(0.2)
59
+ │ └── Linear(512 → 15) [Output Layer]
60
+ ```
61
+
62
+ ### Model Versions & Performance
63
+ - **v3.0** (Current): Retrained ResNet50 - 90.09% test accuracy
64
+ - **v2.0**: Enhanced feature extraction - 87.5% accuracy
65
+ - **v1.0**: Initial baseline model - 85.2% accuracy
66
+ - **Lite Variants**: Memory-optimized models for edge deployment
67
+
68
+ ## 📊 Supported Disease Classes
69
+
70
+ ### Pepper (Bell) - 2 Classes
71
+ 1. **Bacterial Spot** - Xanthomonas infection
72
+ 2. **Healthy** - No disease detected
73
+
74
+ ### Potato - 3 Classes
75
+ 1. **Early Blight** - Alternaria solani
76
+ 2. **Late Blight** - Phytophthora infestans
77
+ 3. **Healthy** - No disease detected
78
+
79
+ ### Tomato - 10 Classes
80
+ 1. **Bacterial Spot** - Xanthomonas perforans
81
+ 2. **Early Blight** - Alternaria solani
82
+ 3. **Late Blight** - Phytophthora infestans
83
+ 4. **Leaf Mold** - Passalora fulva
84
+ 5. **Septoria Leaf Spot** - Septoria lycopersici
85
+ 6. **Spider Mites (Two-spotted)** - Tetranychus urticae
86
+ 7. **Target Spot** - Corynespora cassiicola
87
+ 8. **Yellow Leaf Curl Virus** - Begomovirus
88
+ 9. **Mosaic Virus** - Tobacco mosaic virus
89
+ 10. **Healthy** - No disease detected
90
+
91
+ ## 🔧 Tech Stack
92
+
93
+ ### Core AI/ML
94
+ - **Deep Learning**: PyTorch 2.1.0, TorchVision 0.16.0
95
+ - **Computer Vision**: OpenCV 4.8.1, PIL (Pillow) 10.0.1
96
+ - **Model Architecture**: ResNet50 with custom classification head
97
+
98
+ ### API & Backend
99
+ - **Web Framework**: FastAPI 0.104.1 with async support
100
+ - **API Documentation**: Automatic OpenAPI/Swagger generation
101
+ - **CORS Support**: Configurable cross-origin resource sharing
102
+
103
+ ### AI Explainability
104
+ - **Grad-CAM**: Gradient-weighted Class Activation Mapping
105
+ - **LIME**: Local Interpretable Model-agnostic Explanations
106
+ - **Custom Visualization**: matplotlib, seaborn for result plotting
107
+
108
+ ### Data Processing
109
+ - **Numerical**: NumPy 1.24.3, Pandas 2.0.3
110
+ - **Image Processing**: Albumentations for augmentation
111
+ - **Serialization**: JSON, Pickle for model and data handling
112
+
113
+ ## 📁 Project Structure
114
+
115
+ ```
116
+ diseases_detection_ai/
117
+ ├── main.py # FastAPI application entry point (477 lines)
118
+ ├── requirements.txt # Python dependencies and versions
119
+ ├── README.md # Comprehensive documentation (539 lines)
120
+ ├── api/ # API implementations
121
+ │ ├── main.py # Main API server with full features
122
+ │ ├── main_optimized.py # Memory-optimized API variant
123
+ │ ├── Dockerfile # Container configuration for deployment
124
+ │ ├── requirements.txt # API-specific dependencies
125
+ │ └── __init__.py # Package initialization
126
+ ├── src/ # Core AI modules (10 files)
127
+ │ ├── model.py # ResNet50 model architecture (193 lines)
128
+ │ ├── model_lite.py # Lightweight model variants for edge deployment
129
+ │ ├── explain.py # Grad-CAM visual explanation system
130
+ │ ├── explain_lite.py # Optimized explanation for mobile
131
+ │ ├── explain_new.py # Latest explanation implementations
132
+ │ ├── dataset.py # Data loading, preprocessing, and augmentation
133
+ │ ├── train.py # Complete model training pipeline
134
+ │ ├── evaluate.py # Model evaluation and metrics calculation
135
+ │ ├── risk_level.py # Disease severity assessment algorithms
136
+ │ └── __init__.py # Package initialization
137
+ ├── models/ # Trained model checkpoints
138
+ │ ├── crop_disease_v3_model.pth # Latest model (v3.0) - Primary
139
+ │ ├── crop_disease_v2_model.pth # Previous stable version
140
+ │ ├── crop_disese_v0.pth # Initial baseline model
141
+ │ ├── README.txt # Model information and usage notes
142
+ │ └── .gitattributes # Git LFS configuration for large files
143
+ ├── knowledge_base/ # Disease information database
144
+ │ └── disease_info.json # Comprehensive disease database (552 lines)
145
+ ├── data/ # Training and test datasets
146
+ │ ├── raw/ # Original dataset images
147
+ │ └── processed/ # Preprocessed and augmented data
148
+ ├── notebooks/ # Jupyter analysis and research notebooks
149
+ ├── outputs/ # Generated visualizations and results
150
+ ├── tests/ # Comprehensive testing suite
151
+ │ ├── test_model.py # Model functionality tests
152
+ │ ├── test_api.py # API endpoint testing
153
+ │ └── test_explain.py # Explanation system tests
154
+ └── uselessfiles/ # Development artifacts and experimental code
155
+ ```
156
+
157
+ ## 🛠️ Setup Instructions
158
+
159
+ ### System Requirements
160
+ - **Python**: 3.8+ (tested with 3.9, 3.10, 3.11)
161
+ - **GPU**: CUDA-compatible GPU recommended (NVIDIA RTX series optimal)
162
+ - **Memory**: 8GB+ RAM (16GB recommended for training)
163
+ - **Storage**: 2GB+ free space for models and datasets
164
+ - **OS**: Windows 10/11, Linux (Ubuntu 18.04+), macOS 10.15+
165
+
166
+ ### Installation Steps
167
+
168
+ 1. **Environment Setup**:
169
+ ```powershell
170
+ # Navigate to project directory
171
+ cd diseases_detection_ai
172
+
173
+ # Create isolated virtual environment
174
+ python -m venv disease_detection_env
175
+ disease_detection_env\Scripts\activate # Windows
176
+ # source disease_detection_env/bin/activate # Linux/Mac
177
+ ```
178
+
179
+ 2. **Install Dependencies**:
180
+ ```powershell
181
+ # Install all required packages
182
+ pip install -r requirements.txt
183
+
184
+ # Verify PyTorch installation with CUDA support
185
+ python -c "import torch; print(f'PyTorch: {torch.__version__}, CUDA: {torch.cuda.is_available()}')"
186
+ ```
187
+
188
+ 3. **Model Preparation**:
189
+ ```powershell
190
+ # Models are included in the repository
191
+ # Verify model files exist
192
+ dir models\*.pth
193
+ ```
194
+
195
+ 4. **Test Installation**:
196
+ ```powershell
197
+ # Quick functionality test
198
+ python -c "from src.model import CropDiseaseResNet50; print('Installation successful!')"
199
+ ```
200
+
201
+ ### Quick Start Guide
202
+
203
+ 1. **Launch API Server**:
204
+ ```powershell
205
+ # Start FastAPI development server
206
+ python main.py
207
+
208
+ # Server will start on http://localhost:8000
209
+ # API documentation available at http://localhost:8000/docs
210
+ ```
211
+
212
+ 2. **Test Disease Detection**:
213
+ ```powershell
214
+ # Using PowerShell with Invoke-RestMethod
215
+ $response = Invoke-RestMethod -Uri "http://localhost:8000/predict" -Method Post -InFile "test_image.jpg" -ContentType "multipart/form-data"
216
+ $response | ConvertTo-Json
217
+ ```
218
+
219
+ 3. **Alternative API Testing**:
220
+ ```powershell
221
+ # Using curl (if available)
222
+ curl -X POST "http://localhost:8000/predict" -H "accept: application/json" -H "Content-Type: multipart/form-data" -F "file=@test_crop_image.jpg"
223
+ ```
224
+
225
+ ## 🔬 Model Training & Evaluation
226
+
227
+ ### Training Dataset Statistics
228
+ - **Total Training Samples**: 14,440 high-quality crop images
229
+ - **Validation Samples**: 3,089 images for model validation
230
+ - **Test Samples**: 3,109 images for final evaluation
231
+ - **Image Resolution**: Variable (224x224 after preprocessing)
232
+ - **Data Augmentation**: Rotation, flip, brightness, contrast adjustments
233
+ - **Last Training Date**: September 9, 2025
234
+
235
+ ### Training Configuration
236
+ ```python
237
+ # Training hyperparameters for v3.0 model
238
+ {
239
+ "epochs": 50,
240
+ "batch_size": 32,
241
+ "learning_rate": 0.001,
242
+ "optimizer": "Adam",
243
+ "scheduler": "ReduceLROnPlateau",
244
+ "early_stopping": "patience=7",
245
+ "data_augmentation": True
246
+ }
247
+ ```
248
+
249
+ ### Model Performance Metrics
250
+ - **Test Accuracy**: 90.09% (v3.0)
251
+ - **Validation Accuracy**: 90.06% (v3.0)
252
+ - **Model Size**: ~100MB (full model), ~25MB (lite variant)
253
+ - **Average Inference Time**: <200ms per image on GPU, <800ms on CPU
254
+ - **Memory Usage**: ~2GB GPU memory (full model), ~500MB (lite model)
255
+
256
+ ### Training Commands
257
+ ```powershell
258
+ # Train new model from scratch
259
+ python src\train.py --epochs 50 --batch_size 32 --lr 0.001 --save_best
260
+
261
+ # Resume training from checkpoint
262
+ python src\train.py --resume models\crop_disease_v2_model.pth --epochs 20
263
+
264
+ # Evaluate existing model
265
+ python src\evaluate.py --model_path models\crop_disease_v3_model.pth --test_data data\test
266
+
267
+ # Generate visual explanations
268
+ python src\explain.py --image_path test_images\tomato_blight.jpg --output_dir outputs\
269
+ ```
270
+
271
+ ## 🌐 API Documentation
272
+
273
+ ### Core Endpoints
274
+
275
+ #### Disease Prediction
276
+ ```http
277
+ POST /predict
278
+ Content-Type: multipart/form-data
279
+ Parameters:
280
+ - file: image file (JPG, PNG, JPEG)
281
+ - explain: boolean (optional, default: true)
282
+ - confidence_threshold: float (optional, default: 0.7)
283
+
284
+ Response Example:
285
+ {
286
+ "disease": "Tomato___Early_blight",
287
+ "disease_display": "Early Blight",
288
+ "crop": "Tomato",
289
+ "confidence": 0.9456,
290
+ "severity": "High",
291
+ "risk_level": 8.5,
292
+ "symptoms": ["Brown spots with concentric rings", "Yellowing leaves"],
293
+ "treatment": {
294
+ "immediate": ["Remove affected leaves", "Apply fungicide"],
295
+ "preventive": ["Improve air circulation", "Avoid overhead watering"]
296
+ },
297
+ "explanation": {
298
+ "gradcam_regions": "base64_image_data",
299
+ "attention_map": "visualization_data"
300
+ },
301
+ "processing_time": 0.184
302
+ }
303
+ ```
304
+
305
+ #### Batch Prediction
306
+ ```http
307
+ POST /predict/batch
308
+ Content-Type: multipart/form-data
309
+ Parameters:
310
+ - files: multiple image files
311
+
312
+ Response: Array of prediction objects
313
+ ```
314
+
315
+ #### Health Check
316
+ ```http
317
+ GET /health
318
+ Response: {
319
+ "status": "healthy",
320
+ "model_loaded": true,
321
+ "version": "3.0",
322
+ "gpu_available": true,
323
+ "memory_usage": "1.2GB"
324
+ }
325
+ ```
326
+
327
+ #### Model Information
328
+ ```http
329
+ GET /model/info
330
+ Response: {
331
+ "version": "3.0",
332
+ "classes": 15,
333
+ "accuracy": 0.9009,
334
+ "training_date": "2025-09-09",
335
+ "supported_crops": ["Pepper (Bell)", "Potato", "Tomato"]
336
+ }
337
+ ```
338
+
339
+ ## 🔍 Visual Explanation System
340
+
341
+ ### Grad-CAM Implementation
342
+ Gradient-weighted Class Activation Mapping highlights the most important regions:
343
+
344
+ ```python
345
+ from src.explain import CropDiseaseExplainer
346
+
347
+ # Initialize explainer with trained model
348
+ explainer = CropDiseaseExplainer(
349
+ model_path="models/crop_disease_v3_model.pth",
350
+ device="cuda" if torch.cuda.is_available() else "cpu"
351
+ )
352
+
353
+ # Generate explanation for image
354
+ explanation = explainer.explain_prediction(
355
+ image_path="test_image.jpg",
356
+ save_path="outputs/explanation.jpg",
357
+ alpha=0.4 # Overlay transparency
358
+ )
359
+ ```
360
+
361
+ ### LIME Integration
362
+ Local Interpretable Model-agnostic Explanations for segment-based analysis:
363
+
364
+ ```python
365
+ # Generate LIME explanation
366
+ lime_explanation = explainer.lime_explanation(
367
+ image_path="test_image.jpg",
368
+ num_samples=1000,
369
+ num_features=100
370
+ )
371
+ ```
372
+
373
+ ## 🧪 Testing & Quality Assurance
374
+
375
+ ### Automated Testing Suite
376
+ ```powershell
377
+ # Run complete test suite
378
+ python -m pytest tests\ -v --cov=src --cov-report=html
379
+
380
+ # Run specific test categories
381
+ python -m pytest tests\test_model.py -v # Model functionality
382
+ python -m pytest tests\test_api.py -v # API endpoints
383
+ python -m pytest tests\test_explain.py -v # Explanation system
384
+ ```
385
+
386
+ ### Manual Testing
387
+ ```powershell
388
+ # Test model loading and inference
389
+ python tests\manual_test_model.py
390
+
391
+ # Test API with sample images
392
+ python tests\manual_test_api.py
393
+
394
+ # Performance benchmarking
395
+ python tests\benchmark_inference.py
396
+ ```
397
+
398
+ ### Integration Testing
399
+ ```powershell
400
+ # End-to-end API testing
401
+ python tests\integration_test.py --host localhost --port 8000
402
+ ```
403
+
404
+ ## 🚀 Production Deployment
405
+
406
+ ### Docker Deployment
407
+ ```powershell
408
+ # Build optimized container
409
+ docker build -t crop-disease-detection-api .\api
410
+
411
+ # Run with GPU support
412
+ docker run --gpus all -p 8000:8000 crop-disease-detection-api
413
+
414
+ # Run CPU-only version
415
+ docker run -p 8000:8000 -e USE_GPU=false crop-disease-detection-api
416
+ ```
417
+
418
+ ### Environment Configuration
419
+ ```powershell
420
+ # Production environment variables
421
+ $env:ENVIRONMENT = "production"
422
+ $env:MODEL_PATH = "models/crop_disease_v3_model.pth"
423
+ $env:CONFIDENCE_THRESHOLD = "0.8"
424
+ $env:ENABLE_EXPLANATIONS = "true"
425
+ $env:MAX_IMAGE_SIZE = "10MB"
426
+ ```
427
+
428
+ ### Production Considerations
429
+ - **Load Balancing**: Use multiple API instances behind load balancer
430
+ - **Monitoring**: Implement comprehensive logging and metrics
431
+ - **Security**: Configure proper CORS, rate limiting, and authentication
432
+ - **Performance**: Use GPU acceleration and model quantization
433
+ - **Scalability**: Consider serverless deployment for variable workloads
434
+
435
+ ## 📈 Performance Optimization
436
+
437
+ ### Memory Optimization Strategies
438
+ ```python
439
+ # Use lightweight model for resource-constrained environments
440
+ from src.model_lite import TinyDiseaseClassifier
441
+
442
+ model = TinyDiseaseClassifier(num_classes=15) # ~5MB model size
443
+ ```
444
+
445
+ ### Speed Optimization
446
+ - **Model Quantization**: INT8 quantization for 4x speed improvement
447
+ - **Batch Processing**: Process multiple images simultaneously
448
+ - **Async API**: Non-blocking request handling
449
+ - **Caching**: Cache frequent predictions and explanations
450
+
451
+ ### Edge Deployment
452
+ - **Model Pruning**: Remove unnecessary parameters
453
+ - **Knowledge Distillation**: Train smaller student models
454
+ - **ONNX Export**: Cross-platform deployment support
455
+
456
+ ## 🤝 Development Workflow
457
+
458
+ ### Contributing Guidelines
459
+ 1. **Fork Repository**: Create personal fork for development
460
+ 2. **Feature Branch**: Create descriptive branch name
461
+ 3. **Code Standards**: Follow PEP 8 and add type hints
462
+ 4. **Testing**: Add comprehensive tests for new features
463
+ 5. **Documentation**: Update README and inline documentation
464
+ 6. **Pull Request**: Submit with detailed description and test results
465
+
466
+ ### Code Quality Standards
467
+ - **Type Hints**: All functions must include type annotations
468
+ - **Docstrings**: Google-style docstrings for all public methods
469
+ - **Testing**: Minimum 80% code coverage required
470
+ - **Linting**: Code must pass flake8 and black formatting
471
+
472
+ ## 📄 License & Legal
473
+
474
+ This project is part of the HackBhoomi2025 agricultural intelligence platform. All rights reserved.
475
+
476
+ ### Model Attribution
477
+ - Base ResNet50 architecture from torchvision (BSD License)
478
+ - Training dataset: Publicly available agricultural disease datasets
479
+ - Custom modifications and enhancements: HackBhoomi2025 team
480
+
481
+ ## 🆘 Troubleshooting Guide
482
+
483
+ ### Common Issues & Solutions
484
+
485
+ 1. **CUDA Out of Memory Error**:
486
+ ```powershell
487
+ # Solution: Use lighter model or reduce batch size
488
+ $env:USE_LITE_MODEL = "true"
489
+ $env:BATCH_SIZE = "8"
490
+ ```
491
+
492
+ 2. **Model Loading Errors**:
493
+ ```powershell
494
+ # Verify model file integrity
495
+ python -c "import torch; torch.load('models/crop_disease_v3_model.pth', map_location='cpu')"
496
+ ```
497
+
498
+ 3. **Low Prediction Accuracy**:
499
+ - Ensure image quality (minimum 224x224 resolution)
500
+ - Verify crop type is supported (Pepper, Potato, Tomato only)
501
+ - Check image format (JPG, PNG supported)
502
+ - Review confidence threshold settings
503
+
504
+ 4. **API Connection Issues**:
505
+ ```powershell
506
+ # Check if server is running
507
+ Invoke-RestMethod -Uri "http://localhost:8000/health" -Method Get
508
+ ```
509
+
510
+ 5. **Dependencies Installation Problems**:
511
+ ```powershell
512
+ # Clean installation
513
+ pip cache purge
514
+ pip install --no-cache-dir -r requirements.txt
515
+ ```
516
+
517
+ ### Performance Troubleshooting
518
+ - **Slow Inference**: Enable GPU acceleration, use lite model variant
519
+ - **High Memory Usage**: Reduce batch size, use memory-optimized model
520
+ - **API Timeout**: Increase request timeout, optimize image preprocessing
521
+
522
+ ### Support & Resources
523
+ - **Issue Tracking**: GitHub Issues for bug reports and feature requests
524
+ - **Documentation**: Comprehensive API documentation at `/docs`
525
+ - **Community**: HackBhoomi2025 development team for technical support
526
+
527
+ ---
528
+
529
+ **📊 Project Statistics:**
530
+ - **Lines of Code**: 2,000+ (main application)
531
+ - **Model Parameters**: 25.6M (ResNet50), 1.2M (Lite variant)
532
+ - **Supported Image Formats**: JPG, JPEG, PNG
533
+ - **API Response Time**: <200ms average
534
+ - **Model Accuracy**: 90.09% (state-of-the-art for agricultural disease detection)
535
+
536
+ *Last Updated: September 2025*
537
+ *Model Version: 3.0*
538
+ *API Version: 2.0.0*
539
+ *Documentation Version: 1.5*
api/Dockerfile ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.9-slim
2
+
3
+ # Set working directory
4
+ WORKDIR /app
5
+
6
+ # Install system dependencies
7
+ RUN apt-get update && apt-get install -y \
8
+ libglib2.0-0 \
9
+ libsm6 \
10
+ libxext6 \
11
+ libxrender-dev \
12
+ libgomp1 \
13
+ libglib2.0-0 \
14
+ && rm -rf /var/lib/apt/lists/*
15
+
16
+ # Copy requirements and install Python dependencies
17
+ COPY requirements.txt .
18
+ RUN pip install --no-cache-dir -r requirements.txt
19
+
20
+ # Copy application code
21
+ COPY . .
22
+ COPY ../src ./src
23
+ COPY ../models ./models
24
+ COPY ../knowledge_base ./knowledge_base
25
+
26
+ # Expose port
27
+ EXPOSE 8000
28
+
29
+ # Health check
30
+ HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
31
+ CMD curl -f http://localhost:8000/health || exit 1
32
+
33
+ # Run the application
34
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
api/__init__.py ADDED
File without changes
api/main.py ADDED
@@ -0,0 +1,475 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FastAPI Backend for Crop Disease Detection
3
+ Provides REST API endpoints for disease prediction with visual explanations
4
+ """
5
+
6
+ from fastapi import FastAPI, File, UploadFile, HTTPException, Form
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from fastapi.responses import JSONResponse
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from PIL import Image
12
+ import io
13
+ import json
14
+ import sys
15
+ import os
16
+ from pathlib import Path
17
+ from typing import Optional, Dict, Any
18
+ import tempfile
19
+ import traceback
20
+
21
# Make the project-level packages ("src" and the repo root) importable when
# this module is launched from inside api/.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

try:
    from src.model import CropDiseaseResNet50
    from src.explain import CropDiseaseExplainer
    from src.risk_level import RiskLevelCalculator
    from src.dataset import get_transforms
except ImportError as e:
    print(f"Import error: {e}")
    print("Make sure all required modules are available")

# FastAPI application instance.
app = FastAPI(
    title="Crop Disease Detection API",
    description="AI-powered crop disease detection with visual explanations",
    version="2.0.0"
)

# CORS is wide open here; lock down origins before a production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Module-level state, populated by load_model_and_components() at startup.
model = None
explainer = None
risk_calculator = None
class_names = []
device = None
57
def load_model_and_components():
    """Load the trained model, explainer, and risk calculator into module globals.

    Returns:
        bool: True when every component initialized, False on any failure.
    """
    global model, explainer, risk_calculator, class_names, device

    try:
        # Prefer GPU when available.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"Using device: {device}")

        # Default class list for the V3 model (Pepper, Potato, Tomato).  The
        # list stored inside the checkpoint, when present, overrides this.
        class_names = [
            'Pepper__bell___Bacterial_spot',
            'Pepper__bell___healthy',
            'Potato___Early_blight',
            'Potato___healthy',
            'Potato___Late_blight',
            'Tomato__Target_Spot',
            'Tomato__Tomato_mosaic_virus',
            'Tomato__Tomato_YellowLeaf__Curl_Virus',
            'Tomato_Bacterial_spot',
            'Tomato_Early_blight',
            'Tomato_healthy',
            'Tomato_Late_blight',
            'Tomato_Leaf_Mold',
            'Tomato_Septoria_leaf_spot',
            'Tomato_Spider_mites_Two_spotted_spider_mite'
        ]

        model_path = 'models/crop_disease_v3_model.pth'

        if os.path.exists(model_path):
            checkpoint = torch.load(model_path, map_location=device)

            # Two checkpoint layouts are supported: a metadata dict or a raw
            # state_dict.
            if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
                state_dict = checkpoint['model_state_dict']
                # BUG FIX: read the checkpoint's class list BEFORE building the
                # model; otherwise a checkpoint with a different class count
                # makes load_state_dict(strict=True) fail on the classifier head.
                if 'class_names' in checkpoint:
                    class_names = checkpoint['class_names']
            else:
                state_dict = checkpoint

            model = CropDiseaseResNet50(num_classes=len(class_names), pretrained=False)
            model.load_state_dict(state_dict, strict=True)
            model.to(device)
            model.eval()
            print(f"Model loaded from {model_path}")
        else:
            # Fall back to an untrained (ImageNet-pretrained) backbone so the
            # API endpoints still come up for structural testing.
            print("Warning: No trained model found. Creating untrained model for API structure.")
            model = CropDiseaseResNet50(num_classes=len(class_names), pretrained=True)
            model.to(device)
            model.eval()

        # Grad-CAM style visual explanations.
        explainer = CropDiseaseExplainer(model, class_names, device)
        print("Explainer initialized")

        # Multi-factor risk scoring.
        risk_calculator = RiskLevelCalculator()
        print("Risk calculator initialized")

        return True

    except Exception as e:
        print(f"Error loading model and components: {e}")
        traceback.print_exc()
        return False
+
126
@app.on_event("startup")
async def startup_event():
    """Load the model and supporting components once when the server starts."""
    if not load_model_and_components():
        print("Warning: Failed to load some components. API may have limited functionality.")
133
@app.get("/")
async def root():
    """Top-level service descriptor listing the main endpoints."""
    return {
        "message": "Crop Disease Detection API",
        "version": "2.0.0",
        "status": "active",
        "endpoints": {
            "predict": "/predict - POST with image file",
            "health": "/health - GET for health check",
        },
    }
145
+
146
@app.get("/health")
async def health_check():
    """Report readiness of each loaded component plus basic runtime facts."""
    return {
        "status": "ok",
        "model_loaded": model is not None,
        "explainer_ready": explainer is not None,
        "risk_calculator_ready": risk_calculator is not None,
        "device": str(device) if device else "unknown",
        "classes": len(class_names),
    }
157
+
158
@app.post("/predict")
async def predict_disease(
    file: UploadFile = File(...),
    include_explanation: bool = Form(True),
    weather_humidity: Optional[float] = Form(None),
    weather_temperature: Optional[float] = Form(None),
    weather_rainfall: Optional[float] = Form(None),
    growth_stage: Optional[str] = Form(None)
):
    """
    Predict crop disease from uploaded image

    Args:
        file: Uploaded image file
        include_explanation: Whether to include Grad-CAM explanation
        weather_humidity: Optional humidity percentage
        weather_temperature: Optional temperature in Celsius
        weather_rainfall: Optional rainfall in mm
        growth_stage: Optional crop growth stage

    Returns:
        JSON response with prediction, risk assessment, and explanation

    Raises:
        HTTPException: 503 when the model is not loaded, 400 for non-image
            uploads, 500 for unexpected failures.
    """
    if not model:
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        # content_type can be None for some clients; guard before startswith.
        if not (file.content_type or '').startswith('image/'):
            raise HTTPException(status_code=400, detail="File must be an image")

        # Decode the upload into an RGB image.
        image_data = await file.read()
        image = Image.open(io.BytesIO(image_data)).convert('RGB')

        # Same resize + ImageNet normalization used at training time.
        from torchvision import transforms
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        input_tensor = transform(image).unsqueeze(0).to(device)

        # Inference only — no gradients.
        with torch.no_grad():
            outputs = model(input_tensor)
            probabilities = F.softmax(outputs, dim=1)
            confidence, predicted_idx = torch.max(probabilities, 1)

        predicted_class = class_names[predicted_idx.item()]
        confidence_score = confidence.item()

        # Full probability distribution over every class.
        class_probabilities = {
            class_names[i]: probabilities[0, i].item()
            for i in range(len(class_names))
        }

        crop, disease = _split_class_name(predicted_class)

        # BUG FIX: a weather field counts as provided when it is not None; the
        # original any([...]) truthiness test discarded legitimate 0.0 readings.
        weather_data = None
        if any(v is not None for v in (weather_humidity, weather_temperature, weather_rainfall)):
            weather_data = {
                'humidity': weather_humidity if weather_humidity is not None else 50,
                'temperature': weather_temperature if weather_temperature is not None else 25,
                'rainfall': weather_rainfall if weather_rainfall is not None else 0
            }

        risk_assessment = risk_calculator.calculate_enhanced_risk(
            predicted_class, confidence_score, weather_data, growth_stage
        )

        disease_info = _load_disease_info(predicted_class)

        response = {
            'predicted_class': predicted_class,
            'crop': crop,
            'disease': disease,
            'confidence': confidence_score,
            'risk_level': risk_assessment['risk_level'],
            'class_probabilities': class_probabilities,
            'risk_assessment': risk_assessment,
            'disease_info': disease_info,
            'prediction_timestamp': risk_assessment['assessment_timestamp']
        }

        if include_explanation and explainer:
            response['explanation'] = _generate_explanation(image, predicted_class, confidence_score)

        return JSONResponse(content=response)

    except HTTPException:
        # BUG FIX: intended HTTP errors (400 above) were previously swallowed
        # by the generic handler below and re-emitted as 500s.
        raise
    except Exception as e:
        print(f"Prediction error: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Prediction failed: {str(e)}")


def _split_class_name(predicted_class):
    """Split a dataset label (e.g. 'Tomato_Early_blight') into (crop, disease).

    Tries the '___', '__', '_' separators in order, splitting only on the
    first occurrence; falls back to ("Unknown", label) when none matches.
    """
    for sep in ('___', '__', '_'):
        if sep in predicted_class:
            crop, disease = predicted_class.split(sep, 1)
            return crop, disease
    return "Unknown", predicted_class


def _load_disease_info(predicted_class):
    """Fetch description/symptoms/solutions/prevention for a class from the KB.

    Matches on each entry's 'class_name' field; returns an empty dict when
    the KB file or the entry is unavailable (best-effort, never raises).
    """
    try:
        with open('knowledge_base/disease_info.json', 'r') as f:
            kb_data = json.load(f)
        for d in kb_data['diseases']:
            if d.get('class_name') == predicted_class:
                return {
                    'description': d['description'],
                    'symptoms': d['symptoms'],
                    'solutions': d['solutions'],
                    'prevention': d['prevention']
                }
    except Exception as e:
        print(f"Error loading disease info: {e}")
    return {}


def _generate_explanation(image, predicted_class, confidence_score):
    """Run the Grad-CAM explainer on a temp copy of the image.

    Always removes the temporary file (the original leaked it when the
    explainer raised).  Returns a dict with either explanation data or an
    'error' key — never raises.
    """
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as tmp_file:
            image.save(tmp_file.name)
            tmp_path = tmp_file.name

        explanation = explainer.explain_prediction(tmp_path, return_base64=True)

        if 'error' in explanation:
            return {'error': explanation['error'], 'explanation_image': ''}
        return {
            'explanation_image': explanation.get('overlay_base64', ''),
            'predicted_class': explanation.get('predicted_class', predicted_class),
            'confidence': explanation.get('confidence', confidence_score),
            'save_path': explanation.get('save_path', '')
        }
    except Exception as e:
        print(f"Error generating explanation: {e}")
        return {'error': 'Could not generate visual explanation', 'explanation_image': ''}
    finally:
        if tmp_path and os.path.exists(tmp_path):
            os.unlink(tmp_path)
322
+
323
@app.post("/batch_predict")
async def batch_predict(files: list[UploadFile] = File(...)):
    """
    Predict diseases for multiple images

    Args:
        files: List of uploaded image files (maximum 10 per request)

    Returns:
        JSON response with per-file predictions, a risk summary, and counts

    Raises:
        HTTPException: 503 when the model is not loaded, 400 for oversized
            batches, 500 for unexpected failures.
    """
    if not model:
        raise HTTPException(status_code=503, detail="Model not loaded")

    if len(files) > 10:  # Limit batch size
        raise HTTPException(status_code=400, detail="Maximum 10 images per batch")

    try:
        # PERF FIX: build the preprocessing pipeline once, not once per image.
        from torchvision import transforms
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        predictions = []

        for file in files:
            # content_type may be None; treat that as invalid too.
            if not (file.content_type or '').startswith('image/'):
                predictions.append({
                    'filename': file.filename,
                    'error': 'Invalid file type'
                })
                continue

            try:
                image_data = await file.read()
                image = Image.open(io.BytesIO(image_data)).convert('RGB')

                input_tensor = transform(image).unsqueeze(0).to(device)

                with torch.no_grad():
                    outputs = model(input_tensor)
                    probabilities = F.softmax(outputs, dim=1)
                    confidence, predicted_idx = torch.max(probabilities, 1)

                predicted_class = class_names[predicted_idx.item()]
                confidence_score = confidence.item()

                # Lightweight risk score (no weather/growth-stage factors).
                risk_level = risk_calculator.calculate_base_risk(predicted_class, confidence_score)

                predictions.append({
                    'filename': file.filename,
                    'predicted_class': predicted_class,
                    'confidence': confidence_score,
                    'risk_level': risk_level
                })

            except Exception as e:
                # One bad image must not abort the whole batch.
                predictions.append({
                    'filename': file.filename,
                    'error': str(e)
                })

        successful = [p for p in predictions if 'error' not in p]
        summary = risk_calculator.get_risk_summary(successful)

        return JSONResponse(content={
            'predictions': predictions,
            'summary': summary,
            'total_processed': len(files),
            'successful_predictions': len(successful)
        })

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Batch prediction failed: {str(e)}")
405
+
406
@app.get("/classes")
async def get_classes():
    """List every disease class the model can predict, plus the crops covered."""
    return {
        'classes': class_names,
        'total_classes': len(class_names),
        'crops': ['Pepper', 'Potato', 'Tomato'],
    }
414
+
415
@app.get("/model_info")
async def get_model_info():
    """Describe the deployed model's architecture, inputs, and capabilities."""
    return {
        'model_name': 'CropDiseaseResNet50',
        'architecture': 'ResNet50 with custom classifier',
        'input_size': [3, 224, 224],
        'num_classes': len(class_names),
        'device': str(device),
        'model_file': 'crop_disease_v3_model.pth',
        'features': {
            'backbone': 'ResNet50 (pretrained)',
            'classifier': 'Custom sequential layers with dropout',
            'grad_cam': 'Available for visual explanations',
            'risk_assessment': 'Multi-factor risk calculation',
        },
        'capabilities': [
            'Disease classification',
            'Visual explanations (Grad-CAM)',
            'Risk level assessment',
            'Treatment recommendations',
            'Batch processing',
        ],
    }
439
+
440
@app.get("/disease_info/{crop}/{disease}")
async def get_disease_info(crop: str, disease: str):
    """Get detailed information about a specific disease.

    Looks up the knowledge base by (crop, disease), case-insensitively.

    Raises:
        HTTPException: 404 when the disease is unknown, 503 when the KB file
            is missing, 500 on other errors.
    """
    try:
        with open('knowledge_base/disease_info.json', 'r') as f:
            kb_data = json.load(f)

        for d in kb_data['diseases']:
            if d['crop'].lower() == crop.lower() and d['disease'].lower() == disease.lower():
                return d

        raise HTTPException(status_code=404, detail="Disease information not found")

    except FileNotFoundError:
        raise HTTPException(status_code=503, detail="Knowledge base not available")
    except HTTPException:
        # BUG FIX: the 404 raised above was previously caught by the generic
        # handler below and converted into a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error retrieving disease info: {str(e)}")
458
+
459
if __name__ == "__main__":
    import uvicorn

    print("🚀 Starting Crop Disease Detection API...")
    print("📊 Loading model and components...")

    # Load everything up front so failures are visible before serving.
    if load_model_and_components():
        print("✅ All components loaded successfully!")
    else:
        print("⚠️ Some components failed to load")

    print("🌐 Starting server on http://localhost:4333")
    print("📖 API documentation available at http://localhost:4333/docs")

    uvicorn.run(app, host="localhost", port=4333)
api/main_optimized.py ADDED
@@ -0,0 +1,401 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Memory-optimized FastAPI Backend for Crop Disease Detection
3
+ Optimized to use <512MB RAM
4
+ """
5
+
6
+ from fastapi import FastAPI, File, UploadFile, HTTPException, Form
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from fastapi.responses import JSONResponse
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from PIL import Image
12
+ import io
13
+ import json
14
+ import sys
15
+ import os
16
+ from pathlib import Path
17
+ from typing import Optional, Dict, Any
18
+ import tempfile
19
+ import traceback
20
+ import gc
21
+ import psutil
22
+
23
# Make the project-level packages ("src" and the repo root) importable when
# this module is launched from inside api/.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

try:
    from src.model import CropDiseaseResNet50Lite
    from src.explain_lite import CropDiseaseExplainerLite
    from src.risk_level import RiskLevelCalculator
    from src.dataset import get_inference_transforms
except ImportError as e:
    print(f"Import error: {e}")
    print("Make sure all required modules are available")

# FastAPI application instance (memory-optimized variant).
app = FastAPI(
    title="Crop Disease Detection API (Optimized)",
    description="Memory-optimized AI-powered crop disease detection",
    version="2.1.0"
)

# CORS is wide open; lock down origins before a production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Module-level state, populated by load_model_and_components() at startup.
model = None
explainer = None
risk_calculator = None
class_names = []
device = None
transforms = None  # pre-built inference transform pipeline
60
def get_memory_usage():
    """Return this process's resident set size (RSS) in megabytes."""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return rss_bytes / 1024 / 1024  # bytes -> MB
65
+
66
def optimize_memory():
    """Run a GC pass and, when CUDA is present, release cached GPU memory."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
71
+
72
def load_model_and_components():
    """Load the lite model, explainer, and risk calculator with minimal RAM use.

    Returns:
        bool: True when every component initialized, False on any failure.
    """
    global model, explainer, risk_calculator, class_names, device, transforms

    try:
        # Use the GPU only when it has >2GB total memory; CPU is leaner otherwise.
        if torch.cuda.is_available() and torch.cuda.get_device_properties(0).total_memory > 2e9:
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        print(f"Using device: {device}")

        # Compact default class list; the checkpoint's own list, when present,
        # overrides this below.
        class_names = [
            'Pepper_Bacterial_spot',
            'Pepper_healthy',
            'Potato_Early_blight',
            'Potato_healthy',
            'Potato_Late_blight',
            'Tomato_Target_Spot',
            'Tomato_mosaic_virus',
            'Tomato_Yellow_Leaf_Curl',
            'Tomato_Bacterial_spot',
            'Tomato_Early_blight',
            'Tomato_healthy',
            'Tomato_Late_blight',
            'Tomato_Leaf_Mold',
            'Tomato_Septoria_leaf_spot',
            'Tomato_Spider_mites'
        ]

        model_path = 'models/crop_disease_v3_model.pth'

        if os.path.exists(model_path):
            # weights_only avoids unpickling arbitrary objects from the file.
            checkpoint = torch.load(model_path, map_location=device, weights_only=True)

            if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
                state_dict = checkpoint['model_state_dict']
                # BUG FIX: read the checkpoint's class list BEFORE building the
                # model — load_state_dict raises on classifier-head shape
                # mismatches even with strict=False.
                if 'class_names' in checkpoint:
                    class_names = checkpoint['class_names']
            else:
                state_dict = checkpoint

            model = CropDiseaseResNet50Lite(num_classes=len(class_names), pretrained=False)
            model.load_state_dict(state_dict, strict=False)

            # Drop the checkpoint promptly to keep peak memory low.
            del checkpoint, state_dict
            optimize_memory()

            model.to(device)
            model.eval()

            # Opt into the model's memory-efficient mode when it offers one.
            if hasattr(model, 'set_memory_efficient'):
                model.set_memory_efficient(True)

            print(f"Lite model loaded from {model_path}")
        else:
            print("Warning: No trained model found. Creating lite model.")
            model = CropDiseaseResNet50Lite(num_classes=len(class_names), pretrained=True)
            model.to(device)
            model.eval()

        explainer = CropDiseaseExplainerLite(model, class_names, device)
        print("Lite explainer initialized")

        risk_calculator = RiskLevelCalculator()
        print("Risk calculator initialized")

        # Pre-build the inference transform pipeline once.
        transforms = get_inference_transforms(input_size=224)

        optimize_memory()
        print(f"Memory usage after loading: {get_memory_usage():.1f} MB")

        return True

    except Exception as e:
        print(f"Error loading model and components: {e}")
        traceback.print_exc()
        return False
162
+
163
@app.on_event("startup")
async def startup_event():
    """Load the model and supporting components once when the server starts."""
    print("Starting optimized disease detection API...")
    if load_model_and_components():
        print("✅ All components loaded successfully")
    else:
        print("⚠️ Failed to load some components")
172
+
173
@app.get("/")
async def root():
    """Service descriptor including the current memory footprint."""
    memory_usage = get_memory_usage()
    return {
        "message": "Crop Disease Detection API (Optimized)",
        "version": "2.1.0",
        "status": "active",
        "memory_usage_mb": f"{memory_usage:.1f}",
        "optimization": "Memory optimized for <512MB usage",
        "endpoints": {
            "predict": "/predict - POST with image file",
            "health": "/health - GET for health check",
            "memory": "/memory - GET memory usage info",
        },
    }
189
+
190
@app.get("/health")
async def health_check():
    """Health check endpoint with memory info."""
    usage_mb = get_memory_usage()
    return {
        "status": "ok",
        "model_loaded": model is not None,
        "explainer_loaded": explainer is not None,
        "device": str(device) if device else "unknown",
        "memory_usage_mb": f"{usage_mb:.1f}",
        "memory_optimized": usage_mb < 512,
    }
202
+
203
@app.get("/memory")
async def memory_info():
    """Detailed process and (optionally) GPU memory statistics."""
    usage_mb = get_memory_usage()
    proc = psutil.Process(os.getpid())
    mem = proc.memory_info()

    return {
        "memory_usage_mb": f"{usage_mb:.1f}",
        "memory_percent": f"{proc.memory_percent():.1f}%",
        "rss_mb": f"{mem.rss / 1024 / 1024:.1f}",
        "vms_mb": f"{mem.vms / 1024 / 1024:.1f}",
        "available_memory_mb": f"{psutil.virtual_memory().available / 1024 / 1024:.1f}",
        "gpu_memory_allocated": f"{torch.cuda.memory_allocated() / 1024 / 1024:.1f}" if torch.cuda.is_available() else "N/A",
        "optimization_status": "Optimized" if usage_mb < 512 else "Needs optimization",
    }
219
+
220
@app.post("/predict")
async def predict_disease(
    file: UploadFile = File(...),
    include_explanation: bool = Form(False),
    weather_humidity: Optional[float] = Form(None),
    weather_temperature: Optional[float] = Form(None),
    weather_rainfall: Optional[float] = Form(None)
):
    """
    Predict plant disease from uploaded image (memory optimized)

    Raises:
        HTTPException: 503 when the model is not loaded, 413 for uploads over
            5MB, 500 for unexpected failures.
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        initial_memory = get_memory_usage()

        contents = await file.read()
        if len(contents) > 5 * 1024 * 1024:  # 5MB limit
            raise HTTPException(status_code=413, detail="Image too large. Maximum size: 5MB")

        # BUG FIX: the upload stream is consumed by the read() above, so the
        # original's second `await file.read()` for the explainer returned
        # empty bytes.  Keep the raw bytes only when the explainer needs them.
        raw_bytes = contents if include_explanation else None

        image = Image.open(io.BytesIO(contents))
        if image.mode != 'RGB':
            image = image.convert('RGB')

        # Downscale before tensor conversion to reduce memory usage.
        max_size = 224
        if image.size[0] > max_size or image.size[1] > max_size:
            image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)

        # Use the pre-built pipeline when startup created one.
        transforms_fn = transforms if transforms is not None else get_inference_transforms(input_size=224)

        input_tensor = transforms_fn(image).unsqueeze(0).to(device)

        # Free the decoded image/bytes before inference.
        del image, contents
        optimize_memory()

        with torch.no_grad():
            outputs = model(input_tensor)
            probabilities = F.softmax(outputs, dim=1)
            confidence, predicted_idx = torch.max(probabilities, 1)

        predicted_class = class_names[predicted_idx.item()]
        confidence_score = confidence.item()

        # Only the top-3 probabilities are returned to keep the payload small.
        class_probs = {}
        top_probs, top_indices = torch.topk(probabilities[0], min(3, len(class_names)))
        for prob, idx in zip(top_probs, top_indices):
            class_probs[class_names[idx.item()]] = prob.item()

        del input_tensor, outputs, probabilities
        optimize_memory()

        disease_info = get_disease_info_lite(predicted_class)

        # Only include weather fields the caller actually supplied.
        weather_data = {}
        if weather_humidity is not None:
            weather_data['humidity'] = weather_humidity
        if weather_temperature is not None:
            weather_data['temperature'] = weather_temperature
        if weather_rainfall is not None:
            weather_data['rainfall'] = weather_rainfall

        risk_assessment = risk_calculator.calculate_risk(
            predicted_class, confidence_score, weather_data
        ) if risk_calculator else {"overall_risk": "unknown", "risk_factors": [], "recommendations": []}

        # Explanation only when requested AND we have memory headroom.
        explanation_data = {}
        current_memory = get_memory_usage()

        if include_explanation and current_memory < 400 and explainer:
            try:
                explanation_data = explainer.generate_explanation_lite(
                    raw_bytes, predicted_class
                )
            except Exception as e:
                print(f"Explanation generation failed: {e}")
                explanation_data = {"error": "Explanation unavailable due to memory constraints"}
        elif include_explanation:
            explanation_data = {"error": "Explanation disabled due to memory constraints"}

        optimize_memory()
        final_memory = get_memory_usage()

        result = {
            "predicted_class": predicted_class,
            "confidence": confidence_score,
            "class_probabilities": class_probs,
            "disease_info": disease_info,
            "risk_assessment": risk_assessment,
            "crop": extract_crop_name(predicted_class),
            "memory_usage": {
                "initial_mb": f"{initial_memory:.1f}",
                "final_mb": f"{final_memory:.1f}",
                "memory_optimized": final_memory < 512
            }
        }

        if explanation_data:
            result["explanation"] = explanation_data

        return JSONResponse(content=result)

    except HTTPException:
        # BUG FIX: propagate intended HTTP errors (the 413 above) instead of
        # letting the generic handler re-wrap them as 500s.
        raise
    except Exception as e:
        optimize_memory()
        print(f"Prediction error: {e}")
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Prediction failed: {str(e)}")
347
+
348
def get_disease_info_lite(disease_class: str) -> Dict[str, Any]:
    """Return a trimmed disease-info record for ``disease_class``.

    Supports both knowledge-base layouts: the ``{"diseases": [...]}`` list
    consumed by api/main.py (matched on each entry's ``class_name``) and a
    flat dict keyed directly by class name — the original only handled the
    latter, so lookups against the project's list-style KB always missed.
    Falls back to a generic record when the file or entry is unavailable.
    """
    try:
        knowledge_base_path = Path(__file__).parent.parent / "knowledge_base" / "disease_info.json"

        if knowledge_base_path.exists():
            with open(knowledge_base_path, 'r') as f:
                kb = json.load(f)

            disease_info = {}
            if isinstance(kb, dict) and isinstance(kb.get('diseases'), list):
                # List-of-entries schema used elsewhere in this project.
                for entry in kb['diseases']:
                    if entry.get('class_name') == disease_class:
                        disease_info = entry
                        break
            elif isinstance(kb, dict):
                disease_info = kb.get(disease_class, {})

            if disease_info:
                # Trim to the essentials to keep the response (and RAM) small.
                return {
                    "symptoms": disease_info.get("symptoms", [])[:3],
                    "solutions": disease_info.get("solutions", [])[:3],
                    "prevention": disease_info.get("prevention", [])[:3],
                    "description": disease_info.get("description", "No description available")[:200]
                }
    except Exception as e:
        print(f"Error loading disease info: {e}")

    return {
        "symptoms": ["Symptoms information unavailable"],
        "solutions": ["Please consult agricultural expert"],
        "prevention": ["Follow general plant care guidelines"],
        "description": "Disease information unavailable"
    }
377
+
378
def extract_crop_name(disease_class: str) -> str:
    """Map a disease class label to its crop ("Pepper", "Potato", "Tomato").

    Matches on a capitalized or lowercase prefix; anything else is "Unknown".
    """
    for crop in ("Pepper", "Potato", "Tomato"):
        if disease_class.startswith((crop, crop.lower())):
            return crop
    return "Unknown"
388
+
389
if __name__ == "__main__":
    import uvicorn

    print("Starting memory-optimized disease detection API...")
    print("Target: <512MB RAM usage")

    # A single worker plus a low concurrency cap keeps the footprint small.
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8001,
        workers=1,
        limit_concurrency=2,
    )
api/requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.104.1
2
+ uvicorn[standard]==0.24.0
3
+ python-multipart==0.0.6
4
+ pillow==10.0.1
5
+ torch==2.1.0
6
+ torchvision==0.16.0
7
+ numpy==1.24.3
8
+ opencv-python==4.8.1.78
9
+ matplotlib==3.7.2
10
+ scikit-learn==1.3.0
11
+ seaborn==0.12.2
12
+ python-jose[cryptography]==3.3.0
13
+ passlib[bcrypt]==1.7.4
data/dataset_stats.txt ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Dataset Organization Statistics
2
+ ========================================
3
+
4
+ Corn___Cercospora_leaf_spot_Gray_leaf_spot:
5
+ Total: 5
6
+ Train: 3
7
+ Val: 1
8
+ Test: 1
9
+
10
+ Corn___Common_rust:
11
+ Total: 5
12
+ Train: 3
13
+ Val: 1
14
+ Test: 1
15
+
16
+ Corn___Northern_Leaf_Blight:
17
+ Total: 5
18
+ Train: 3
19
+ Val: 1
20
+ Test: 1
21
+
22
+ Corn___healthy:
23
+ Total: 5
24
+ Train: 3
25
+ Val: 1
26
+ Test: 1
27
+
28
+ Potato___Early_Blight:
29
+ Total: 5
30
+ Train: 3
31
+ Val: 1
32
+ Test: 1
33
+
34
+ Potato___Late_Blight:
35
+ Total: 5
36
+ Train: 3
37
+ Val: 1
38
+ Test: 1
39
+
40
+ Potato___healthy:
41
+ Total: 5
42
+ Train: 3
43
+ Val: 1
44
+ Test: 1
45
+
46
+ Tomato___Bacterial_spot:
47
+ Total: 5
48
+ Train: 3
49
+ Val: 1
50
+ Test: 1
51
+
52
+ Tomato___Early_blight:
53
+ Total: 5
54
+ Train: 3
55
+ Val: 1
56
+ Test: 1
57
+
58
+ Tomato___Late_blight:
59
+ Total: 5
60
+ Train: 3
61
+ Val: 1
62
+ Test: 1
63
+
64
+ Tomato___Leaf_Mold:
65
+ Total: 5
66
+ Train: 3
67
+ Val: 1
68
+ Test: 1
69
+
70
+ Tomato___Septoria_leaf_spot:
71
+ Total: 5
72
+ Train: 3
73
+ Val: 1
74
+ Test: 1
75
+
76
+ Tomato___Spider_mites_Two_spotted_spider_mite:
77
+ Total: 5
78
+ Train: 3
79
+ Val: 1
80
+ Test: 1
81
+
82
+ Tomato___Target_Spot:
83
+ Total: 5
84
+ Train: 3
85
+ Val: 1
86
+ Test: 1
87
+
88
+ Tomato___Tomato_Yellow_Leaf_Curl_Virus:
89
+ Total: 5
90
+ Train: 3
91
+ Val: 1
92
+ Test: 1
93
+
94
+ Tomato___Tomato_mosaic_virus:
95
+ Total: 5
96
+ Train: 3
97
+ Val: 1
98
+ Test: 1
99
+
100
+ Tomato___healthy:
101
+ Total: 5
102
+ Train: 3
103
+ Val: 1
104
+ Test: 1
105
+
106
+ Summary:
107
+ Total images: 85
108
+ Training: 51 (60.0%)
109
+ Validation: 17 (20.0%)
110
+ Test: 17 (20.0%)
data/processed/dataset_info.json ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dataset_name": "Crop Disease Detection - Retrained",
3
+ "num_classes": 15,
4
+ "class_names": [
5
+ "Pepper__bell___Bacterial_spot",
6
+ "Pepper__bell___healthy",
7
+ "Potato___Early_blight",
8
+ "Potato___Late_blight",
9
+ "Potato___healthy",
10
+ "Tomato_Bacterial_spot",
11
+ "Tomato_Early_blight",
12
+ "Tomato_Late_blight",
13
+ "Tomato_Leaf_Mold",
14
+ "Tomato_Septoria_leaf_spot",
15
+ "Tomato_Spider_mites_Two_spotted_spider_mite",
16
+ "Tomato__Target_Spot",
17
+ "Tomato__Tomato_YellowLeaf__Curl_Virus",
18
+ "Tomato__Tomato_mosaic_virus",
19
+ "Tomato_healthy"
20
+ ],
21
+ "class_to_idx": {
22
+ "Pepper__bell___Bacterial_spot": 0,
23
+ "Pepper__bell___healthy": 1,
24
+ "Potato___Early_blight": 2,
25
+ "Potato___Late_blight": 3,
26
+ "Potato___healthy": 4,
27
+ "Tomato_Bacterial_spot": 5,
28
+ "Tomato_Early_blight": 6,
29
+ "Tomato_Late_blight": 7,
30
+ "Tomato_Leaf_Mold": 8,
31
+ "Tomato_Septoria_leaf_spot": 9,
32
+ "Tomato_Spider_mites_Two_spotted_spider_mite": 10,
33
+ "Tomato__Target_Spot": 11,
34
+ "Tomato__Tomato_YellowLeaf__Curl_Virus": 12,
35
+ "Tomato__Tomato_mosaic_virus": 13,
36
+ "Tomato_healthy": 14
37
+ },
38
+ "split_ratios": {
39
+ "train": 0.7,
40
+ "val": 0.15,
41
+ "test": 0.15
42
+ },
43
+ "split_stats": {
44
+ "train": {
45
+ "Pepper__bell___Bacterial_spot": 697,
46
+ "Pepper__bell___healthy": 1034,
47
+ "Potato___Early_blight": 700,
48
+ "Potato___healthy": 106,
49
+ "Potato___Late_blight": 700,
50
+ "Tomato__Target_Spot": 982,
51
+ "Tomato__Tomato_mosaic_virus": 261,
52
+ "Tomato__Tomato_YellowLeaf__Curl_Virus": 2245,
53
+ "Tomato_Bacterial_spot": 1488,
54
+ "Tomato_Early_blight": 700,
55
+ "Tomato_healthy": 1113,
56
+ "Tomato_Late_blight": 1336,
57
+ "Tomato_Leaf_Mold": 666,
58
+ "Tomato_Septoria_leaf_spot": 1239,
59
+ "Tomato_Spider_mites_Two_spotted_spider_mite": 1173
60
+ },
61
+ "val": {
62
+ "Pepper__bell___Bacterial_spot": 149,
63
+ "Pepper__bell___healthy": 221,
64
+ "Potato___Early_blight": 150,
65
+ "Potato___healthy": 22,
66
+ "Potato___Late_blight": 150,
67
+ "Tomato__Target_Spot": 210,
68
+ "Tomato__Tomato_mosaic_virus": 55,
69
+ "Tomato__Tomato_YellowLeaf__Curl_Virus": 481,
70
+ "Tomato_Bacterial_spot": 319,
71
+ "Tomato_Early_blight": 150,
72
+ "Tomato_healthy": 238,
73
+ "Tomato_Late_blight": 286,
74
+ "Tomato_Leaf_Mold": 142,
75
+ "Tomato_Septoria_leaf_spot": 265,
76
+ "Tomato_Spider_mites_Two_spotted_spider_mite": 251
77
+ },
78
+ "test": {
79
+ "Pepper__bell___Bacterial_spot": 151,
80
+ "Pepper__bell___healthy": 223,
81
+ "Potato___Early_blight": 150,
82
+ "Potato___healthy": 24,
83
+ "Potato___Late_blight": 150,
84
+ "Tomato__Target_Spot": 212,
85
+ "Tomato__Tomato_mosaic_virus": 57,
86
+ "Tomato__Tomato_YellowLeaf__Curl_Virus": 482,
87
+ "Tomato_Bacterial_spot": 320,
88
+ "Tomato_Early_blight": 150,
89
+ "Tomato_healthy": 240,
90
+ "Tomato_Late_blight": 287,
91
+ "Tomato_Leaf_Mold": 144,
92
+ "Tomato_Septoria_leaf_spot": 267,
93
+ "Tomato_Spider_mites_Two_spotted_spider_mite": 252
94
+ }
95
+ },
96
+ "total_images": {
97
+ "train": 14440,
98
+ "val": 3089,
99
+ "test": 3109,
100
+ "total": 20638
101
+ }
102
+ }
data/processed/test/Pepper__bell___Bacterial_spot/01940b6d-7dea-4889-a7b8-a35f4e9bba34___NREC_B.Spot 9120.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/0719e8e8-c1ae-4d5a-b29c-dbadc36d13f3___NREC_B.Spot 1947.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/080b107a-192f-40ce-8942-d8ccca8dfc52___NREC_B.Spot 1872.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/0915c9a9-25b0-4728-be01-86e5cecb57df___NREC_B.Spot 1816.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/09ae534a-e931-4f83-8545-cf330dfebae9___NREC_B.Spot 9210.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/0c99cb45-b4e0-4ade-bba5-fab3b678f0bb___JR_B.Spot 8912.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/0d2635e7-df23-4ceb-b3ba-3af50bb58357___NREC_B.Spot 1874.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/0fe8a42b-b943-43d6-88c4-78abdcbfe02d___NREC_B.Spot 9236.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/11e6ce0a-8511-485a-b22c-21b978d28e5e___JR_B.Spot 3383.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/12c953a0-bd3e-45d0-aaea-5139f5d63e01___JR_B.Spot 8863.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/12f47cff-1a75-47ec-99d2-01720786e478___NREC_B.Spot 1859.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/144ae14f-dbf4-4dfa-9d47-98fb33009a48___JR_B.Spot 3364.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/146d24cd-0c7e-458b-9f82-7b27525b04e4___JR_B.Spot 3329.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/168a11c9-159b-468c-a6d9-07d0b61c42c9___JR_B.Spot 3193.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/17557939-f9e2-435a-a4e7-f4d3cff8aa8b___JR_B.Spot 3107.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/176a9f0a-b815-4e4d-88d4-0960610f723b___NREC_B.Spot 1820.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/179067a6-1012-4a23-8f09-e413300e9f32___NREC_B.Spot 9085.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/188f102a-6f64-4180-9d38-f98b61aaec60___JR_B.Spot 9014.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/18df58d7-c6ac-48e5-8cb0-596b70252a8e___NREC_B.Spot 9153.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/1b8d3e98-43d9-441d-93ef-a359e6e9ddc2___NREC_B.Spot 9052.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/1f838b54-c372-4b51-b398-6988377b2218___JR_B.Spot 8977.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/2433614e-78d3-45ae-b719-59efb0397572___JR_B.Spot 8966.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/260e0075-466d-4aa2-8ad6-825cce898cdb___JR_B.Spot 9065.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/28c448aa-4d2c-4a96-baf0-e7ed99ae2495___JR_B.Spot 3385.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/29896da5-a228-4e67-8d23-930c40ebb03b___JR_B.Spot 8920.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/2b562d10-5ccf-4f20-aadc-2e1480bd303e___JR_B.Spot 3256.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/2b710a19-f4cd-4bcf-afbf-e48face96045___NREC_B.Spot 1931.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/2cdea224-6f10-4a3d-a3f1-1debd1a42640___JR_B.Spot 8926.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/3233e1db-99e9-4107-8711-c8b28a8bfda1___JR_B.Spot 3165.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/33763f26-1135-458f-8b2b-34897b8bb647___JR_B.Spot 3323.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/35b1d344-1bb9-4976-9ba2-de290dd167dd___NREC_B.Spot 9056.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/3610f357-8b9f-4f98-8e7f-c4297daf3b20___NREC_B.Spot 1971.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/36223df9-bc20-4672-8938-6f1f60fb4a0e___NREC_B.Spot 1873.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/3700475e-b0b1-4b8b-90bd-374be22dbfd0___NREC_B.Spot 1860.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/378ed86f-f435-44e8-93b3-3e05aa569492___JR_B.Spot 8864.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/37e9bf3a-85da-4114-8e69-185498b9a9af___JR_B.Spot 3337.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/37fbacb5-ed9d-48f2-bf19-37656ff7c317___JR_B.Spot 3307.JPG ADDED
data/processed/test/Pepper__bell___Bacterial_spot/389480c3-0209-45e3-b3ee-2447b22de68f___JR_B.Spot 9016.JPG ADDED