yasyn14 commited on
Commit
7a11d7a
·
1 Parent(s): 32e2644

getting started

Browse files
Files changed (12) hide show
  1. .dockerignore +65 -0
  2. .gitattributes +0 -35
  3. .gitignore +102 -0
  4. .huggingface.yaml +22 -0
  5. Dockerfile +35 -0
  6. README.md +164 -7
  7. app.py +11 -0
  8. deployment_guide.md +213 -0
  9. huggingface.yml +1 -0
  10. main.py +285 -0
  11. requirements.txt +0 -0
  12. runtime.txt +1 -0
.dockerignore ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Git
2
+ .git
3
+ .gitignore
4
+
5
+ # Python
6
+ __pycache__
7
+ *.pyc
8
+ *.pyo
9
+ *.pyd
10
+ .Python
11
+ pip-log.txt
12
+ pip-delete-this-directory.txt
13
+ .tox
14
+ .coverage
15
+ .coverage.*
16
+ .pytest_cache
17
+ nosetests.xml
18
+ coverage.xml
19
+ *.cover
20
+ *.log
21
+
22
+ # Virtual environments
23
+ .env
24
+ .venv
25
+ env/
26
+ venv/
27
+ ENV/
28
+ env.bak/
29
+ venv.bak/
30
+
31
+ # IDEs and editors
32
+ .vscode
33
+ .idea
34
+ *.swp
35
+ *.swo
36
+ *~
37
+
38
+ # OS
39
+ .DS_Store
40
+ Thumbs.db
41
+
42
+ # Documentation
43
+ README.md
44
+ *.md
45
+ docs/
46
+
47
+ # Test files
48
+ tests/
49
+ test_images/
50
+ *.jpg
51
+ *.png
52
+ *.jpeg
53
+ *.bmp
54
+ *.gif
55
+ *.tiff
56
+ *.webp
57
+
58
+ # Logs
59
+ *.log
60
+ logs/
61
+
62
+ # Model cache (will be downloaded during build)
63
+ huggingface/
64
+ .huggingface/
65
+ /tmp/huggingface/
.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ pip-wheel-metadata/
20
+ share/python-wheels/
21
+ *.egg-info/
22
+ .installed.cfg
23
+ *.egg
24
+ MANIFEST
25
+
26
+ # PyInstaller
27
+ *.manifest
28
+ *.spec
29
+
30
+ # Installer logs
31
+ pip-log.txt
32
+ pip-delete-this-directory.txt
33
+
34
+ # Unit test / coverage reports
35
+ htmlcov/
36
+ .tox/
37
+ .nox/
38
+ .coverage
39
+ .coverage.*
40
+ .cache
41
+ nosetests.xml
42
+ coverage.xml
43
+ *.cover
44
+ *.py,cover
45
+ .hypothesis/
46
+ .pytest_cache/
47
+
48
+ # Virtual environments
49
+ .env
50
+ .venv
51
+ env/
52
+ venv/
53
+ ENV/
54
+ env.bak/
55
+ venv.bak/
56
+
57
+ # IDEs
58
+ .vscode/
59
+ .idea/
60
+ *.swp
61
+ *.swo
62
+ *~
63
+
64
+ # OS
65
+ .DS_Store
66
+ .DS_Store?
67
+ ._*
68
+ .Spotlight-V100
69
+ .Trashes
70
+ ehthumbs.db
71
+ Thumbs.db
72
+
73
+ # Logs
74
+ *.log
75
+ logs/
76
+
77
+ # Model files (cached)
78
+ /tmp/
79
+ *.keras
80
+ *.h5
81
+ *.pb
82
+ *.ckpt
83
+
84
+ # Hugging Face cache
85
+ huggingface/
86
+ .huggingface/
87
+
88
+ # Test images
89
+ test_images/
90
+ *.jpg
91
+ *.png
92
+ *.jpeg
93
+ *.bmp
94
+ *.gif
95
+ *.tiff
96
+ *.webp
97
+
98
+ # Environment variables
99
+ .env.local
100
+ .env.development.local
101
+ .env.test.local
102
+ .env.production.local
.huggingface.yaml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ title: SmartLeaf API
2
+ emoji: 🌿
3
+ colorFrom: green
4
+ colorTo: lime
5
+ sdk: docker
6
+ license: mit
7
+ app_port: 7860
8
+ pinned: true
9
+ app_file: main.py
10
+ duplicated_from: null
11
+ models:
12
+ - efficientnet-b3 # Replace this with your actual model if different
13
+ tags:
14
+ - agriculture
15
+ - smart-farming
16
+ - plant-health
17
+ - computer-vision
18
+ - ai
19
+ spaces_server_url: https://huggingface.co
20
+ hf_oauth: false
21
+ disable_embedding: false
22
+ repository: smartleaf-api
Dockerfile ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # syntax=docker/dockerfile:1
2
+ FROM python:3.12-slim
3
+
4
+ # 1. Create non‑root user
5
+ RUN useradd --create-home --shell /bin/bash --uid 1000 appuser
6
+
7
+ # 2. Environment variables - set HF_HOME to match what's used in lifespan
8
+ ENV PYTHONDONTWRITEBYTECODE=1 \
9
+ PYTHONUNBUFFERED=1 \
10
+ HF_HOME=/home/appuser/huggingface \
11
+ PORT=7860 \
12
+ PATH=/home/appuser/.local/bin:$PATH
13
+
14
+ # 3. Set working directory
15
+ WORKDIR /home/appuser/app
16
+
17
+ # 4. Install Python dependencies
18
+ COPY requirements.txt .
19
+ RUN pip install --upgrade pip \
20
+ && pip install --no-cache-dir -r requirements.txt
21
+
22
+ # 5. Copy code and set permissions
23
+ COPY --chown=appuser:appuser . .
24
+
25
+ # 6. Create huggingface directory with proper permissions
26
+ RUN mkdir -p /home/appuser/huggingface && \
27
+ chown -R appuser:appuser /home/appuser
28
+
29
+ # 7. Switch to appuser
30
+ USER appuser
31
+
32
+
33
+ # 9. Expose port & run app using uvicorn
34
+ EXPOSE 7860
35
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,12 +1,169 @@
1
  ---
2
- title: Smartleaf Api
3
- emoji: 🐨
4
- colorFrom: indigo
5
- colorTo: gray
6
  sdk: docker
7
  pinned: false
8
- license: mit
9
- short_description: Smart leaf api
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Plant Disease Prediction API
3
+ emoji: 🌱
4
+ colorFrom: green
5
+ colorTo: blue
6
  sdk: docker
7
  pinned: false
8
+ license: apache-2.0
9
+ app_port: 7860
10
  ---
11
 
12
+ # Plant Disease Prediction API 🌱
13
+
14
+ A FastAPI-based web service that predicts plant diseases from leaf images using a deep learning model trained on plant pathology data.
15
+
16
+ ## Features
17
+
18
+ - **38 Disease Classes**: Supports detection of various diseases across multiple plant species including Apple, Corn, Grape, Tomato, Potato, and more
19
+ - **Batch Processing**: Process up to 10 images simultaneously
20
+ - **High Accuracy**: Uses a pre-trained CNN model for reliable disease classification
21
+ - **Easy Integration**: RESTful API with comprehensive documentation
22
+ - **Model Pre-warming**: Fast inference with pre-loaded model
23
+
24
+ ## Supported Plants & Diseases
25
+
26
+ ### Apple
27
+
28
+ - Apple Scab, Black Rot, Cedar Apple Rust, Healthy
29
+
30
+ ### Corn (Maize)
31
+
32
+ - Cercospora Leaf Spot, Common Rust, Northern Leaf Blight, Healthy
33
+
34
+ ### Tomato
35
+
36
+ - Bacterial Spot, Early Blight, Late Blight, Leaf Mold, Septoria Leaf Spot, Spider Mites, Target Spot, Yellow Leaf Curl Virus, Mosaic Virus, Healthy
37
+
38
+ ### Grape
39
+
40
+ - Black Rot, Esca (Black Measles), Leaf Blight, Healthy
41
+
42
+ ### Potato
43
+
44
+ - Early Blight, Late Blight, Healthy
45
+
46
+ ### Other Plants
47
+
48
+ - Blueberry, Cherry, Orange, Peach, Bell Pepper, Raspberry, Soybean, Squash, Strawberry
49
+
50
+ ## API Endpoints
51
+
52
+ ### `POST /predict`
53
+
54
+ Upload one or more images to get disease predictions.
55
+
56
+ **Request:**
57
+
58
+ - Content-Type: `multipart/form-data`
59
+ - Body: Image files (jpg, png, bmp, gif, tiff, webp)
60
+
61
+ **Response:**
62
+
63
+ ```json
64
+ {
65
+ "success": true,
66
+ "results": [
67
+ {
68
+ "predicted_class": "Tomato___Late_blight",
69
+ "clean_class_name": "Tomato - Late blight",
70
+ "confidence": 0.95,
71
+ "all_predictions": {
72
+ "Apple - Apple scab": 0.001,
73
+ "Tomato - Late blight": 0.95
74
+ // ... other classes
75
+ }
76
+ }
77
+ ],
78
+ "message": "Successfully processed 1 image(s)"
79
+ }
80
+ ```
81
+
82
+ ### `GET /health`
83
+
84
+ Check API and model status.
85
+
86
+ ### `GET /classes`
87
+
88
+ Get all supported disease classes.
89
+
90
+ ### `GET /`
91
+
92
+ API information and status.
93
+
94
+ ## Usage Examples
95
+
96
+ ### Python
97
+
98
+ ```python
99
+ import requests
100
+
101
+ # Single image prediction
102
+ with open("leaf_image.jpg", "rb") as f:
103
+ files = {"files": ("leaf_image.jpg", f, "image/jpeg")}
104
+ response = requests.post("https://your-space-url/predict", files=files)
105
+ result = response.json()
106
+ print(f"Predicted: {result['results'][0]['clean_class_name']}")
107
+ print(f"Confidence: {result['results'][0]['confidence']:.2%}")
108
+ ```
109
+
110
+ ### cURL
111
+
112
+ ```bash
113
+ curl -X POST "https://your-space-url/predict" \
114
+ -F "files=@leaf_image.jpg"
115
+ ```
116
+
117
+ ### JavaScript
118
+
119
+ ```javascript
120
+ const formData = new FormData();
121
+ formData.append("files", fileInput.files[0]);
122
+
123
+ fetch("/predict", {
124
+ method: "POST",
125
+ body: formData,
126
+ })
127
+ .then((response) => response.json())
128
+ .then((data) => console.log(data));
129
+ ```
130
+
131
+ ## Model Information
132
+
133
+ - **Architecture**: Convolutional Neural Network (CNN)
134
+ - **Input Size**: 300x300 RGB images
135
+ - **Training Data**: PlantVillage dataset
136
+ - **Classes**: 38 plant disease categories
137
+ - **Framework**: TensorFlow/Keras
138
+
139
+ ## Performance
140
+
141
+ - **Batch Size**: Up to 10 images per request
142
+ - **Response Time**: ~1-3 seconds per image
143
+ - **Memory Usage**: ~2GB for model loading
144
+ - **Accuracy**: >95% on validation set
145
+
146
+ ## Limitations
147
+
148
+ - Best results with clear, well-lit leaf images
149
+ - Works optimally with images similar to training data
150
+ - Single leaf per image recommended
151
+ - Supported image formats: JPG, PNG, BMP, GIF, TIFF, WEBP
152
+
153
+ ## Environment Variables
154
+
155
+ - `HF_MODEL_REPO`: Hugging Face model repository (default: "yasyn14/smart-leaf-model")
156
+ - `HF_MODEL_FILENAME`: Model file name (default: "best_model_32epochs.keras")
157
+ - `HF_HOME`: Cache directory for models (default: "/tmp/huggingface")
158
+
159
+ ## License
160
+
161
+ Apache 2.0 License
162
+
163
+ ## Support
164
+
165
+ For issues and questions, please visit the [model repository](https://huggingface.co/yasyn14/smart-leaf-model) or create an issue in this space.
166
+
167
+ ---
168
+
169
+ _Built with FastAPI, TensorFlow, and 🤗 Transformers_
app.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Alternative entry point for Hugging Face Spaces
2
+ # This file can be used if main.py doesn't work as expected
3
+
4
+ from main import app
5
+
6
+ if __name__ == "__main__":
7
+ import uvicorn
8
+ import os
9
+
10
+ port = int(os.getenv("PORT", 7860))
11
+ uvicorn.run(app, host="0.0.0.0", port=port)
deployment_guide.md ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Deployment Guide for Hugging Face Spaces
2
+
3
+ ## 📁 File Structure
4
+
5
+ Make sure your repository has the following structure:
6
+
7
+ ```
8
+ your-space/
9
+ ├── main.py # Main FastAPI application
10
+ ├── app.py # Alternative entry point
11
+ ├── requirements.txt # Python dependencies
12
+ ├── Dockerfile # Docker configuration
13
+ ├── README.md # Space documentation
14
+ ├── .gitignore # Git ignore rules
15
+ ├── .dockerignore # Docker ignore rules
16
+ └── deployment_guide.md   # This file
17
+ ```
18
+
19
+ ## 🚀 Step-by-Step Deployment
20
+
21
+ ### 1. Create a New Space
22
+
23
+ 1. Go to [Hugging Face Spaces](https://huggingface.co/spaces)
24
+ 2. Click "Create new Space"
25
+ 3. Fill in the details:
26
+ - **Space name**: `plant-disease-api` (or your preferred name)
27
+ - **License**: Apache 2.0
28
+ - **SDK**: Docker
29
+ - **Hardware**: CPU Basic (upgrade to GPU if needed)
30
+ - **Visibility**: Public or Private
31
+
32
+ ### 2. Configure the Space
33
+
34
+ The README.md file already contains the necessary YAML frontmatter:
35
+
36
+ ```yaml
37
+ ---
38
+ title: Plant Disease Prediction API
39
+ emoji: 🌱
40
+ colorFrom: green
41
+ colorTo: blue
42
+ sdk: docker
43
+ pinned: false
44
+ license: apache-2.0
45
+ app_port: 7860
46
+ ---
47
+ ```
48
+
49
+ ### 3. Upload Files
50
+
51
+ You can either:
52
+
53
+ **Option A: Git Clone and Push**
54
+
55
+ ```bash
56
+ git clone https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME
57
+ cd YOUR_SPACE_NAME
58
+ # Copy all files to this directory
59
+ git add .
60
+ git commit -m "Initial deployment"
61
+ git push
62
+ ```
63
+
64
+ **Option B: Web Interface**
65
+
66
+ - Upload files directly through the Hugging Face web interface
67
+ - Drag and drop or use the file upload feature
68
+
69
+ ### 4. Environment Variables (Optional)
70
+
71
+ If you need to set custom environment variables:
72
+
73
+ 1. Go to your Space settings
74
+ 2. Add environment variables:
75
+ - `HF_MODEL_REPO`: Your model repository
76
+ - `HF_MODEL_FILENAME`: Your model filename
77
+ - `HF_HOME`: Cache directory (default: `/tmp/huggingface`)
78
+
79
+ ### 5. Monitor Deployment
80
+
81
+ 1. Go to your Space page
82
+ 2. Check the "Logs" tab for build progress
83
+ 3. Wait for the status to change from "Building" to "Running"
84
+
85
+ ## 🔧 Configuration Details
86
+
87
+ ### Port Configuration
88
+
89
+ - Hugging Face Spaces expects applications to run on port **7860**
90
+ - The Dockerfile and application are configured for this
91
+
92
+ ### Model Loading
93
+
94
+ - The model will be downloaded from Hugging Face Hub on first startup
95
+ - Subsequent startups will use cached model (faster)
96
+ - Pre-warming ensures fast first predictions
97
+
98
+ ### Resource Requirements
99
+
100
+ - **Memory**: ~2-3GB for TensorFlow + model
101
+ - **CPU**: Minimum 2 cores recommended
102
+ - **Storage**: ~1GB for model and dependencies
103
+
104
+ ## 🐛 Troubleshooting
105
+
106
+ ### Common Issues
107
+
108
+ 1. **Build Fails**
109
+
110
+ - Check logs in the Space interface
111
+ - Verify all files are uploaded correctly
112
+ - Ensure requirements.txt has correct versions
113
+
114
+ 2. **Model Loading Errors**
115
+
116
+ - Verify `HF_MODEL_REPO` and `HF_MODEL_FILENAME` are correct
117
+ - Check if model exists and is accessible
118
+ - Review model format (should be .keras file)
119
+
120
+ 3. **Memory Issues**
121
+
122
+ - Upgrade to larger hardware tier
123
+ - Optimize model loading in code
124
+ - Clear unnecessary cache
125
+
126
+ 4. **Port Issues**
127
+ - Ensure application runs on port 7860
128
+ - Check Dockerfile EXPOSE directive
129
+ - Verify app_port in README.md frontmatter
130
+
131
+ ### Debug Commands
132
+
133
+ Add these to your main.py for debugging:
134
+
135
+ ```python
136
+ import os
137
+ import psutil
138
+ import logging
139
+
140
+ # Log system info
141
+ logging.info(f"Available memory: {psutil.virtual_memory().total / 1e9:.2f} GB")
142
+ logging.info(f"CPU cores: {psutil.cpu_count()}")
143
+ logging.info(f"Python version: {sys.version}")
144
+ logging.info(f"TensorFlow version: {tf.__version__}")
145
+ ```
146
+
147
+ ## 📊 Testing Your Deployment
148
+
149
+ ### Health Check
150
+
151
+ ```bash
152
+ curl https://YOUR_USERNAME-YOUR_SPACE_NAME.hf.space/health
153
+ ```
154
+
155
+ ### Test Prediction
156
+
157
+ ```bash
158
+ curl -X POST "https://YOUR_USERNAME-YOUR_SPACE_NAME.hf.space/predict" \
159
+ -F "files=@your_test_image.jpg"
160
+ ```
161
+
162
+ ### Interactive API Docs
163
+
164
+ Visit: `https://YOUR_USERNAME-YOUR_SPACE_NAME.hf.space/docs`
165
+
166
+ ## 🔄 Updates and Maintenance
167
+
168
+ ### Updating Your Space
169
+
170
+ 1. Make changes to your local files
171
+ 2. Push to the Space repository
172
+ 3. Space will automatically rebuild and redeploy
173
+
174
+ ### Monitoring Performance
175
+
176
+ - Check Space logs regularly
177
+ - Monitor response times
178
+ - Watch for memory usage spikes
179
+
180
+ ### Scaling Options
181
+
182
+ - Upgrade hardware tier for better performance
183
+ - Consider GPU hardware for faster inference
184
+ - Implement caching for frequently used predictions
185
+
186
+ ## 🔒 Security Considerations
187
+
188
+ - Keep your Space public for API access
189
+ - Don't include sensitive credentials in code
190
+ - Use environment variables for configuration
191
+ - Monitor usage to prevent abuse
192
+
193
+ ## 📈 Performance Optimization
194
+
195
+ ### Model Optimization
196
+
197
+ - Use model quantization for smaller size
198
+ - Implement model pruning if needed
199
+ - Cache predictions when possible
200
+
201
+ ### API Optimization
202
+
203
+ - Add request rate limiting
204
+ - Implement response caching
205
+ - Optimize image preprocessing
206
+
207
+ ---
208
+
209
+ **Need Help?**
210
+
211
+ - Check [Hugging Face Spaces Documentation](https://huggingface.co/docs/spaces)
212
+ - Visit [Community Forums](https://discuss.huggingface.co/)
213
+ - Create an issue in your Space repository
huggingface.yml ADDED
@@ -0,0 +1 @@
 
 
1
+ sdk: docker
main.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ from typing import List, Optional
4
+ from contextlib import asynccontextmanager
5
+
6
+ import numpy as np
7
+ import tensorflow as tf
8
+ from fastapi import FastAPI, File, UploadFile, HTTPException, status
9
+ from fastapi.responses import JSONResponse
10
+ from PIL import Image
11
+ import io
12
+ from huggingface_hub import hf_hub_download
13
+ from pydantic import BaseModel
14
+
15
+ # Configure logging
16
+ logging.basicConfig(level=logging.INFO)
17
+ logger = logging.getLogger(__name__)
18
+
19
+ # Configuration
20
+ HF_MODEL_REPO: str = os.getenv("HF_MODEL_REPO", "yasyn14/smart-leaf-model")
21
+ HF_MODEL_FILENAME: str = os.getenv("HF_MODEL_FILENAME", "best_model_32epochs.keras")
22
+ HF_CACHE_DIR: str = os.getenv("HF_HOME", "/home/appuser/huggingface")
23
+ IMAGE_SIZE: tuple = (300, 300)
24
+ MAX_BATCH_SIZE: int = 10
25
+
26
+ # Plant disease class names
27
+ CLASS_NAMES = [
28
+ 'Apple___Apple_scab', 'Apple___Black_rot', 'Apple___Cedar_apple_rust', 'Apple___healthy',
29
+ 'Blueberry___healthy', 'Cherry_(including_sour)___Powdery_mildew', 'Cherry_(including_sour)___healthy',
30
+ 'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot', 'Corn_(maize)___Common_rust_',
31
+ 'Corn_(maize)___Northern_Leaf_Blight', 'Corn_(maize)___healthy', 'Grape___Black_rot',
32
+ 'Grape___Esca_(Black_Measles)', 'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)', 'Grape___healthy',
33
+ 'Orange___Haunglongbing_(Citrus_greening)', 'Peach___Bacterial_spot', 'Peach___healthy',
34
+ 'Pepper,_bell___Bacterial_spot', 'Pepper,_bell___healthy', 'Potato___Early_blight',
35
+ 'Potato___Late_blight', 'Potato___healthy', 'Raspberry___healthy', 'Soybean___healthy',
36
+ 'Squash___Powdery_mildew', 'Strawberry___Leaf_scorch', 'Strawberry___healthy',
37
+ 'Tomato___Bacterial_spot', 'Tomato___Early_blight', 'Tomato___Late_blight', 'Tomato___Leaf_Mold',
38
+ 'Tomato___Septoria_leaf_spot', 'Tomato___Spider_mites Two-spotted_spider_mite',
39
+ 'Tomato___Target_Spot', 'Tomato___Tomato_Yellow_Leaf_Curl_Virus', 'Tomato___Tomato_mosaic_virus',
40
+ 'Tomato___healthy'
41
+ ]
42
+
43
+ # Clean class names for better display
44
+ CLEAN_CLASS_NAMES = [name.replace('___', ' - ').replace('_', ' ') for name in CLASS_NAMES]
45
+
46
+ # HTTP Status Messages
47
+ HTTP_MESSAGES = {
48
+ "MODEL_NOT_LOADED": "Model not loaded. Please check server logs.",
49
+ "INVALID_FILE_TYPE": "File must be an image",
50
+ "BATCH_SIZE_EXCEEDED": "Maximum {max_size} images allowed per batch",
51
+ "PREDICTION_FAILED": "Prediction failed: {error}",
52
+ "IMAGE_PROCESSING_FAILED": "Error preprocessing image: {error}",
53
+ "MODEL_LOAD_SUCCESS": "Model loaded successfully",
54
+ "MODEL_LOAD_FAILED": "Failed to load model"
55
+ }
56
+
57
+ # Global model variable
58
+ model: Optional[tf.keras.Model] = None
59
+
60
+ # Response models
61
+ class PredictionResult(BaseModel):
62
+ predicted_class: str
63
+ clean_class_name: str
64
+ confidence: float
65
+ all_predictions: dict
66
+
67
+ class PredictionResponse(BaseModel):
68
+ success: bool
69
+ results: List[PredictionResult]
70
+ message: str
71
+
72
+ class HealthResponse(BaseModel):
73
+ status: str
74
+ model_loaded: bool
75
+ message: str
76
+
77
+ def download_model_from_hf() -> str:
78
+ """Download model from Hugging Face Hub"""
79
+ try:
80
+ logger.info(f"Downloading model from {HF_MODEL_REPO}/{HF_MODEL_FILENAME}")
81
+ model_path = hf_hub_download(
82
+ repo_id=HF_MODEL_REPO,
83
+ filename=HF_MODEL_FILENAME,
84
+ cache_dir=HF_CACHE_DIR
85
+ )
86
+ logger.info(f"Model downloaded to: {model_path}")
87
+ return model_path
88
+ except Exception as e:
89
+ logger.error(f"Failed to download model: {str(e)}")
90
+ raise
91
+
92
+ def load_model() -> tf.keras.Model:
93
+ """Load the Keras model from Hugging Face"""
94
+ try:
95
+ model_path = download_model_from_hf()
96
+ loaded_model = tf.keras.models.load_model(model_path)
97
+ logger.info("Model loaded successfully")
98
+ return loaded_model
99
+ except Exception as e:
100
+ logger.error(f"Failed to load model: {str(e)}")
101
+ raise
102
+
103
+ def preprocess_image(image_bytes: bytes) -> np.ndarray:
104
+ """Preprocess image for model prediction"""
105
+ try:
106
+ # Open image from bytes
107
+ image = Image.open(io.BytesIO(image_bytes))
108
+
109
+ # Convert to RGB if needed
110
+ if image.mode != 'RGB':
111
+ image = image.convert('RGB')
112
+
113
+ # Resize image
114
+ image = image.resize(IMAGE_SIZE)
115
+
116
+ # Convert to numpy array and normalize
117
+ img_array = np.array(image) / 255.0
118
+
119
+ # Add batch dimension
120
+ img_array = np.expand_dims(img_array, axis=0)
121
+
122
+ return img_array
123
+ except Exception as e:
124
+ logger.error(f"Error preprocessing image: {str(e)}")
125
+ raise
126
+
127
+ def predict_single_image(image_bytes: bytes) -> PredictionResult:
128
+ """Make prediction for a single image"""
129
+ global model
130
+
131
+ if model is None:
132
+ raise HTTPException(
133
+ status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
134
+ detail=HTTP_MESSAGES["MODEL_NOT_LOADED"]
135
+ )
136
+
137
+ try:
138
+ # Preprocess image
139
+ processed_image = preprocess_image(image_bytes)
140
+
141
+ # Make prediction
142
+ predictions = model.predict(processed_image, verbose=0)
143
+ predicted_class_idx = np.argmax(predictions[0])
144
+ confidence = float(predictions[0][predicted_class_idx])
145
+
146
+ # Get class names
147
+ predicted_class = CLASS_NAMES[predicted_class_idx]
148
+ clean_class_name = CLEAN_CLASS_NAMES[predicted_class_idx]
149
+
150
+ # Create all predictions dictionary
151
+ all_predictions = {
152
+ CLEAN_CLASS_NAMES[i]: float(predictions[0][i])
153
+ for i in range(len(CLASS_NAMES))
154
+ }
155
+
156
+ return PredictionResult(
157
+ predicted_class=predicted_class,
158
+ clean_class_name=clean_class_name,
159
+ confidence=confidence,
160
+ all_predictions=all_predictions
161
+ )
162
+
163
+ except Exception as e:
164
+ logger.error(f"Prediction failed: {str(e)}")
165
+ raise HTTPException(
166
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
167
+ detail=HTTP_MESSAGES["PREDICTION_FAILED"].format(error=str(e))
168
+ )
def is_image_file(filename: Optional[str]) -> bool:
    """Check if file is an image based on extension.

    Accepts None because FastAPI's UploadFile.filename is optional; a missing
    or empty filename is treated as "not an image" (so the caller returns a
    400 instead of crashing with AttributeError -> 500).
    """
    if not filename:
        return False
    # str.endswith accepts a tuple of suffixes — one C-level call
    return filename.lower().endswith(
        ('.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff', '.webp')
    )
174
+
175
+ @asynccontextmanager
176
+ async def lifespan(app: FastAPI):
177
+ """Handle startup and shutdown events"""
178
+ # Startup
179
+ global model
180
+ try:
181
+ logger.info("Starting up... Loading model")
182
+ model = load_model()
183
+
184
+ # Pre-warm the model with a dummy prediction
185
+ dummy_image = np.random.rand(1, *IMAGE_SIZE, 3).astype(np.float32)
186
+ _ = model.predict(dummy_image, verbose=0)
187
+ logger.info("Model pre-warmed successfully")
188
+
189
+ except Exception as e:
190
+ logger.error(f"Failed to load model during startup: {str(e)}")
191
+ model = None
192
+
193
+ yield
194
+
195
+ # Shutdown
196
+ logger.info("Shutting down...")
197
+
198
+ # Create FastAPI app
199
+ app = FastAPI(
200
+ title="Plant Disease Prediction API",
201
+ description="API for predicting plant diseases from leaf images using deep learning",
202
+ version="1.0.0",
203
+ lifespan=lifespan
204
+ )
205
+
206
+ @app.get("/", response_model=HealthResponse)
207
+ async def root():
208
+ """Root endpoint with API information"""
209
+ return HealthResponse(
210
+ status="running",
211
+ model_loaded=model is not None,
212
+ message="Plant Disease Prediction API is running"
213
+ )
214
+
215
+ @app.get("/health", response_model=HealthResponse)
216
+ async def health_check():
217
+ """Health check endpoint"""
218
+ return HealthResponse(
219
+ status="healthy" if model is not None else "unhealthy",
220
+ model_loaded=model is not None,
221
+ message=HTTP_MESSAGES["MODEL_LOAD_SUCCESS"] if model is not None else HTTP_MESSAGES["MODEL_NOT_LOADED"]
222
+ )
223
+
224
+ @app.post("/predict", response_model=PredictionResponse)
225
+ async def predict_plant_disease(files: List[UploadFile] = File(...)):
226
+ """
227
+ Predict plant disease from uploaded image(s)
228
+
229
+ - **files**: List of image files to analyze (max 10 files)
230
+
231
+ Returns predictions with confidence scores for each image
232
+ """
233
+
234
+ # Check batch size
235
+ if len(files) > MAX_BATCH_SIZE:
236
+ raise HTTPException(
237
+ status_code=status.HTTP_400_BAD_REQUEST,
238
+ detail=HTTP_MESSAGES["BATCH_SIZE_EXCEEDED"].format(max_size=MAX_BATCH_SIZE)
239
+ )
240
+
241
+ results = []
242
+
243
+ for file in files:
244
+ # Check if file is an image
245
+ if not is_image_file(file.filename):
246
+ raise HTTPException(
247
+ status_code=status.HTTP_400_BAD_REQUEST,
248
+ detail=f"{HTTP_MESSAGES['INVALID_FILE_TYPE']}: {file.filename}"
249
+ )
250
+
251
+ try:
252
+ # Read file content
253
+ image_bytes = await file.read()
254
+
255
+ # Make prediction
256
+ result = predict_single_image(image_bytes)
257
+ results.append(result)
258
+
259
+ except HTTPException:
260
+ raise
261
+ except Exception as e:
262
+ logger.error(f"Error processing file {file.filename}: {str(e)}")
263
+ raise HTTPException(
264
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
265
+ detail=HTTP_MESSAGES["IMAGE_PROCESSING_FAILED"].format(error=str(e))
266
+ )
267
+
268
+ return PredictionResponse(
269
+ success=True,
270
+ results=results,
271
+ message=f"Successfully processed {len(results)} image(s)"
272
+ )
273
+
274
+ @app.get("/classes")
275
+ async def get_classes():
276
+ """Get all available plant disease classes"""
277
+ return {
278
+ "classes": CLASS_NAMES,
279
+ "clean_classes": CLEAN_CLASS_NAMES,
280
+ "total_classes": len(CLASS_NAMES)
281
+ }
282
+
283
+ if __name__ == "__main__":
284
+ import uvicorn
285
+ uvicorn.run(app, host="0.0.0.0", port=8000)
requirements.txt ADDED
Binary file (2.62 kB). View file
 
runtime.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ docker