Spaces:
Build error
Build error
Upload 10 files
Browse filesAdding Project files
- .gitignore +178 -0
- Dockerfile +28 -0
- LICENSE +21 -0
- README.md +84 -0
- app.py +182 -0
- config.py +31 -0
- data_processor.py +99 -0
- model_handler.py +116 -0
- requirements.txt +12 -0
- visualizer.py +98 -0
.gitignore
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py,cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# UV
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
#uv.lock
|
| 102 |
+
|
| 103 |
+
# poetry
|
| 104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 106 |
+
# commonly ignored for libraries.
|
| 107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 108 |
+
#poetry.lock
|
| 109 |
+
|
| 110 |
+
# pdm
|
| 111 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 112 |
+
#pdm.lock
|
| 113 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 114 |
+
# in version control.
|
| 115 |
+
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
| 116 |
+
.pdm.toml
|
| 117 |
+
.pdm-python
|
| 118 |
+
.pdm-build/
|
| 119 |
+
|
| 120 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 121 |
+
__pypackages__/
|
| 122 |
+
|
| 123 |
+
# Celery stuff
|
| 124 |
+
celerybeat-schedule
|
| 125 |
+
celerybeat.pid
|
| 126 |
+
|
| 127 |
+
# SageMath parsed files
|
| 128 |
+
*.sage.py
|
| 129 |
+
|
| 130 |
+
# Environments
|
| 131 |
+
.env
|
| 132 |
+
.venv
|
| 133 |
+
env/
|
| 134 |
+
venv/
|
| 135 |
+
ENV/
|
| 136 |
+
env.bak/
|
| 137 |
+
venv.bak/
|
| 138 |
+
|
| 139 |
+
# Spyder project settings
|
| 140 |
+
.spyderproject
|
| 141 |
+
.spyproject
|
| 142 |
+
|
| 143 |
+
# Rope project settings
|
| 144 |
+
.ropeproject
|
| 145 |
+
|
| 146 |
+
# mkdocs documentation
|
| 147 |
+
/site
|
| 148 |
+
|
| 149 |
+
# mypy
|
| 150 |
+
.mypy_cache/
|
| 151 |
+
.dmypy.json
|
| 152 |
+
dmypy.json
|
| 153 |
+
|
| 154 |
+
# Pyre type checker
|
| 155 |
+
.pyre/
|
| 156 |
+
|
| 157 |
+
# pytype static type analyzer
|
| 158 |
+
.pytype/
|
| 159 |
+
|
| 160 |
+
# Cython debug symbols
|
| 161 |
+
cython_debug/
|
| 162 |
+
|
| 163 |
+
# PyCharm
|
| 164 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 165 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 166 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 167 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 168 |
+
#.idea/
|
| 169 |
+
|
| 170 |
+
# Ruff stuff:
|
| 171 |
+
.ruff_cache/
|
| 172 |
+
|
| 173 |
+
# PyPI configuration file
|
| 174 |
+
.pypirc
|
| 175 |
+
|
| 176 |
+
# Model files
|
| 177 |
+
models/*
|
| 178 |
+
!models/.gitkeep
|
Dockerfile
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use an official Python base image
FROM python:3.9-slim

# Set environment variables
# (key=value form; the legacy "ENV key value" syntax is deprecated)
ENV PORT=7860
ENV PYTHONUNBUFFERED=1

# Set workdir
WORKDIR /app

# Install system dependencies (if needed)
# --no-install-recommends keeps the image small; the apt lists are removed
# in the same layer so they never bloat the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first so the dependency layer is cached across code changes
COPY requirements.txt .
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy the rest of your code
COPY . .

# Expose the port Streamlit will run on
EXPOSE 7860

# Run Streamlit on container start
# (shell form on purpose: $PORT must be expanded by the shell)
CMD streamlit run app.py --server.port $PORT --server.address 0.0.0.0
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Mahmoud Al Refaey
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Land Cover Classification (Hugging Face Space)
|
| 2 |
+
|
| 3 |
+
A web-based land cover classification system using a pretrained ResNet50 model and the EuroSAT dataset. This Space features a modern Streamlit interface for easy image upload, prediction, and visualization of land type probabilities.
|
| 4 |
+
|
| 5 |
+
## 🚀 Demo
|
| 6 |
+
|
| 7 |
+
[](https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME)
|
| 8 |
+
|
| 9 |
+
## Features
|
| 10 |
+
|
| 11 |
+
- **Deep Learning Model**: Pretrained ResNet50 fine-tuned on EuroSAT satellite imagery.
|
| 12 |
+
- **User-Friendly Web App**: Built with Streamlit for interactive image upload and real-time predictions.
|
| 13 |
+
- **Class Visualization**: Displays prediction confidence for each land cover class.
|
| 14 |
+
- **Modular Code**: Clean separation of model, data processing, and visualization logic.
|
| 15 |
+
|
| 16 |
+
## How to Use
|
| 17 |
+
|
| 18 |
+
1. **Upload a satellite image** (PNG, JPG, JPEG, TIFF).
|
| 19 |
+
2. **Preview the image** and click "Run Classification".
|
| 20 |
+
3. **View the predicted land cover class** and confidence scores.
|
| 21 |
+
|
| 22 |
+
## Land Cover Classes
|
| 23 |
+
|
| 24 |
+
| Index | Class Name |
|
| 25 |
+
|-------|------------------------|
|
| 26 |
+
| 0 | AnnualCrop |
|
| 27 |
+
| 1 | Forest |
|
| 28 |
+
| 2 | HerbaceousVegetation |
|
| 29 |
+
| 3 | Highway |
|
| 30 |
+
| 4 | Industrial |
|
| 31 |
+
| 5 | Pasture |
|
| 32 |
+
| 6 | PermanentCrop |
|
| 33 |
+
| 7 | Residential |
|
| 34 |
+
| 8 | River |
|
| 35 |
+
| 9 | SeaLake |
|
| 36 |
+
|
| 37 |
+
## Project Structure
|
| 38 |
+
|
| 39 |
+
```
|
| 40 |
+
.
|
| 41 |
+
├── app.py # Streamlit web app
|
| 42 |
+
├── model_handler.py # Model loading and prediction logic
|
| 43 |
+
├── data_processor.py # Data preprocessing utilities
|
| 44 |
+
├── config.py # Configuration (class names, paths)
|
| 45 |
+
├── requirements.txt # Python dependencies
|
| 46 |
+
├── models/ # Model files (see below)
|
| 47 |
+
├── assets/ # Static images for the app
|
| 48 |
+
├── Dockerfile # Docker configuration for Hugging Face Spaces
|
| 49 |
+
└── README.md # This file
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
## Model Files
|
| 53 |
+
|
| 54 |
+
The following files must be present in the `models/` directory:
|
| 55 |
+
- `ResNet50_eurosat.h5`
|
| 56 |
+
- `model.weights.best.keras`
|
| 57 |
+
- `class_indices.npy`
|
| 58 |
+
|
| 59 |
+
If you fork this Space, upload these files via the Hugging Face web interface if they are too large for git.
|
| 60 |
+
|
| 61 |
+
## Deploying on Hugging Face Spaces
|
| 62 |
+
|
| 63 |
+
### Docker Space Deployment
|
| 64 |
+
|
| 65 |
+
1. **Create a new Space on Hugging Face:**
|
| 66 |
+
- Go to [Hugging Face Spaces](https://huggingface.co/spaces).
|
| 67 |
+
- Click **"Create new Space"**.
|
| 68 |
+
- Choose **SDK: Docker**.
|
| 69 |
+
|
| 70 |
+
2. **Push your code:**
|
| 71 |
+
- Push your entire project (including the `Dockerfile`) to the Space.
|
| 72 |
+
- If your model files are too large for git, upload them via the Hugging Face web interface after the initial push.
|
| 73 |
+
|
| 74 |
+
3. **Wait for the build:**
|
| 75 |
+
- Hugging Face will build and run your Docker container.
|
| 76 |
+
- Your Streamlit app will be available at the Space URL.
|
| 77 |
+
|
| 78 |
+
## License
|
| 79 |
+
|
| 80 |
+
MIT License. See [LICENSE](LICENSE) for details.
|
| 81 |
+
|
| 82 |
+
---
|
| 83 |
+
|
| 84 |
+
*This Space is powered by [Streamlit](https://streamlit.io/) and [Hugging Face Spaces](https://huggingface.co/spaces).*
|
app.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import sys
|
| 3 |
+
import os
|
| 4 |
+
import numpy as np
|
| 5 |
+
import plotly.express as px
|
| 6 |
+
import plotly.graph_objects as go
|
| 7 |
+
import pandas as pd
|
| 8 |
+
from PIL import Image
|
| 9 |
+
import matplotlib.pyplot as plt
|
| 10 |
+
import seaborn as sns
|
| 11 |
+
import io
|
| 12 |
+
import cv2
|
| 13 |
+
from config import DATA_CONFIG, CLASS_NAMES
|
| 14 |
+
|
| 15 |
+
# Add current directory to Python path
|
| 16 |
+
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 17 |
+
if current_dir not in sys.path:
|
| 18 |
+
sys.path.append(current_dir)
|
| 19 |
+
|
| 20 |
+
from model_handler import ModelHandler
|
| 21 |
+
from data_processor import DataProcessor
|
| 22 |
+
from visualizer import Visualizer
|
| 23 |
+
|
| 24 |
+
class WebApp:
    """Streamlit front-end for the EuroSAT land-cover classifier.

    Wires together the model handler (inference), the data processor and the
    visualizer, and renders the three pages selected from the sidebar.
    """

    def __init__(self):
        self.model_handler = ModelHandler()
        self.data_processor = DataProcessor()
        self.visualizer = Visualizer()

        # Load model and class indices up front; surface failures in the UI
        # instead of raising so the rest of the app still renders.
        if not self.model_handler.load_model():
            st.error("Failed to load model. Please check if models/ResNet50_eurosat.h5 exists.")
        if not self.model_handler.load_class_indices():
            st.error("Failed to load class indices. Please check if models/class_indices.npy exists.")

    def main_page(self):
        """Home page: welcome image, upload widget and classification trigger."""
        st.title("Project DEPI - Land Cover Classification")
        st.caption("Welcome to the Land Cover Classification System")

        # Display welcome image
        st.image("assets/satellite.jpg", use_column_width=True)

        # Image upload section
        uploaded_image = self.upload()

        if uploaded_image is not None:
            # Keep the raw bytes in session state so charts_page can reuse them.
            st.session_state.uploaded_image = uploaded_image

            col1, col2 = st.columns(2)
            with col1:
                st.subheader("Image Preview")
                st.image(uploaded_image, use_column_width=True)
            with col2:
                st.subheader("Classification")
                if st.button('Run Classification'):
                    self.run_classification(uploaded_image)

    def upload(self):
        """Render the file uploader and return the raw image bytes (or None)."""
        uploaded_file = st.file_uploader(
            "Choose an image file",
            type=DATA_CONFIG['allowed_formats']
        )
        return uploaded_file.read() if uploaded_file is not None else None

    def run_classification(self, image_data):
        """Run the model on raw image bytes and render the prediction.

        Args:
            image_data: Raw bytes of the uploaded image file.
        """
        try:
            with st.spinner('Running classification...'):
                prediction = self.model_handler.predict(image_data)
                st.success(f"Classification Result: {prediction['class_name']}")
                st.write(f"Confidence: {prediction['confidence']:.2%}")
                # Use visualizer for confidence bar
                class_names = [self.model_handler.class_indices.get(str(i), f"Class_{i}")
                               for i in range(len(prediction['all_predictions']))]
                fig = self.visualizer.plot_confidence_bar(class_names, prediction['all_predictions'])
                # Fix: st.plotly_chart has no 'use_column_width' kwarg (that is an
                # st.image option); passing it raises a TypeError at runtime.
                st.plotly_chart(fig, use_container_width=True)
        except Exception as e:
            st.error(f"Error during classification: {str(e)}")

    def charts_page(self):
        """Display various charts and visualizations"""
        st.title("Charts and Visualizations")

        tab1, tab2 = st.tabs(["Model evaluation", "Image Analysis"])

        with tab1:
            st.subheader("Model Performance")
            st.title("Model Training Results")
            st.image("assets/model_performance.jpg", caption="Training Progress Over Time", use_column_width=True)
            with st.expander("Accuracy Analysis"):
                st.markdown("""
                - **Training Accuracy**: Shows how well the model learns from training data
                - **Validation Accuracy**: Indicates real-world performance on unseen data
                - **Ideal Scenario**: Both metrics should increase and stabilize at similar values
                """)
            with st.expander("Loss Analysis"):
                st.markdown("""
                - **Training Loss**: Measures error reduction during training
                - **Validation Loss**: Tracks generalization error
                - **Healthy Pattern**: Both should decrease steadily without significant divergence
                """)
            st.header("What This Means")
            st.write("""
            The model is learning properly without overfitting.
            Both accuracy and loss show good progress.
            You could stop training earlier when it stops improving.
            """)
        with tab2:
            st.subheader("Image Analysis")
            if 'uploaded_image' in st.session_state:
                uploaded_image = st.session_state.uploaded_image
                st.image(uploaded_image, caption="Uploaded Image", use_column_width=True)
                try:
                    img = Image.open(io.BytesIO(uploaded_image))
                    img_array = np.array(img)
                    # Use visualizer for RGB histograms
                    st.subheader("RGB Color Histograms")
                    figs = self.visualizer.plot_rgb_histograms(img_array)
                    col1, col2, col3 = st.columns(3)
                    for i, col in enumerate([col1, col2, col3]):
                        with col:
                            st.pyplot(figs[i])
                    # Visualization selector
                    analysis_type = st.selectbox(
                        "Select Analysis Type",
                        ["Image Statistics", "Edge Detection", "Intensity Map"]
                    )
                    if analysis_type == "Image Statistics":
                        stats = self.visualizer.image_statistics(img_array)
                        for key, value in stats.items():
                            st.write(f"**{key}:** {value}")
                    elif analysis_type == "Edge Detection":
                        edges = self.visualizer.edge_detection(img_array)
                        st.image(edges, caption="Edge Detection", use_column_width=True)
                    elif analysis_type == "Intensity Map":
                        fig = self.visualizer.intensity_map(img_array)
                        st.plotly_chart(fig)
                except Exception as e:
                    st.error(f"Error processing image: {str(e)}")
            else:
                st.warning("Please upload an image in the main page first.")

    def classes_page(self):
        """Display detailed information about each class"""
        st.title("Land Cover Classes")

        # List of (class_name, description)
        class_info = [
            ("AnnualCrop", "Agricultural areas where crops are planted and harvested within a single year."),
            ("Forest", "Areas dominated by trees, forming a continuous canopy."),
            ("HerbaceousVegetation", "Areas covered by non-woody plants and grasses."),
            ("Highway", "Major roads and transportation infrastructure."),
            ("Industrial", "Areas containing factories, warehouses, and industrial facilities."),
            ("Pasture", "Land used for grazing livestock."),
            ("PermanentCrop", "Agricultural areas with long-term crops like orchards and vineyards."),
            ("Residential", "Areas containing houses and residential buildings."),
            ("River", "Natural watercourses and their immediate surroundings."),
            ("SeaLake", "Large bodies of water including seas and lakes.")
        ]

        # Two-column layout, one expander per class.
        cols = st.columns(2)
        for idx, (name, desc) in enumerate(class_info):
            with cols[idx % 2]:
                with st.expander(f"{name}"):
                    st.write(desc)
|
| 168 |
+
|
| 169 |
+
# Initialize and run the app
if __name__ == "__main__":
    app = WebApp()

    # Sidebar navigation between the three pages.
    st.sidebar.title("Navigation")
    page = st.sidebar.radio("Go to", ["Home", "Charts", "Classes"])

    # Dispatch table instead of an if/elif chain; unknown values are a no-op,
    # matching the original fall-through behavior.
    pages = {
        "Home": app.main_page,
        "Charts": app.charts_page,
        "Classes": app.classes_page,
    }
    handler = pages.get(page)
    if handler is not None:
        handler()
|
config.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
Configuration file for the Land Cover Classification System
"""

# EuroSAT class names, in index order; CLASS_NAMES maps the stringified
# index ('0'..'9') to the class label.
_EUROSAT_LABELS = (
    'AnnualCrop',
    'Forest',
    'HerbaceousVegetation',
    'Highway',
    'Industrial',
    'Pasture',
    'PermanentCrop',
    'Residential',
    'River',
    'SeaLake',
)
CLASS_NAMES = {str(index): label for index, label in enumerate(_EUROSAT_LABELS)}

# Model configuration
MODEL_CONFIG = {
    'input_shape': (64, 64, 3),
    'model_path': 'models/ResNet50_eurosat.h5',
    'best_model_path': 'models/model.weights.best.keras',
    'indices_path': 'models/class_indices.npy',
}

# Data processing configuration
DATA_CONFIG = {
    'allowed_formats': ['png', 'jpg', 'jpeg', 'tiff'],
    'max_image_size': (1024, 1024),  # Maximum dimensions for uploaded images
}
|
data_processor.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import hashlib
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import numpy as np
|
| 5 |
+
import tensorflow as tf
|
| 6 |
+
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
| 7 |
+
from sklearn.model_selection import StratifiedShuffleSplit
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import io
|
| 10 |
+
|
| 11 |
+
class DataProcessor:
    """Image pre-processing and dataset inspection utilities."""

    def __init__(self):
        # Target (height, width, channels) expected by the model.
        self.input_shape = (64, 64, 3)  # Default input shape for EuroSAT

    def preprocess_for_inference(self, image):
        """Turn raw image-file bytes into a normalized, batched tensor.

        Raises:
            ValueError: if *image* is not a bytes object.
        """
        if not isinstance(image, bytes):
            raise ValueError("Input must be bytes (image file content)")

        pil_img = Image.open(io.BytesIO(image))
        # Drop the alpha channel if present -- the model expects 3 channels.
        if pil_img.mode == 'RGBA':
            pil_img = pil_img.convert('RGB')

        # float32 pixels scaled to [0, 1].
        pixels = np.array(pil_img).astype(np.float32) / 255.0
        # Resize to the model's spatial dimensions and add a batch axis.
        resized = tf.image.resize(pixels, (self.input_shape[0], self.input_shape[1]))
        return tf.expand_dims(resized, 0)

    @staticmethod
    def check_image_size(image_path):
        """Return the (width, height) of the image at *image_path*."""
        with Image.open(image_path) as img:
            return img.size

    @staticmethod
    def check_image_dimensions(dataset_path):
        """Collect the set of distinct (width, height) pairs in the dataset."""
        dimensions = set()
        for class_dir in os.listdir(dataset_path):
            class_path = os.path.join(dataset_path, class_dir)
            if not os.path.isdir(class_path):
                continue
            for fname in os.listdir(class_path):
                dimensions.add(
                    DataProcessor.check_image_size(os.path.join(class_path, fname))
                )
        return dimensions

    @staticmethod
    def get_data_generators():
        """Return the (train, test) Keras ImageDataGenerator pair."""
        augment_kwargs = dict(
            rescale=1. / 255,
            rotation_range=60,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            vertical_flip=True,
        )
        # Augmentation only applies to training; the test generator rescales.
        return ImageDataGenerator(**augment_kwargs), ImageDataGenerator(rescale=1. / 255)

    @staticmethod
    def get_image_hash(image_path):
        """Calculate the MD5 hex digest of the file at *image_path*."""
        with open(image_path, "rb") as fh:
            return hashlib.md5(fh.read()).hexdigest()

    @staticmethod
    def check_duplicates(dataset_path):
        """Return paths of images whose content duplicates an earlier image."""
        seen_hashes = set()
        duplicates = []
        for class_dir in os.listdir(dataset_path):
            class_path = os.path.join(dataset_path, class_dir)
            if not os.path.isdir(class_path):
                continue
            for fname in os.listdir(class_path):
                image_path = os.path.join(class_path, fname)
                digest = DataProcessor.get_image_hash(image_path)
                if digest in seen_hashes:
                    duplicates.append(image_path)
                else:
                    seen_hashes.add(digest)
        return duplicates
|
model_handler.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import tensorflow as tf
|
| 2 |
+
from tensorflow.keras.models import load_model, Model
|
| 3 |
+
from tensorflow.keras.applications import ResNet50
|
| 4 |
+
from tensorflow.keras.layers import Flatten, Dense, Dropout
|
| 5 |
+
from tensorflow.keras.optimizers import Adam
|
| 6 |
+
import numpy as np
|
| 7 |
+
import os
|
| 8 |
+
from PIL import Image
|
| 9 |
+
import io
|
| 10 |
+
from config import CLASS_NAMES, MODEL_CONFIG
|
| 11 |
+
|
| 12 |
+
class ModelHandler:
    """Loads the trained EuroSAT ResNet50 model and runs inference on images."""

    def __init__(self):
        self.model = None
        # Fix: take a per-instance copy. The original aliased the module-level
        # CLASS_NAMES dict, so load_class_indices() mutated shared state for
        # every instance (and every other user of CLASS_NAMES).
        self.class_indices = dict(CLASS_NAMES)
        self.input_shape = MODEL_CONFIG['input_shape']

    def load_model(self, model_path=MODEL_CONFIG['model_path']):
        """Load the pre-trained ResNet50 model.

        Returns:
            bool: True on success, False if loading failed.
        """
        try:
            self.model = load_model(model_path)
            return True
        except Exception as e:
            print(f"Error loading model: {str(e)}")
            return False

    def load_class_indices(self, indices_path=MODEL_CONFIG['indices_path']):
        """Load the class-index mapping saved alongside the model.

        Falls back to the built-in CLASS_NAMES defaults when the file is
        absent, which is still a successful outcome.

        Returns:
            bool: True on success (including the defaults-only case),
            False if reading the file failed.
        """
        try:
            if os.path.exists(indices_path):
                loaded_indices = np.load(indices_path, allow_pickle=True).item()
                # Normalize keys to str so predict()'s str(index) lookups work
                # even if the file stored integer keys.
                # NOTE(review): assumes the file maps index -> class name; if it
                # stores the Keras {name: index} orientation it must be inverted
                # here -- verify against how class_indices.npy was saved.
                self.class_indices.update(
                    {str(k): v for k, v in loaded_indices.items()})
            # Fix: the original fell through and returned None (falsy) when the
            # file was merely missing, making the caller report a spurious
            # error even though the defaults are usable.
            return True
        except Exception as e:
            print(f"Error loading class indices: {str(e)}")
            return False

    def preprocess_image(self, image):
        """Preprocess raw image bytes into a normalized, batched tensor.

        Raises:
            ValueError: if *image* is not a bytes object.
        """
        if not isinstance(image, bytes):
            raise ValueError("Input must be bytes (image file content)")

        # Convert bytes to PIL Image
        pil_img = Image.open(io.BytesIO(image))

        # Convert RGBA to RGB if necessary -- the model expects 3 channels.
        if pil_img.mode == 'RGBA':
            pil_img = pil_img.convert('RGB')

        # float32 pixels scaled to [0, 1].
        pixels = np.array(pil_img).astype(np.float32) / 255.0

        # Resize to the model's spatial dimensions and add a batch axis.
        resized = tf.image.resize(pixels, (self.input_shape[0], self.input_shape[1]))
        return tf.expand_dims(resized, 0)

    def predict(self, image):
        """Run the model on raw image bytes.

        Returns:
            dict: 'class_name' (str), 'confidence' (float in [0, 1]) and
            'all_predictions' (list of per-class probabilities).

        Raises:
            ValueError: if the model has not been loaded yet.
        """
        if self.model is None:
            raise ValueError("Model not loaded. Call load_model() first.")

        # Preprocess image
        processed_image = self.preprocess_image(image)

        # Get prediction
        predictions = self.model.predict(processed_image)

        # Get top prediction
        top_pred_idx = np.argmax(predictions[0])
        confidence = predictions[0][top_pred_idx]

        # Get class name from indices; fall back to a synthetic name so an
        # incomplete mapping never crashes the UI.
        class_name = self.class_indices.get(str(top_pred_idx), f"Class_{top_pred_idx}")

        return {
            'class_name': class_name,
            'confidence': float(confidence),
            'all_predictions': predictions[0].tolist()
        }

    @staticmethod
    def compile_model(input_shape, n_classes, optimizer, fine_tune=None):
        """Compile a new ResNet50 model (for training).

        Args:
            input_shape: (height, width, channels) of the model input.
            n_classes: number of output classes.
            optimizer: Keras optimizer instance or identifier.
            fine_tune: if an int, layers from that index onward are unfrozen
                for fine-tuning; otherwise the whole base is frozen.

        Returns:
            A compiled tf.keras Model.
        """
        conv_base = ResNet50(include_top=False,
                             weights='imagenet',
                             input_shape=input_shape)

        # Classification head on top of the frozen/partially-frozen base.
        top_model = conv_base.output
        top_model = Flatten()(top_model)
        top_model = Dense(2048, activation='relu')(top_model)
        top_model = Dropout(0.2)(top_model)
        output_layer = Dense(n_classes, activation='softmax')(top_model)

        model = Model(inputs=conv_base.input, outputs=output_layer)

        if isinstance(fine_tune, int):
            for layer in conv_base.layers[fine_tune:]:
                layer.trainable = True
        else:
            for layer in conv_base.layers:
                layer.trainable = False

        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])

        return model
|
requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
tensorflow>=2.0.0
|
| 2 |
+
streamlit>=1.0.0
|
| 3 |
+
plotly>=4.14.3
|
| 4 |
+
pandas>=1.2.0
|
| 5 |
+
numpy>=1.19.2
|
| 6 |
+
Pillow>=8.0.0
|
| 7 |
+
scikit-learn>=0.24.0
|
| 8 |
+
seaborn>=0.11.0
|
| 9 |
+
matplotlib>=3.3.0
|
| 10 |
+
python-dotenv>=0.19.0
|
| 11 |
+
tqdm>=4.62.0
|
| 12 |
+
opencv-python>=4.5.0
|
visualizer.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import seaborn as sns
|
| 3 |
+
import numpy as np
|
| 4 |
+
import plotly.express as px
|
| 5 |
+
import cv2
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from PIL import Image
|
| 8 |
+
import io
|
| 9 |
+
import streamlit as st
|
| 10 |
+
|
| 11 |
+
class Visualizer:
    """Plotting helpers for model evaluation and image analysis dashboards."""

    def __init__(self):
        # Stateless: every method operates only on its arguments.
        pass

    def plot_confusion_matrix(self, cm, labels):
        """Render a confusion matrix as an annotated heatmap.

        Args:
            cm: 2-D array of integer counts (true x predicted).
            labels: Class names used for both axes.

        Returns:
            The matplotlib.pyplot module with the new figure current.
        """
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=labels, yticklabels=labels)
        plt.title('Confusion Matrix')
        plt.ylabel('True Label')
        plt.xlabel('Predicted Label')
        return plt

    def plot_training_history(self, history):
        """Plot accuracy and loss curves side by side.

        Args:
            history: A Keras History object whose .history dict contains
                'accuracy', 'val_accuracy', 'loss', and 'val_loss'.

        Returns:
            The matplotlib.pyplot module with the new figure current.
        """
        plt.figure(figsize=(12, 4))

        # Left panel: accuracy curves.
        plt.subplot(1, 2, 1)
        plt.plot(history.history['accuracy'], label='Training Accuracy')
        plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
        plt.title('Model Accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy')
        plt.legend()

        # Right panel: loss curves.
        plt.subplot(1, 2, 2)
        plt.plot(history.history['loss'], label='Training Loss')
        plt.plot(history.history['val_loss'], label='Validation Loss')
        plt.title('Model Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()

        return plt

    def plot_confidence_bar(self, class_names, predictions):
        """Interactive Plotly bar chart of per-class confidence, sorted descending.

        Args:
            class_names: Sequence of class labels.
            predictions: Matching sequence of confidence scores in [0, 1]
                (the y-axis is formatted as a percentage).

        Returns:
            A plotly Figure.
        """
        df = pd.DataFrame({
            'Class': class_names,
            'Confidence': predictions
        }).sort_values('Confidence', ascending=False)
        fig = px.bar(df,
                     x='Class',
                     y='Confidence',
                     title='Classification Confidence Scores',
                     labels={'Confidence': 'Confidence Score'},
                     color='Confidence',
                     color_continuous_scale='Viridis')
        fig.update_layout(
            xaxis_title="Land Cover Class",
            yaxis_title="Confidence Score",
            yaxis_tickformat='.1%',
            showlegend=False
        )
        return fig

    def plot_rgb_histograms(self, img_array):
        """Build one histogram figure per RGB channel.

        Assumes img_array is H x W x 3 with uint8-range values — the histogram
        range is fixed at (0, 256). TODO confirm callers never pass normalized
        [0, 1] floats, which would collapse into the first bin.

        Returns:
            List of three matplotlib Figures (Red, Green, Blue).
        """
        colors = ['Red', 'Green', 'Blue']
        figs = []
        for i, color in enumerate(colors):
            fig, ax = plt.subplots()
            histogram = np.histogram(img_array[:, :, i], bins=256, range=(0, 256))[0]
            ax.plot(histogram, color=color.lower(), alpha=0.8)
            ax.set_title(f"{color} Channel")
            ax.set_xlabel("Pixel Intensity")
            ax.set_ylabel("Frequency")
            figs.append(fig)
        return figs

    def image_statistics(self, img_array):
        """Return basic summary statistics for an image array.

        Handles both H x W x C color arrays and 2-D grayscale arrays; the
        original indexed shape[2] unconditionally and raised IndexError on
        grayscale input.

        Returns:
            Dict of brightness stats, min/max pixel values, "WxH" size
            string, and channel count.
        """
        stats = {
            "Mean Brightness": float(np.mean(img_array)),
            "Standard Deviation": float(np.std(img_array)),
            "Min Value": int(np.min(img_array)),
            "Max Value": int(np.max(img_array)),
            # shape is (H, W[, C]); size is reported as width x height.
            "Image Size": f"{img_array.shape[1]}x{img_array.shape[0]}",
            # 2-D grayscale arrays have a single channel.
            "Channels": img_array.shape[2] if img_array.ndim == 3 else 1
        }
        return stats

    def edge_detection(self, img_array):
        """Canny edge map of the channel-averaged grayscale image.

        Expects an H x W x C array (averages over axis 2). Thresholds 100/200
        are the common Canny defaults.

        Returns:
            A 2-D uint8 edge map (255 on edges, 0 elsewhere).
        """
        gray = np.mean(img_array, axis=2).astype(np.uint8)
        edges = cv2.Canny(gray, 100, 200)
        return edges

    def intensity_map(self, img_array):
        """Plotly heatmap of mean pixel intensity across channels.

        Expects an H x W x C array (averages over axis 2).

        Returns:
            A plotly Figure.
        """
        gray = np.mean(img_array, axis=2)
        fig = px.imshow(gray,
                        title="Intensity Map",
                        color_continuous_scale='viridis')
        return fig
|