Ashkan Taghipour (The University of Western Australia) committed on
Commit
f5648f5
·
0 Parent(s):

Initial commit

Browse files
Files changed (14) hide show
  1. .gitignore +66 -0
  2. .streamlit/config.toml +23 -0
  3. Dockerfile +27 -0
  4. LICENSE +21 -0
  5. README.md +9 -0
  6. app.py +636 -0
  7. assets/style.css +328 -0
  8. data/examples.json +70 -0
  9. requirements.txt +33 -0
  10. src/__init__.py +0 -0
  11. src/analysis.py +742 -0
  12. src/report.py +357 -0
  13. src/stac_utils.py +752 -0
  14. src/visualization.py +1103 -0
.gitignore ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual environments
24
+ env/
25
+ venv/
26
+ ENV/
27
+ .venv/
28
+ rehabwatch_env/
29
+
30
+ # IDE
31
+ .idea/
32
+ .vscode/
33
+ *.swp
34
+ *.swo
35
+ *~
36
+
37
+ # Jupyter
38
+ .ipynb_checkpoints/
39
+
40
+ # Testing
41
+ .pytest_cache/
42
+ .coverage
43
+ htmlcov/
44
+ .tox/
45
+ .nox/
46
+
47
+ # Streamlit
48
+ .streamlit/secrets.toml
49
+
50
+ # Logs
51
+ *.log
52
+ streamlit_*.log
53
+ nohup.out
54
+
55
+ # OS
56
+ .DS_Store
57
+ Thumbs.db
58
+
59
+ # Project specific
60
+ *.pdf
61
+ *.csv
62
+ *.pptx
63
+ CLAUDE.md
64
+ pitch_deck_generator.py
65
+ runtime.txt
66
+ tests/
.streamlit/config.toml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [theme]
2
+ primaryColor = "#2E7D32"
3
+ backgroundColor = "#FFFFFF"
4
+ secondaryBackgroundColor = "#F5F5F5"
5
+ textColor = "#212121"
6
+
7
+ [server]
8
+ maxUploadSize = 50
9
+ enableCORS = false
10
+ enableXsrfProtection = true
11
+ headless = true
12
+ runOnSave = false
13
+
14
+ [browser]
15
+ gatherUsageStats = false
16
+
17
+ [client]
18
+ showErrorDetails = false
19
+ toolbarMode = "minimal"
20
+
21
+ [runner]
22
+ magicEnabled = true
23
+ fastReruns = true
Dockerfile ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Container image for the MineWatchAI Streamlit app.
FROM python:3.10-slim

WORKDIR /app

# Install system dependencies for GDAL (required by the geospatial stack).
RUN apt-get update && apt-get install -y \
    gdal-bin \
    libgdal-dev \
    libgeos-dev \
    libproj-dev \
    && rm -rf /var/lib/apt/lists/*

# Set GDAL environment variables so Python GDAL bindings find the toolchain.
ENV GDAL_CONFIG=/usr/bin/gdal-config

# Copy requirements first so the pip layer stays cached while app code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application.
COPY . .

# Expose Streamlit port (7860 matches the README's Docker-SDK Space front-matter,
# presumably the Hugging Face Spaces default -- confirm with the deployment target).
EXPOSE 7860

# Run Streamlit, binding to all interfaces for in-container serving.
CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0", "--server.headless=true"]
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2024 RehabWatch
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: MineWatchAI
3
+ emoji: 🤖
4
+ colorFrom: green
5
+ colorTo: blue
6
+ sdk: docker
7
+ pinned: false
8
+ license: mit
9
+ ---
app.py ADDED
@@ -0,0 +1,636 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MineWatchAI - AI-Powered Mining Rehabilitation Monitoring
3
+ Main Streamlit Application
4
+
5
+ AI-powered environmental intelligence for measurable,
6
+ audit-ready mining rehabilitation.
7
+ """
8
+
9
+ import streamlit as st
10
+ import json
11
+ import gc
12
+ from datetime import datetime, timedelta
13
+ from pathlib import Path
14
+
15
+ # Page configuration - MUST be first Streamlit command
16
+ st.set_page_config(
17
+ page_title="MineWatchAI - Green Tech",
18
+ page_icon="🤖",
19
+ layout="wide",
20
+ initial_sidebar_state="expanded"
21
+ )
22
+
23
+ # Show loading message while imports happen
24
+ with st.spinner("Loading MineWatchAI..."):
25
+ # Lazy imports for faster initial load
26
+ from streamlit_folium import st_folium
27
+ import folium
28
+
29
+
30
def load_css():
    """Inject the app's custom stylesheet (assets/style.css) into the page.

    Does nothing when the stylesheet is missing, so the app still runs
    with default Streamlit styling.
    """
    css_path = Path(__file__).parent / "assets" / "style.css"
    if css_path.exists():
        # Read with an explicit encoding: a bare open() would use the
        # platform/locale default, which can mis-decode the CSS in containers.
        css = css_path.read_text(encoding="utf-8")
        st.markdown(f"<style>{css}</style>", unsafe_allow_html=True)
36
+
37
+
38
@st.cache_data
def load_examples():
    """Load the example mines data from data/examples.json (cached by Streamlit).

    Returns:
        dict: the parsed JSON, or a minimal fallback (no examples, default
        Western Australia map location) when the file is missing.
    """
    json_path = Path(__file__).parent / "data" / "examples.json"
    if json_path.exists():
        # JSON is defined to be UTF-8; don't rely on the locale default encoding.
        with open(json_path, encoding="utf-8") as f:
            return json.load(f)
    return {"examples": [], "default_location": {"center": [-28.0, 121.0], "zoom": 6}}
46
+
47
+
48
def get_bbox_from_example(example: dict) -> tuple:
    """Return a (min_lon, min_lat, max_lon, max_lat) box for a mine entry.

    Prefers an explicit 4-value ``geometry.coordinates`` rectangle; otherwise
    derives a fixed ~0.1-degree square around the entry's ``center`` ([lat, lon]).
    """
    rect = example.get("geometry", {}).get("coordinates", [])
    if len(rect) == 4:
        return tuple(rect)

    # Fall back to a small box around the site's centre point.
    lat, lon = example.get("center", [-28.0, 121.0])
    pad = 0.05
    return (lon - pad, lat - pad, lon + pad, lat + pad)
58
+
59
+
60
def create_simple_map(center, zoom=10, bbox=None):
    """Build a basic Folium map centred on *center*.

    Adds an Esri satellite base layer and, when *bbox* is given as
    (min_lon, min_lat, max_lon, max_lat), draws the analysis-area polygon.
    """
    fmap = folium.Map(location=center, zoom_start=zoom)

    # Esri world imagery as a selectable base layer.
    satellite = folium.TileLayer(
        tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
        attr='Esri',
        name='Satellite',
        overlay=False
    )
    satellite.add_to(fmap)

    if bbox is not None:
        min_lon, min_lat, max_lon, max_lat = bbox
        # Closed ring of (lat, lon) corners tracing the bounding box.
        ring = [
            [min_lat, min_lon],
            [min_lat, max_lon],
            [max_lat, max_lon],
            [max_lat, min_lon],
            [min_lat, min_lon],
        ]
        area = folium.Polygon(
            locations=ring,
            color='#1B5E20',
            weight=3,
            fill=True,
            fillColor='#2E7D32',
            fillOpacity=0.2,
            popup='Analysis Area'
        )
        area.add_to(fmap)

    folium.LayerControl().add_to(fmap)
    return fmap
93
+
94
+
95
def run_analysis(bbox, mine_info, date_before, date_after):
    """Run the comprehensive vegetation change analysis and cache the results.

    Args:
        bbox: (min_lon, min_lat, max_lon, max_lat) analysis area in degrees.
        mine_info: dict describing the selected mine; stored verbatim for display.
        date_before: analysis start date, 'YYYY-MM-DD' string.
        date_after: analysis end date, 'YYYY-MM-DD' string.

    Returns:
        bool: True when results were stored in st.session_state.analysis_results,
        False when the analysis failed (an error message is shown in the UI).
    """
    # Import heavy modules only when needed (keeps initial app load fast).
    from src.stac_utils import get_bbox_center
    from src.analysis import (
        analyze_vegetation_change,
        analyze_terrain,
        analyze_land_cover,
        calculate_reference_ndvi,
        calculate_rehab_score,
        calculate_comprehensive_rehab_score,
        generate_interpretation
    )

    progress = st.progress(0, text="Initializing analysis...")

    try:
        progress.progress(10, text="Searching for satellite imagery...")

        # Vegetation change analysis across all spectral indices.
        results = analyze_vegetation_change(
            bbox,
            date_before,
            date_after,
            window_days=15,
            cloud_threshold=30
        )

        progress.progress(50, text="Analyzing terrain...")

        # Terrain analysis, fed with the post-period bare-soil index when available.
        bsi_after = results.get('indices_after', {}).get('bsi')
        terrain_results = analyze_terrain(bbox, bsi=bsi_after)
        terrain_stats = terrain_results.get('stats', {})

        progress.progress(70, text="Analyzing land cover...")

        # Land cover analysis works by calendar year, not exact dates.
        year_before = int(date_before[:4])
        year_after = int(date_after[:4])
        # Clamp years to available data range (2017-2023)
        year_before = max(2017, min(2023, year_before))
        year_after = max(2017, min(2023, year_after))

        land_cover_results = analyze_land_cover(bbox, year_before, year_after)
        land_cover_stats = land_cover_results.get('stats', {})

        progress.progress(85, text="Calculating rehabilitation metrics...")

        # Reference NDVI from a buffer around the site (presumably undisturbed
        # surroundings used as the rehabilitation target -- confirm in src.analysis).
        reference_ndvi = calculate_reference_ndvi(
            bbox,
            date_after,
            window_days=15,
            buffer_deg=0.01
        )

        # Comprehensive rehabilitation scores; a non-positive reference NDVI
        # is replaced with 0.5 as a fallback target value.
        rehab_scores = calculate_comprehensive_rehab_score(
            results['stats'],
            terrain_stats=terrain_stats,
            land_cover_stats=land_cover_stats,
            reference_ndvi=reference_ndvi if reference_ndvi > 0 else 0.5
        )

        # Legacy single-number score, kept for backwards compatibility.
        site_ndvi = results['stats']['ndvi_after_mean']
        rehab_score = calculate_rehab_score(site_ndvi, reference_ndvi)

        progress.progress(95, text="Generating interpretation...")

        # Human-readable interpretation of the combined metrics.
        interpretation = generate_interpretation(
            results['stats'],
            rehab_score,
            terrain_stats=terrain_stats,
            land_cover_stats=land_cover_stats
        )

        # Map centre used later for rendering.
        center = get_bbox_center(bbox)

        progress.progress(100, text="Analysis complete!")

        # Persist what the results page needs. Note the NDVI arrays ARE kept
        # (they are needed for map rendering); only the remaining contents of
        # `results` are dropped below to reduce memory use.
        st.session_state.analysis_results = {
            'ndvi_before': results['ndvi_before'],
            'ndvi_after': results['ndvi_after'],
            'ndvi_change': results['ndvi_change'],
            'indices_after': results.get('indices_after', {}),
            'index_changes': results.get('index_changes', {}),
            'stats': results['stats'],
            'date_before': results['date_before'],
            'date_after': results['date_after'],
            'bbox': results['bbox'],
            'terrain_results': terrain_results,
            'terrain_stats': terrain_stats,
            'land_cover_results': land_cover_results,
            'land_cover_stats': land_cover_stats,
            'rehab_score': rehab_score,
            'rehab_scores': rehab_scores,
            'reference_ndvi': reference_ndvi,
            'interpretation': interpretation,
            'mine_info': mine_info,
            'center': center
        }

        # Release the original result dict promptly to cap peak memory.
        del results
        gc.collect()

        return True

    except ValueError as e:
        # Expected failure mode (e.g. no suitable imagery for the window).
        st.error(f"Analysis Error: {str(e)}")
        return False
    except Exception as e:
        # UI boundary: surface anything unexpected rather than crash the app.
        st.error(f"Unexpected error: {str(e)}")
        st.info("Try adjusting the date range or selecting a different site.")
        return False
215
+
216
+
217
def display_results():
    """Render the full results page from st.session_state.analysis_results.

    Expects run_analysis() to have populated the session state. Shows the
    map tabs, metric/chart tabs, an on-demand time series, and PDF/CSV export.
    """
    # Import visualization only when showing results (keeps app start fast).
    # NOTE(review): create_ndvi_comparison_chart is imported but never used below.
    from src.visualization import (
        create_comparison_map,
        create_multi_index_map,
        create_terrain_map,
        create_land_cover_map,
        create_comprehensive_stats_display,
        create_area_breakdown_chart,
        create_ndvi_comparison_chart,
        create_multi_index_chart,
        create_terrain_stats_chart,
        create_land_cover_chart,
        create_vegetation_health_chart,
        create_environmental_indicators_chart,
        create_statistics_table,
        create_time_series_chart
    )
    from src.report import generate_pdf_report, stats_to_csv

    # Unpack cached analysis results (see run_analysis for the keys stored).
    # NOTE(review): rehab_scores is read here but not rendered anywhere below.
    results = st.session_state.analysis_results
    stats = results['stats']
    rehab_score = results['rehab_score']
    rehab_scores = results.get('rehab_scores', {})
    interpretation = results['interpretation']
    mine_info = results.get('mine_info', {})
    terrain_stats = results.get('terrain_stats', {})
    land_cover_stats = results.get('land_cover_stats', {})
    terrain_results = results.get('terrain_results', {})
    land_cover_results = results.get('land_cover_results', {})

    mine_name = mine_info.get('name', 'Selected Site')
    tenement_id = mine_info.get('tenement_id', 'N/A')

    st.markdown(f"### Analysis Results: {mine_name}")
    st.markdown(f"*Tenement: {tenement_id} | Period: {results['date_before']} to {results['date_after']}*")

    # Clearing the cached results sends the user back to the site-selection view.
    if st.button("🔄 New Analysis", type="secondary"):
        st.session_state.analysis_results = None
        st.rerun()

    st.markdown("---")

    # Map section with tabs for different views.
    st.markdown("### Maps")
    map_tab1, map_tab2, map_tab3, map_tab4 = st.tabs([
        "Vegetation Change", "Multi-Index", "Terrain", "Land Cover"
    ])

    with map_tab1:
        with st.spinner("Rendering vegetation change map..."):
            comparison_map = create_comparison_map(
                bbox=results['bbox'],
                ndvi_before=results['ndvi_before'],
                ndvi_after=results['ndvi_after'],
                ndvi_change=results['ndvi_change'],
                center_coords=results['center'],
                zoom=mine_info.get('zoom', 13)
            )
            # returned_objects=[] avoids reruns on map interaction.
            st_folium(comparison_map, width=None, height=450, returned_objects=[])
            st.caption("Green = improvement, Red = decline. Use layer control to toggle views.")

    with map_tab2:
        with st.spinner("Rendering multi-index map..."):
            indices_after = results.get('indices_after', {})
            index_changes = results.get('index_changes', {})
            if indices_after:
                multi_map = create_multi_index_map(
                    bbox=results['bbox'],
                    indices_after=indices_after,
                    index_changes=index_changes,
                    center_coords=results['center'],
                    zoom=mine_info.get('zoom', 13)
                )
                st_folium(multi_map, width=None, height=450, returned_objects=[])
                st.caption("Toggle layers to view different indices: NDVI, SAVI, EVI, BSI, NDWI, NDMI")
            else:
                st.info("Multi-index data not available")

    with map_tab3:
        if terrain_results and 'slope' in terrain_results:
            with st.spinner("Rendering terrain map..."):
                terrain_map = create_terrain_map(
                    bbox=results['bbox'],
                    slope=terrain_results['slope'],
                    aspect=terrain_results.get('aspect'),
                    erosion_risk=terrain_results.get('erosion_risk'),
                    center_coords=results['center'],
                    zoom=mine_info.get('zoom', 13)
                )
                st_folium(terrain_map, width=None, height=450, returned_objects=[])
                st.caption("Slope analysis from Copernicus DEM GLO-30")
        else:
            st.info("Terrain data not available for this location")

    with map_tab4:
        if land_cover_results and 'lulc_after' in land_cover_results:
            with st.spinner("Rendering land cover map..."):
                lulc_map = create_land_cover_map(
                    bbox=results['bbox'],
                    lulc=land_cover_results['lulc_after'],
                    center_coords=results['center'],
                    zoom=mine_info.get('zoom', 13),
                    year=land_cover_stats.get('year_after', 2023)
                )
                st_folium(lulc_map, width=None, height=450, returned_objects=[])
                st.caption("IO-LULC land cover classification")
        else:
            st.info("Land cover data not available for this location")

    st.markdown("---")

    # Main analysis tabs.
    tab1, tab2, tab3, tab4, tab5 = st.tabs([
        "Summary", "All Indices", "Terrain & Land Cover", "Time Series", "Export"
    ])

    with tab1:
        # Headline metrics and score display.
        create_comprehensive_stats_display(
            stats, rehab_score,
            terrain_stats=terrain_stats,
            land_cover_stats=land_cover_stats
        )

        st.markdown("---")
        st.markdown("### Interpretation")
        st.info(interpretation)

        col1, col2 = st.columns(2)
        with col1:
            st.plotly_chart(create_area_breakdown_chart(stats), use_container_width=True)
        with col2:
            st.plotly_chart(create_vegetation_health_chart(stats), use_container_width=True)

        # Environmental indicators radar chart.
        st.plotly_chart(create_environmental_indicators_chart(stats), use_container_width=True)

    with tab2:
        st.markdown("### Multi-Index Analysis")
        st.markdown("""
        Multiple vegetation and soil indices provide a comprehensive view:
        - **NDVI**: Overall vegetation health
        - **SAVI**: Better for sparse vegetation (soil-adjusted)
        - **EVI**: Better for dense vegetation
        - **NDWI**: Water presence
        - **NDMI**: Vegetation moisture content
        - **BSI**: Bare soil extent
        """)

        st.plotly_chart(create_multi_index_chart(stats), use_container_width=True)

        # Detailed stats table.
        st.markdown("### Detailed Statistics")
        create_statistics_table(stats)

    with tab3:
        col1, col2 = st.columns(2)

        with col1:
            st.markdown("### Terrain Analysis")
            if terrain_stats:
                st.plotly_chart(create_terrain_stats_chart(terrain_stats), use_container_width=True)

                st.markdown("**Terrain Metrics:**")
                st.write(f"- Mean Slope: {terrain_stats.get('slope_mean', 0):.1f}°")
                st.write(f"- Max Slope: {terrain_stats.get('slope_max', 0):.1f}°")
                st.write(f"- Elevation Range: {terrain_stats.get('elevation_min', 0):.0f}m - {terrain_stats.get('elevation_max', 0):.0f}m")
                if 'percent_high_erosion_risk' in terrain_stats:
                    st.write(f"- High Erosion Risk: {terrain_stats['percent_high_erosion_risk']:.1f}%")
            else:
                st.info("Terrain analysis not available")

        with col2:
            st.markdown("### Land Cover Change")
            if land_cover_stats and 'class_changes' in land_cover_stats:
                st.plotly_chart(create_land_cover_chart(land_cover_stats), use_container_width=True)

                st.markdown("**Land Cover Metrics:**")
                st.write(f"- Vegetation Cover: {land_cover_stats.get('vegetation_cover_after', 0):.1f}%")
                st.write(f"- Vegetation Change: {land_cover_stats.get('vegetation_cover_change', 0):+.1f}%")
                st.write(f"- Bare Ground: {land_cover_stats.get('bare_ground_after', 0):.1f}%")
                st.write(f"- Bare Ground Change: {land_cover_stats.get('bare_ground_change', 0):+.1f}%")
            else:
                st.info("Land cover analysis not available")

    with tab4:
        st.markdown("### Time Series")
        st.info("Click button below to load historical NDVI data (may take a few minutes)")

        # Loaded on demand only: the time-series fetch is slow.
        if st.button("Load Time Series"):
            from src.analysis import get_monthly_ndvi_timeseries

            date_before = datetime.strptime(results['date_before'], '%Y-%m-%d')
            date_after = datetime.strptime(results['date_after'], '%Y-%m-%d')

            with st.spinner("Loading time series..."):
                try:
                    timeseries = get_monthly_ndvi_timeseries(
                        results['bbox'],
                        date_before.year,
                        date_after.year
                    )
                    if timeseries:
                        fig = create_time_series_chart(timeseries)
                        st.plotly_chart(fig, use_container_width=True)
                    else:
                        st.warning("No time series data available.")
                except Exception as e:
                    # Best-effort feature: report and continue rendering the page.
                    st.error(f"Error: {e}")

    with tab5:
        st.markdown("### Export Results")

        col1, col2 = st.columns(2)

        with col1:
            st.markdown("#### PDF Report")
            # PDF is generated on every rerun of this tab (not cached).
            pdf_bytes = generate_pdf_report(
                tenement_id=tenement_id,
                stats=stats,
                rehab_score=rehab_score,
                interpretation=interpretation,
                date_before=results['date_before'],
                date_after=results['date_after'],
                mine_name=mine_name
            )
            st.download_button(
                "Download PDF",
                data=pdf_bytes,
                file_name=f"rehabwatch_{tenement_id.replace(' ', '_')}.pdf",
                mime="application/pdf",
                use_container_width=True
            )

        with col2:
            st.markdown("#### CSV Data")
            csv_data = stats_to_csv(
                stats=stats,
                tenement_id=tenement_id,
                rehab_score=rehab_score,
                date_before=results['date_before'],
                date_after=results['date_after'],
                mine_name=mine_name
            )
            st.download_button(
                "Download CSV",
                data=csv_data,
                file_name=f"rehabwatch_{tenement_id.replace(' ', '_')}.csv",
                mime="text/csv",
                use_container_width=True
            )
470
+
471
+
472
def main():
    """Main application function.

    Builds the sidebar (site selection, analysis period, run button), then
    either shows analysis results or the site-selection map. Communicates
    with run_analysis()/display_results() via st.session_state.
    """
    load_css()

    examples_data = load_examples()
    examples = examples_data.get("examples", [])
    default_location = examples_data.get("default_location", {"center": [-28.0, 121.0], "zoom": 6})

    # Initialize session state used across reruns.
    if "analysis_results" not in st.session_state:
        st.session_state.analysis_results = None
    if "selected_bbox" not in st.session_state:
        st.session_state.selected_bbox = None
    if "selected_mine" not in st.session_state:
        st.session_state.selected_mine = None
    if "analyzing" not in st.session_state:
        st.session_state.analyzing = False

    # Sidebar: site selection and analysis controls.
    with st.sidebar:
        st.markdown("## 🌱 Analysis Hub")
        st.markdown("**MineWatchAI**")
        st.markdown("---")

        st.markdown("### Select Mining Site")

        example_names = ["Select a mine..."] + [e["name"] for e in examples]
        selected_name = st.selectbox("Choose a mine:", example_names)

        if selected_name != "Select a mine...":
            for example in examples:
                if example["name"] == selected_name:
                    st.session_state.selected_mine = example
                    base_bbox = get_bbox_from_example(example)

                    # Area size adjustment
                    st.markdown("#### Adjust Analysis Area")
                    area_scale = st.slider(
                        "Area Size",
                        min_value=0.5,
                        max_value=2.0,
                        value=1.0,
                        step=0.1,
                        help="Adjust the analysis area: <1.0 = smaller, >1.0 = larger"
                    )

                    # Scale the bbox symmetrically about its centre.
                    min_lon, min_lat, max_lon, max_lat = base_bbox
                    center_lon = (min_lon + max_lon) / 2
                    center_lat = (min_lat + max_lat) / 2
                    half_width = (max_lon - min_lon) / 2 * area_scale
                    half_height = (max_lat - min_lat) / 2 * area_scale

                    st.session_state.selected_bbox = (
                        center_lon - half_width,
                        center_lat - half_height,
                        center_lon + half_width,
                        center_lat + half_height
                    )

                    st.info(f"📌 {example['description']}")
                    break

        st.markdown("---")
        st.markdown("### Analysis Period")

        # Quick-range buttons seed the date_input defaults via session state.
        col1, col2, col3 = st.columns(3)
        today = datetime.now().date()

        with col1:
            if st.button("1Y", use_container_width=True):
                st.session_state.date_before = today - timedelta(days=365)
                st.session_state.date_after = today
        with col2:
            if st.button("2Y", use_container_width=True):
                st.session_state.date_before = today - timedelta(days=730)
                st.session_state.date_after = today
        with col3:
            if st.button("5Y", use_container_width=True):
                st.session_state.date_before = today - timedelta(days=1825)
                st.session_state.date_after = today

        default_before = st.session_state.get("date_before", today - timedelta(days=730))
        default_after = st.session_state.get("date_after", today)

        date_before = st.date_input("Start Date", value=default_before, max_value=today)
        date_after = st.date_input("End Date", value=default_after, max_value=today)

        if date_after <= date_before:
            st.error("End date must be after start date")

        st.markdown("---")

        # The run button stays disabled until a site and a valid range exist.
        analyze_disabled = st.session_state.selected_bbox is None or date_after <= date_before

        if st.button("🤖 AI-Analyze", type="primary", use_container_width=True, disabled=analyze_disabled):
            st.session_state.analyzing = True

        if analyze_disabled:
            st.caption("Select a mine and valid dates")

        st.markdown("---")

        with st.expander("ℹ️ About"):
            st.markdown("""
            **MineWatchAI** - AI-driven green technology
            for sustainable mining rehabilitation.

            ---

            Developed by [Ashkan Taghipour](https://ashkantaghipour.github.io/)

            ⚠️ *This is a research startup project currently
            under active development.*
            """)

    # Main area header.
    st.markdown("# 🤖 MineWatchAI")
    st.markdown("*AI-powered environmental intelligence for measurable, audit-ready mining rehabilitation*")

    # Run the analysis when the sidebar button armed the flag on this rerun.
    if st.session_state.analyzing:
        st.session_state.analyzing = False
        success = run_analysis(
            st.session_state.selected_bbox,
            st.session_state.selected_mine,
            str(date_before),
            str(date_after)
        )
        if success:
            st.rerun()

    # Display results or the site-selection map.
    if st.session_state.analysis_results is not None:
        display_results()
    else:
        st.markdown("### Select a Mining Site")

        if st.session_state.selected_bbox is not None:
            mine = st.session_state.selected_mine
            center = mine.get("center", default_location["center"])
            zoom = mine.get("zoom", 12)

            st.markdown(f"**Selected:** {mine['name']} ({mine['tenement_id']})")
            m = create_simple_map(tuple(center), zoom, st.session_state.selected_bbox)
            st_folium(m, width=None, height=400, returned_objects=[])
        else:
            st.info("👆 Select a mining site from the sidebar")
            m = create_simple_map(tuple(default_location["center"]), default_location["zoom"])
            st_folium(m, width=None, height=400, returned_objects=[])

        # Guard against an empty example list: load_examples() falls back to
        # {"examples": []} when data/examples.json is missing, and
        # st.columns(0) raises.
        if examples:
            st.markdown("### Available Sites")
            cols = st.columns(len(examples))
            for i, ex in enumerate(examples):
                with cols[i]:
                    st.markdown(f"**{ex['name']}**")
                    st.caption(ex['tenement_id'])

    # Footer
    st.markdown("---")
    st.caption("MineWatchAI - AI-driven green technology for sustainable mining rehabilitation.")
633
+
634
+
635
# Entry point when executed via `streamlit run app.py`.
if __name__ == "__main__":
    main()
assets/style.css ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* MineWatchAI custom styles (project formerly named RehabWatch) */
2
+
3
+ /* Main container styling */
4
+ .main .block-container {
5
+ padding-top: 2rem;
6
+ padding-bottom: 2rem;
7
+ max-width: 1200px;
8
+ }
9
+
10
+ /* Header styling */
11
+ h1 {
12
+ color: #2E7D32;
13
+ }
14
+
15
+ h2, h3 {
16
+ color: #1B5E20;
17
+ }
18
+
19
+ /* Sidebar styling */
20
+ .css-1d391kg {
21
+ background-color: #F5F5F5;
22
+ }
23
+
24
+ /* Sidebar header - Analysis Hub */
25
+ [data-testid="stSidebar"] > div:first-child {
26
+ padding-top: 1rem;
27
+ }
28
+
29
+ [data-testid="stSidebar"] h2 {
30
+ background: linear-gradient(135deg, #2E7D32, #4CAF50);
31
+ -webkit-background-clip: text;
32
+ -webkit-text-fill-color: transparent;
33
+ background-clip: text;
34
+ font-weight: 700;
35
+ }
36
+
37
+ /* Mobile sidebar toggle enhancement */
38
+ [data-testid="collapsedControl"] {
39
+ background-color: #2E7D32;
40
+ border-radius: 8px;
41
+ padding: 8px;
42
+ }
43
+
44
+ [data-testid="collapsedControl"]:hover {
45
+ background-color: #1B5E20;
46
+ }
47
+
48
+ [data-testid="collapsedControl"] svg {
49
+ color: white;
50
+ }
51
+
52
+ /* Button styling */
53
+ .stButton > button {
54
+ border-radius: 8px;
55
+ font-weight: 600;
56
+ transition: all 0.3s ease;
57
+ }
58
+
59
+ .stButton > button:hover {
60
+ transform: translateY(-2px);
61
+ box-shadow: 0 4px 12px rgba(46, 125, 50, 0.3);
62
+ }
63
+
64
+ /* Primary button */
65
+ .stButton > button[kind="primary"] {
66
+ background-color: #2E7D32;
67
+ border-color: #2E7D32;
68
+ }
69
+
70
+ .stButton > button[kind="primary"]:hover {
71
+ background-color: #1B5E20;
72
+ border-color: #1B5E20;
73
+ }
74
+
75
+ /* Metric cards */
76
+ [data-testid="metric-container"] {
77
+ background-color: #FAFAFA;
78
+ border: 1px solid #E0E0E0;
79
+ border-radius: 8px;
80
+ padding: 1rem;
81
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
82
+ }
83
+
84
+ [data-testid="metric-container"]:hover {
85
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
86
+ }
87
+
88
+ /* Success/Error messages */
89
+ .stSuccess {
90
+ background-color: #E8F5E9;
91
+ border-left: 4px solid #4CAF50;
92
+ }
93
+
94
+ .stError {
95
+ background-color: #FFEBEE;
96
+ border-left: 4px solid #F44336;
97
+ }
98
+
99
+ .stWarning {
100
+ background-color: #FFF3E0;
101
+ border-left: 4px solid #FF9800;
102
+ }
103
+
104
+ .stInfo {
105
+ background-color: #E3F2FD;
106
+ border-left: 4px solid #2196F3;
107
+ }
108
+
109
+ /* Tab styling */
110
+ .stTabs [data-baseweb="tab-list"] {
111
+ gap: 8px;
112
+ }
113
+
114
+ .stTabs [data-baseweb="tab"] {
115
+ background-color: #F5F5F5;
116
+ border-radius: 8px 8px 0 0;
117
+ padding: 10px 20px;
118
+ }
119
+
120
+ .stTabs [aria-selected="true"] {
121
+ background-color: #2E7D32;
122
+ color: white;
123
+ }
124
+
125
+ /* Expander styling */
126
+ .streamlit-expanderHeader {
127
+ background-color: #F5F5F5;
128
+ border-radius: 8px;
129
+ }
130
+
131
+ /* Progress bar */
132
+ .stProgress > div > div > div {
133
+ background-color: #4CAF50;
134
+ }
135
+
136
+ /* Map container */
137
+ iframe {
138
+ border-radius: 8px;
139
+ box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
140
+ }
141
+
142
+ /* Download buttons */
143
+ .stDownloadButton > button {
144
+ background-color: #1976D2;
145
+ color: white;
146
+ border: none;
147
+ }
148
+
149
+ .stDownloadButton > button:hover {
150
+ background-color: #1565C0;
151
+ }
152
+
153
+ /* Date input */
154
+ .stDateInput > div {
155
+ border-radius: 8px;
156
+ }
157
+
158
+ /* Select box */
159
+ .stSelectbox > div > div {
160
+ border-radius: 8px;
161
+ }
162
+
163
+ /* Text input */
164
+ .stTextInput > div > div {
165
+ border-radius: 8px;
166
+ }
167
+
168
+ /* Footer styling */
169
+ footer {
170
+ visibility: hidden;
171
+ }
172
+
173
+ /* Custom scrollbar */
174
+ ::-webkit-scrollbar {
175
+ width: 8px;
176
+ height: 8px;
177
+ }
178
+
179
+ ::-webkit-scrollbar-track {
180
+ background: #F5F5F5;
181
+ }
182
+
183
+ ::-webkit-scrollbar-thumb {
184
+ background: #BDBDBD;
185
+ border-radius: 4px;
186
+ }
187
+
188
+ ::-webkit-scrollbar-thumb:hover {
189
+ background: #9E9E9E;
190
+ }
191
+
192
+ /* Responsive adjustments */
193
+ @media (max-width: 768px) {
194
+ .main .block-container {
195
+ padding-left: 0.5rem;
196
+ padding-right: 0.5rem;
197
+ }
198
+
199
+ h1 {
200
+ font-size: 1.6rem;
201
+ }
202
+
203
+ h2 {
204
+ font-size: 1.3rem;
205
+ }
206
+
207
+ h3 {
208
+ font-size: 1.1rem;
209
+ }
210
+
211
+ /* Make metrics stack vertically on mobile */
212
+ [data-testid="metric-container"] {
213
+ padding: 0.5rem;
214
+ margin-bottom: 0.5rem;
215
+ }
216
+
217
+ /* Smaller score display on mobile */
218
+ .score-display span {
219
+ font-size: 48px !important;
220
+ }
221
+
222
+ /* Make charts more compact */
223
+ .plotly-graph-div {
224
+ height: 280px !important;
225
+ }
226
+
227
+ /* Improve tab navigation on mobile */
228
+ .stTabs [data-baseweb="tab"] {
229
+ padding: 8px 12px;
230
+ font-size: 0.85rem;
231
+ }
232
+
233
+ /* Better button sizing on mobile */
234
+ .stButton > button {
235
+ padding: 0.6rem 1rem;
236
+ font-size: 0.9rem;
237
+ }
238
+
239
+ /* Reduce map height on mobile */
240
+ iframe {
241
+ max-height: 350px;
242
+ }
243
+ }
244
+
245
+ /* Extra small screens */
246
+ @media (max-width: 480px) {
247
+ .main .block-container {
248
+ padding-left: 0.3rem;
249
+ padding-right: 0.3rem;
250
+ }
251
+
252
+ h1 {
253
+ font-size: 1.4rem;
254
+ }
255
+
256
+ /* Stack columns on very small screens */
257
+ [data-testid="column"] {
258
+ width: 100% !important;
259
+ flex: 100% !important;
260
+ }
261
+ }
262
+
263
+ /* Animation for loading */
264
+ @keyframes pulse {
265
+ 0%, 100% {
266
+ opacity: 1;
267
+ }
268
+ 50% {
269
+ opacity: 0.5;
270
+ }
271
+ }
272
+
273
+ .loading {
274
+ animation: pulse 1.5s ease-in-out infinite;
275
+ }
276
+
277
+ /* Score display styling */
278
+ .score-display {
279
+ text-align: center;
280
+ padding: 20px;
281
+ border-radius: 10px;
282
+ margin-bottom: 20px;
283
+ }
284
+
285
+ .score-excellent {
286
+ background-color: rgba(27, 94, 32, 0.1);
287
+ }
288
+
289
+ .score-good {
290
+ background-color: rgba(76, 175, 80, 0.1);
291
+ }
292
+
293
+ .score-moderate {
294
+ background-color: rgba(255, 152, 0, 0.1);
295
+ }
296
+
297
+ .score-low {
298
+ background-color: rgba(183, 28, 28, 0.1);
299
+ }
300
+
301
+ /* Chart container */
302
+ .plotly-graph-div {
303
+ border-radius: 8px;
304
+ }
305
+
306
+ /* Table styling */
307
+ .dataframe {
308
+ border-radius: 8px;
309
+ overflow: hidden;
310
+ }
311
+
312
+ .dataframe th {
313
+ background-color: #2E7D32;
314
+ color: white;
315
+ }
316
+
317
+ .dataframe tr:nth-child(even) {
318
+ background-color: #F5F5F5;
319
+ }
320
+
321
+ /* Tooltip styling */
322
+ [data-testid="stTooltipIcon"] {
323
+ color: #757575;
324
+ }
325
+
326
+ [data-testid="stTooltipIcon"]:hover {
327
+ color: #2E7D32;
328
+ }
data/examples.json ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "examples": [
3
+ {
4
+ "name": "Boddington Gold Mine",
5
+ "tenement_id": "M 70/1198",
6
+ "center": [-32.7500, 116.3700],
7
+ "zoom": 13,
8
+ "description": "Large gold/copper mine with active rehabilitation",
9
+ "geometry": {
10
+ "type": "rectangle",
11
+ "coordinates": [116.30, -32.80, 116.45, -32.70]
12
+ }
13
+ },
14
+ {
15
+ "name": "Kalgoorlie Super Pit",
16
+ "tenement_id": "M 26/767",
17
+ "center": [-30.7749, 121.5040],
18
+ "zoom": 13,
19
+ "description": "Iconic open-pit gold mine",
20
+ "geometry": {
21
+ "type": "rectangle",
22
+ "coordinates": [121.45, -30.82, 121.55, -30.74]
23
+ }
24
+ },
25
+ {
26
+ "name": "Greenbushes Lithium",
27
+ "tenement_id": "M 70/1412",
28
+ "center": [-33.8500, 116.0600],
29
+ "zoom": 14,
30
+ "description": "Major lithium mining operation",
31
+ "geometry": {
32
+ "type": "rectangle",
33
+ "coordinates": [116.02, -33.88, 116.10, -33.82]
34
+ }
35
+ },
36
+ {
37
+ "name": "Worsley Alumina",
38
+ "tenement_id": "M 70/199",
39
+ "center": [-33.2600, 116.0800],
40
+ "zoom": 13,
41
+ "description": "Bauxite mining with progressive rehabilitation",
42
+ "geometry": {
43
+ "type": "rectangle",
44
+ "coordinates": [115.98, -33.32, 116.18, -33.20]
45
+ }
46
+ },
47
+ {
48
+ "name": "Wiluna Gold",
49
+ "tenement_id": "M 53/1085",
50
+ "center": [-26.5900, 120.2300],
51
+ "zoom": 14,
52
+ "description": "Historical gold mining area",
53
+ "geometry": {
54
+ "type": "rectangle",
55
+ "coordinates": [120.18, -26.65, 120.28, -26.53]
56
+ }
57
+ }
58
+ ],
59
+ "default_location": {
60
+ "center": [-28.0, 121.0],
61
+ "zoom": 6,
62
+ "description": "Western Australia mining region"
63
+ },
64
+ "analysis_parameters": {
65
+ "cloud_threshold": 20,
66
+ "window_days": 15,
67
+ "change_threshold": 0.05,
68
+ "scale": 10
69
+ }
70
+ }
requirements.txt ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Core Streamlit
2
+ streamlit>=1.28.0,<2.0.0
3
+
4
+ # Satellite Data Access (Planetary Computer)
5
+ pystac-client>=0.7.0,<1.0.0
6
+ planetary-computer>=1.0.0,<2.0.0
7
+ stackstac>=0.5.0,<1.0.0
8
+
9
+ # Geospatial Processing
10
+ rioxarray>=0.15.0,<1.0.0
11
+ xarray>=2023.1.0,<2025.0.0
12
+ rasterio>=1.3.0,<2.0.0
13
+ geopandas>=0.14.0,<1.0.0
14
+ shapely>=2.0.0,<3.0.0
15
+ pyproj>=3.6.0,<4.0.0
16
+
17
+ # Data Processing
18
+ pandas>=2.0.0,<3.0.0
19
+ numpy>=1.24.0,<2.0.0
20
+ dask>=2023.1.0,<2025.0.0
21
+
22
+ # Visualization
23
+ plotly>=5.18.0,<6.0.0
24
+ folium>=0.15.0,<1.0.0
25
+ streamlit-folium>=0.15.0,<1.0.0
26
+ matplotlib>=3.7.0,<4.0.0
27
+
28
+ # Reporting
29
+ fpdf2>=2.7.0,<3.0.0
30
+
31
+ # Utilities
32
+ requests>=2.31.0,<3.0.0
33
+ Pillow>=10.0.0,<11.0.0
src/__init__.py ADDED
File without changes
src/analysis.py ADDED
@@ -0,0 +1,742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Comprehensive vegetation and terrain analysis module for RehabWatch.
3
+
4
+ Performs multi-index analysis including:
5
+ - Vegetation indices (NDVI, SAVI, EVI)
6
+ - Soil/water indices (BSI, NDWI, NDMI, NBR)
7
+ - Terrain analysis (slope, aspect, erosion risk)
8
+ - Land cover classification and change
9
+ - Rehabilitation metrics and scoring
10
+ """
11
+
12
+ import numpy as np
13
+ import xarray as xr
14
+ from datetime import datetime, timedelta
15
+ from typing import Dict, Any, Optional, Tuple, List
16
+
17
+ from .stac_utils import (
18
+ get_sentinel_composite,
19
+ calculate_ndvi,
20
+ calculate_savi,
21
+ calculate_evi,
22
+ calculate_ndwi,
23
+ calculate_ndmi,
24
+ calculate_bsi,
25
+ calculate_nbr,
26
+ calculate_all_indices,
27
+ calculate_vegetation_heterogeneity,
28
+ get_dem_data,
29
+ calculate_slope,
30
+ calculate_aspect,
31
+ calculate_terrain_ruggedness,
32
+ calculate_erosion_risk,
33
+ get_land_cover,
34
+ get_worldcover,
35
+ calculate_land_cover_change,
36
+ calculate_vegetation_cover_percent,
37
+ calculate_bare_ground_percent,
38
+ search_sentinel2,
39
+ create_reference_bbox,
40
+ get_bbox_center,
41
+ LULC_CLASSES,
42
+ WORLDCOVER_CLASSES
43
+ )
44
+
45
+
46
def analyze_vegetation_change(
    bbox: Tuple[float, float, float, float],
    date_before: str,
    date_after: str,
    window_days: int = 15,
    cloud_threshold: int = 25
) -> Dict[str, Any]:
    """
    Compare vegetation condition between two dates using multiple spectral indices.

    For each target date a +/- ``window_days`` window is used to build a
    cloud-filtered Sentinel-2 composite; every index is computed on both
    composites and differenced (after - before).

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat).
        date_before: Start date (YYYY-MM-DD).
        date_after: End date (YYYY-MM-DD).
        window_days: Half-width in days of each compositing window.
        cloud_threshold: Maximum scene cloud-cover percentage.

    Returns:
        Dict with both composites, per-index arrays and change arrays,
        NDVI heterogeneity maps, summary statistics, and the inputs echoed back.
    """
    fmt = '%Y-%m-%d'
    half_window = timedelta(days=window_days)

    def window(date_str: str) -> Tuple[str, str]:
        # Symmetric compositing window centred on the target date.
        centre = datetime.strptime(date_str, fmt)
        return (centre - half_window).strftime(fmt), (centre + half_window).strftime(fmt)

    start_b, end_b = window(date_before)
    start_a, end_a = window(date_after)

    composite_before = get_sentinel_composite(bbox, start_b, end_b, cloud_threshold)
    composite_after = get_sentinel_composite(bbox, start_a, end_a, cloud_threshold)

    indices_before = calculate_all_indices(composite_before)
    indices_after = calculate_all_indices(composite_after)

    # Per-index difference maps (positive = increase over the period).
    index_changes = {
        name: indices_after[name] - indices_before[name]
        for name in indices_before
    }

    # Spatial NDVI variability serves as a proxy for vegetation diversity.
    heterogeneity_before = calculate_vegetation_heterogeneity(indices_before['ndvi'])
    heterogeneity_after = calculate_vegetation_heterogeneity(indices_after['ndvi'])

    stats = calculate_statistics(indices_before, indices_after, index_changes, bbox)

    return {
        'composite_before': composite_before,
        'composite_after': composite_after,
        'indices_before': indices_before,
        'indices_after': indices_after,
        'index_changes': index_changes,
        'ndvi_before': indices_before['ndvi'],
        'ndvi_after': indices_after['ndvi'],
        'ndvi_change': index_changes['ndvi'],
        'heterogeneity_before': heterogeneity_before,
        'heterogeneity_after': heterogeneity_after,
        'stats': stats,
        'date_before': date_before,
        'date_after': date_after,
        'bbox': bbox,
    }
+ }
117
+
118
+
119
def analyze_terrain(
    bbox: Tuple[float, float, float, float],
    bsi: Optional[xr.DataArray] = None
) -> Dict[str, Any]:
    """
    Analyze terrain characteristics including slope, aspect, and erosion risk.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        bsi: Bare Soil Index array (optional, for erosion risk)

    Returns:
        Dict with 'dem', 'slope', 'aspect', 'ruggedness', 'erosion_risk'
        arrays and a 'stats' summary; {'error': ..., 'stats': {}} on failure.
    """
    try:
        dem = get_dem_data(bbox)

        slope = calculate_slope(dem)
        aspect = calculate_aspect(dem)
        ruggedness = calculate_terrain_ruggedness(dem)

        # Erosion risk combines steepness with bare-soil exposure, so it
        # is only available when a BSI layer is supplied.
        erosion_risk = None
        if bsi is not None:
            erosion_risk = calculate_erosion_risk(slope, bsi)

        terrain_stats = {
            'elevation_min': float(np.nanmin(dem.values)),
            'elevation_max': float(np.nanmax(dem.values)),
            'elevation_mean': float(np.nanmean(dem.values)),
            'slope_mean': float(np.nanmean(slope.values)),
            'slope_max': float(np.nanmax(slope.values)),
            'ruggedness_mean': float(np.nanmean(ruggedness.values)),
        }

        # Slope classification (degrees). FIX: use the count of valid
        # (finite) pixels as the denominator -- NaN pixels match none of
        # the comparisons, so dividing by the raw grid size made the four
        # classes sum to less than 100% wherever the DEM had nodata.
        slope_vals = slope.values
        valid_slope = slope_vals[np.isfinite(slope_vals)]
        valid_count = valid_slope.size

        if valid_count > 0:
            terrain_stats['percent_flat'] = round(float(np.sum(valid_slope < 5)) / valid_count * 100, 1)
            terrain_stats['percent_gentle'] = round(float(np.sum((valid_slope >= 5) & (valid_slope < 15))) / valid_count * 100, 1)
            terrain_stats['percent_moderate'] = round(float(np.sum((valid_slope >= 15) & (valid_slope < 30))) / valid_count * 100, 1)
            terrain_stats['percent_steep'] = round(float(np.sum(valid_slope >= 30)) / valid_count * 100, 1)
        else:
            terrain_stats['percent_flat'] = 0.0
            terrain_stats['percent_gentle'] = 0.0
            terrain_stats['percent_moderate'] = 0.0
            terrain_stats['percent_steep'] = 0.0

        if erosion_risk is not None:
            terrain_stats['erosion_risk_mean'] = float(np.nanmean(erosion_risk.values))
            # Same valid-pixel denominator fix; the erosion grid can carry
            # its own NaNs and its shape need not match the slope grid.
            risk_vals = erosion_risk.values
            valid_risk = risk_vals[np.isfinite(risk_vals)]
            if valid_risk.size > 0:
                terrain_stats['percent_high_erosion_risk'] = round(
                    float(np.sum(valid_risk > 0.6)) / valid_risk.size * 100, 1
                )
            else:
                terrain_stats['percent_high_erosion_risk'] = 0.0

        return {
            'dem': dem,
            'slope': slope,
            'aspect': aspect,
            'ruggedness': ruggedness,
            'erosion_risk': erosion_risk,
            'stats': terrain_stats,
        }

    except Exception as e:
        # Best-effort: terrain data may be unavailable for some regions.
        return {
            'error': str(e),
            'stats': {},
        }
189
+
190
+
191
def analyze_land_cover(
    bbox: Tuple[float, float, float, float],
    year_before: int,
    year_after: int
) -> Dict[str, Any]:
    """
    Compare land cover between two years and summarise the change.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat).
        year_before: Earlier year (2017-2023).
        year_after: Later year (2017-2023).

    Returns:
        Dict with both land-cover grids, a 'stats' summary and the class
        legend; {'error': ..., 'stats': {}} when retrieval fails.
    """
    try:
        lulc_before = get_land_cover(bbox, year_before)
        lulc_after = get_land_cover(bbox, year_after)

        change_stats = calculate_land_cover_change(lulc_before, lulc_after)

        # Headline percentages for vegetation and bare ground, both years.
        veg_b = calculate_vegetation_cover_percent(lulc_before)
        veg_a = calculate_vegetation_cover_percent(lulc_after)
        bare_b = calculate_bare_ground_percent(lulc_before)
        bare_a = calculate_bare_ground_percent(lulc_after)

        summary = {
            'vegetation_cover_before': round(veg_b, 1),
            'vegetation_cover_after': round(veg_a, 1),
            'vegetation_cover_change': round(veg_a - veg_b, 1),
            'bare_ground_before': round(bare_b, 1),
            'bare_ground_after': round(bare_a, 1),
            'bare_ground_change': round(bare_a - bare_b, 1),
            'year_before': year_before,
            'year_after': year_after,
            'class_changes': change_stats['changes'],
        }

        return {
            'lulc_before': lulc_before,
            'lulc_after': lulc_after,
            'stats': summary,
            'classes': LULC_CLASSES,
        }

    except Exception as e:
        # Best-effort: land-cover data may be missing for the area/year.
        return {'error': str(e), 'stats': {}}
245
+
246
+
247
def calculate_statistics(
    indices_before: Dict[str, xr.DataArray],
    indices_after: Dict[str, xr.DataArray],
    index_changes: Dict[str, xr.DataArray],
    bbox: Tuple[float, float, float, float]
) -> Dict[str, float]:
    """
    Summarise vegetation, soil, water and moisture conditions.

    Args:
        indices_before: Index arrays at the start date.
        indices_after: Index arrays at the end date.
        index_changes: Per-index difference arrays (after - before).
        bbox: Bounding box (kept for interface stability; not used here).

    Returns:
        Flat dict of rounded summary statistics.
    """

    def _valid(arr: xr.DataArray) -> np.ndarray:
        # Strip NaN pixels so counts and means reflect real observations.
        vals = arr.values
        return vals[~np.isnan(vals)]

    def _mean4(vals: np.ndarray) -> float:
        # Mean rounded to 4 decimals; 0 when there is no valid data.
        return round(float(np.nanmean(vals)), 4) if len(vals) > 0 else 0

    stats: Dict[str, float] = {}

    valid_before = _valid(indices_before['ndvi'])
    valid_after = _valid(indices_after['ndvi'])
    valid_change = _valid(index_changes['ndvi'])

    stats['ndvi_before_mean'] = _mean4(valid_before)
    stats['ndvi_after_mean'] = _mean4(valid_after)
    stats['ndvi_change_mean'] = _mean4(valid_change)
    stats['ndvi_change_std'] = round(float(np.nanstd(valid_change)), 4) if len(valid_change) > 0 else 0

    # Relative NDVI change; only meaningful for a positive baseline.
    baseline = stats['ndvi_before_mean']
    if baseline > 0:
        stats['percent_change'] = round((stats['ndvi_after_mean'] - baseline) / baseline * 100, 2)
    else:
        stats['percent_change'] = 0

    # Before/after means and deltas for the remaining indices.
    for name in ('savi', 'evi', 'ndwi', 'ndmi', 'bsi', 'nbr'):
        if name not in indices_before or name not in indices_after:
            continue
        stats[f'{name}_before_mean'] = _mean4(_valid(indices_before[name]))
        stats[f'{name}_after_mean'] = _mean4(_valid(indices_after[name]))
        stats[f'{name}_change'] = round(stats[f'{name}_after_mean'] - stats[f'{name}_before_mean'], 4)

    # Area accounting: one 10 m x 10 m pixel = 0.01 ha.
    pixel_area_ha = (10 * 10) / 10000
    total_pixels = len(valid_change)
    improved_pixels = np.sum(valid_change > 0.05)
    degraded_pixels = np.sum(valid_change < -0.05)
    stable_pixels = np.sum((valid_change >= -0.05) & (valid_change <= 0.05))

    stats['area_improved_ha'] = round(float(improved_pixels * pixel_area_ha), 2)
    stats['area_degraded_ha'] = round(float(degraded_pixels * pixel_area_ha), 2)
    stats['area_stable_ha'] = round(float(stable_pixels * pixel_area_ha), 2)
    stats['total_area_ha'] = round(float(total_pixels * pixel_area_ha), 2)

    if total_pixels > 0:
        stats['percent_improved'] = round((improved_pixels / total_pixels) * 100, 2)
        stats['percent_degraded'] = round((degraded_pixels / total_pixels) * 100, 2)
        stats['percent_stable'] = round((stable_pixels / total_pixels) * 100, 2)
    else:
        stats['percent_improved'] = 0
        stats['percent_degraded'] = 0
        stats['percent_stable'] = 0

    # Water presence: positive NDWI flags water / saturated ground.
    if 'ndwi' in indices_after:
        valid_ndwi = _valid(indices_after['ndwi'])
        water_pixels = np.sum(valid_ndwi > 0)
        stats['percent_water'] = round((water_pixels / len(valid_ndwi)) * 100, 2) if len(valid_ndwi) > 0 else 0

    # Bare soil: BSI above 0.1 flags exposed ground.
    if 'bsi' in indices_after:
        valid_bsi = _valid(indices_after['bsi'])
        bare_pixels = np.sum(valid_bsi > 0.1)
        stats['percent_bare_soil'] = round((bare_pixels / len(valid_bsi)) * 100, 2) if len(valid_bsi) > 0 else 0

    # Moisture stress: negative NDMI indicates a water-stressed canopy.
    if 'ndmi' in indices_after:
        valid_ndmi = _valid(indices_after['ndmi'])
        stressed_pixels = np.sum(valid_ndmi < 0)
        stats['percent_moisture_stressed'] = round((stressed_pixels / len(valid_ndmi)) * 100, 2) if len(valid_ndmi) > 0 else 0

    # NDVI density classes for the current (after) state.
    if len(valid_after) > 0:
        density_masks = {
            'percent_sparse_veg': (valid_after > 0) & (valid_after <= 0.2),
            'percent_low_veg': (valid_after > 0.2) & (valid_after <= 0.4),
            'percent_moderate_veg': (valid_after > 0.4) & (valid_after <= 0.6),
            'percent_dense_veg': valid_after > 0.6,
        }
        for key, mask in density_masks.items():
            stats[key] = round((np.sum(mask) / len(valid_after)) * 100, 2)

    return stats
360
+
361
+
362
def calculate_reference_ndvi(
    bbox: Tuple[float, float, float, float],
    date: str,
    window_days: int = 15,
    cloud_threshold: int = 25,
    buffer_deg: float = 0.01
) -> float:
    """
    Mean NDVI of a buffered reference area surrounding the site.

    Returns 0 when the composite cannot be retrieved or contains no
    valid pixels.
    """
    fmt = '%Y-%m-%d'
    centre = datetime.strptime(date, fmt)
    half = timedelta(days=window_days)
    start = (centre - half).strftime(fmt)
    end = (centre + half).strftime(fmt)

    ref_bbox = create_reference_bbox(bbox, buffer_deg)

    try:
        composite = get_sentinel_composite(ref_bbox, start, end, cloud_threshold)
        vals = calculate_ndvi(composite).values
        vals = vals[~np.isnan(vals)]
        # Best-effort: any failure means "no reference available".
        return float(np.nanmean(vals)) if len(vals) > 0 else 0
    except Exception:
        return 0
387
+
388
+
389
def calculate_rehab_score(site_ndvi: float, reference_ndvi: float) -> int:
    """
    Score rehabilitation progress on a 0-100 scale.

    The score expresses the site NDVI as a percentage of the reference
    (undisturbed) NDVI, clamped to [0, 100]. A non-positive reference
    yields 0 because no meaningful comparison is possible.
    """
    if reference_ndvi <= 0:
        return 0

    ratio_pct = round(site_ndvi / reference_ndvi * 100)
    return max(0, min(ratio_pct, 100))
401
+
402
+
403
def calculate_comprehensive_rehab_score(
    stats: Dict[str, float],
    terrain_stats: Optional[Dict[str, float]] = None,
    land_cover_stats: Optional[Dict[str, float]] = None,
    reference_ndvi: float = 0.5
) -> Dict[str, Any]:
    """
    Calculate a comprehensive rehabilitation score from multiple metrics.

    Component scores (each 0-100) are combined with fixed weights; weights
    for unavailable components (no terrain / land-cover data) are dropped
    and the remainder renormalised.

    Returns:
        Dict with the component scores plus 'overall_score'.
    """

    def clamp(value: float) -> int:
        # Round then clip into the 0-100 score range.
        return min(100, max(0, round(value)))

    scores: Dict[str, Any] = {
        # Vegetation: site NDVI relative to the reference target.
        'vegetation_score': clamp(stats.get('ndvi_after_mean', 0) / reference_ndvi * 100),
        # Improvement: 50 is neutral; net improved area shifts it up.
        'improvement_score': clamp(
            50 + stats.get('percent_improved', 0) - stats.get('percent_degraded', 0)
        ),
        # Soil stability: less bare soil is better.
        'soil_stability_score': clamp(100 - stats.get('percent_bare_soil', 50)),
        # Moisture: less stressed canopy is better.
        'moisture_score': clamp(100 - stats.get('percent_moisture_stressed', 50)),
    }

    if terrain_stats:
        # Terrain: less high-erosion-risk area is better.
        scores['terrain_score'] = clamp(100 - terrain_stats.get('percent_high_erosion_risk', 50))

    if land_cover_stats:
        # Land cover: vegetated fraction of the site.
        scores['land_cover_score'] = clamp(land_cover_stats.get('vegetation_cover_after', 0))

    weights = {
        'vegetation_score': 0.30,
        'improvement_score': 0.25,
        'soil_stability_score': 0.20,
        'moisture_score': 0.10,
        'terrain_score': 0.10,
        'land_cover_score': 0.05,
    }

    present = [(scores[k], w) for k, w in weights.items() if k in scores]
    total_weight = sum(w for _, w in present)
    weighted_sum = sum(s * w for s, w in present)

    scores['overall_score'] = round(weighted_sum / total_weight) if total_weight > 0 else 0

    return scores
464
+
465
+
466
def generate_interpretation(
    stats: Dict[str, float],
    rehab_score: int,
    terrain_stats: Optional[Dict] = None,
    land_cover_stats: Optional[Dict] = None
) -> str:
    """
    Build a plain-language summary of the analysis results.

    Sentences are appended in a fixed order (vegetation change, area
    breakdown, soil/moisture/water, terrain, land cover, rehabilitation
    score) and joined with single spaces.
    """
    parts: List[str] = []

    # --- Overall vegetation trend -------------------------------------
    change = stats.get('percent_change', 0)
    if change > 0:
        direction = 'improved'
        qualifier = 'significantly' if change > 10 else 'moderately'
    else:
        # Zero change deliberately reads as a (0.0%) slight decline.
        direction = 'declined'
        qualifier = 'slightly' if change > -10 else 'significantly'
    parts.append(
        f"Vegetation cover has {qualifier} {direction} by {abs(change):.1f}% "
        "over the analysis period."
    )

    # --- Improved vs degraded area ------------------------------------
    if stats.get('percent_improved', 0) > stats.get('percent_degraded', 0):
        parts.append(
            f"Approximately {stats['percent_improved']:.0f}% of the site "
            f"({stats['area_improved_ha']:.1f} ha) shows vegetation improvement, "
            f"while {stats['percent_degraded']:.0f}% ({stats['area_degraded_ha']:.1f} ha) "
            "shows decline."
        )
    else:
        parts.append(
            f"Approximately {stats['percent_degraded']:.0f}% of the site "
            f"({stats['area_degraded_ha']:.1f} ha) shows vegetation decline, "
            f"while {stats['percent_improved']:.0f}% ({stats['area_improved_ha']:.1f} ha) "
            "shows improvement."
        )

    # --- Soil, moisture and water conditions --------------------------
    bare_soil = stats.get('percent_bare_soil', 0)
    if bare_soil > 30:
        parts.append(f"Bare soil covers {bare_soil:.0f}% of the area, indicating potential erosion risk.")
    elif bare_soil > 10:
        parts.append(f"Moderate bare soil exposure ({bare_soil:.0f}%) is present.")

    moisture_stress = stats.get('percent_moisture_stressed', 0)
    if moisture_stress > 50:
        parts.append(f"Significant moisture stress detected in {moisture_stress:.0f}% of vegetation.")

    water = stats.get('percent_water', 0)
    if water > 5:
        parts.append(f"Water bodies or saturated areas cover {water:.0f}% of the site.")

    # --- Terrain ------------------------------------------------------
    if terrain_stats:
        steep = terrain_stats.get('percent_steep', 0)
        if steep > 20:
            parts.append(f"The terrain includes {steep:.0f}% steep slopes (>30 degrees).")

        erosion = terrain_stats.get('percent_high_erosion_risk', 0)
        if erosion > 30:
            parts.append(f"High erosion risk identified in {erosion:.0f}% of the area.")

    # --- Land cover ---------------------------------------------------
    if land_cover_stats:
        veg_change = land_cover_stats.get('vegetation_cover_change', 0)
        if veg_change > 5:
            parts.append(f"Land cover analysis shows {veg_change:.0f}% increase in vegetated area.")
        elif veg_change < -5:
            parts.append(f"Land cover analysis shows {abs(veg_change):.0f}% decrease in vegetated area.")

        bare_change = land_cover_stats.get('bare_ground_change', 0)
        if bare_change < -5:
            parts.append(f"Bare ground has decreased by {abs(bare_change):.0f}%.")

    # --- Rehabilitation score -----------------------------------------
    for threshold, label in (
        (80, 'excellent rehabilitation progress'),
        (60, 'good rehabilitation progress'),
        (40, 'moderate rehabilitation progress'),
        (20, 'early-stage rehabilitation'),
    ):
        if rehab_score >= threshold:
            quality = label
            break
    else:
        quality = 'limited rehabilitation progress to date'

    parts.append(
        f"The site has achieved {rehab_score}% of reference vegetation conditions, "
        f"indicating {quality}."
    )

    return " ".join(parts)
565
+
566
+
567
def get_monthly_ndvi_timeseries(
    bbox: Tuple[float, float, float, float],
    start_year: int,
    end_year: int,
    cloud_threshold: int = 30
) -> List[Dict[str, Any]]:
    """
    Build a monthly mean-NDVI time series for a bounding box.

    For each calendar month in [start_year, end_year] a cloud-filtered
    Sentinel-2 composite is built and its mean NDVI recorded. Future
    months, and months where no scenes are found or processing fails,
    are skipped, so gaps are possible.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat).
        start_year: First year (inclusive).
        end_year: Last year (inclusive).
        cloud_threshold: Maximum scene cloud-cover percentage.

    Returns:
        Chronologically sorted records {'date': 'YYYY-MM-15', 'ndvi': float}.
    """
    results: List[Dict[str, Any]] = []
    # FIX: hoisted out of the inner loop -- datetime.now() was re-evaluated
    # every month iteration (loop-invariant, and an inconsistent "today"
    # if the run crosses midnight).
    now = datetime.now()

    for year in range(start_year, end_year + 1):
        for month in range(1, 13):
            # Skip months that have not happened yet.
            if (year, month) > (now.year, now.month):
                continue

            start_date = f"{year}-{month:02d}-01"
            # Last day of the month = first day of the next month minus one
            # day (the former December special case folds into this).
            if month == 12:
                next_month = datetime(year + 1, 1, 1)
            else:
                next_month = datetime(year, month + 1, 1)
            end_date = (next_month - timedelta(days=1)).strftime('%Y-%m-%d')

            try:
                items = search_sentinel2(bbox, start_date, end_date, cloud_threshold)
                if len(items) == 0:
                    continue

                composite = get_sentinel_composite(
                    bbox, start_date, end_date, cloud_threshold
                )
                ndvi = calculate_ndvi(composite)
                valid_ndvi = ndvi.values[~np.isnan(ndvi.values)]

                if len(valid_ndvi) > 0:
                    results.append({
                        'date': f"{year}-{month:02d}-15",  # mid-month label
                        'ndvi': float(np.nanmean(valid_ndvi)),
                    })

            except Exception:
                # Best-effort: a failed month leaves a gap instead of
                # aborting the whole series.
                continue

    return sorted(results, key=lambda x: x['date'])
614
+
615
+
616
def get_multi_index_timeseries(
    bbox: Tuple[float, float, float, float],
    start_year: int,
    end_year: int,
    cloud_threshold: int = 30
) -> List[Dict[str, Any]]:
    """
    Build monthly time series for every spectral index over a bounding box.

    Same month-by-month compositing as get_monthly_ndvi_timeseries, but
    records the mean of every index returned by calculate_all_indices.
    Future months and months with no usable scenes are skipped.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat).
        start_year: First year (inclusive).
        end_year: Last year (inclusive).
        cloud_threshold: Maximum scene cloud-cover percentage.

    Returns:
        Chronologically sorted records like
        {'date': 'YYYY-MM-15', 'ndvi': ..., 'savi': ..., ...}.
    """
    results: List[Dict[str, Any]] = []
    # FIX: hoisted out of the inner loop -- datetime.now() was re-evaluated
    # every month iteration (loop-invariant, inconsistent across midnight).
    now = datetime.now()

    for year in range(start_year, end_year + 1):
        for month in range(1, 13):
            # Skip months that have not happened yet.
            if (year, month) > (now.year, now.month):
                continue

            start_date = f"{year}-{month:02d}-01"
            # Last day of the month = first day of the next month minus one
            # day (the former December special case folds into this).
            if month == 12:
                next_month = datetime(year + 1, 1, 1)
            else:
                next_month = datetime(year, month + 1, 1)
            end_date = (next_month - timedelta(days=1)).strftime('%Y-%m-%d')

            try:
                items = search_sentinel2(bbox, start_date, end_date, cloud_threshold)
                if len(items) == 0:
                    continue

                composite = get_sentinel_composite(
                    bbox, start_date, end_date, cloud_threshold
                )
                indices = calculate_all_indices(composite)

                record: Dict[str, Any] = {'date': f"{year}-{month:02d}-15"}
                for idx_name, idx_data in indices.items():
                    valid_vals = idx_data.values[~np.isnan(idx_data.values)]
                    if len(valid_vals) > 0:
                        record[idx_name] = float(np.nanmean(valid_vals))

                # Only keep months where at least one index was computable.
                if len(record) > 1:
                    results.append(record)

            except Exception:
                # Best-effort: failures leave a gap instead of aborting.
                continue

    return sorted(results, key=lambda x: x['date'])
666
+
667
+
668
def calculate_seasonal_stability(timeseries: List[Dict[str, Any]]) -> Dict[str, float]:
    """
    Summarise seasonal NDVI stability from a monthly time series.

    A low coefficient of variation ('ndvi_cv', percent) indicates more
    stable ecosystem function across seasons.

    Args:
        timeseries: Records as produced by get_monthly_ndvi_timeseries;
            entries without an 'ndvi' key are ignored.

    Returns:
        Dict with mean/std/CV/min/max/range of NDVI, or {} when fewer
        than four NDVI observations are available.
    """
    if len(timeseries) < 4:
        return {}

    ndvi_values = [r['ndvi'] for r in timeseries if 'ndvi' in r]

    if len(ndvi_values) < 4:
        return {}

    mean = float(np.mean(ndvi_values))
    std = float(np.std(ndvi_values))
    # FIX: guard the coefficient of variation against a zero mean -- the
    # unguarded division produced NaN/inf plus a RuntimeWarning for an
    # all-zero NDVI series.
    cv = (std / mean) * 100 if mean != 0 else 0.0

    return {
        'ndvi_mean': round(mean, 4),
        'ndvi_std': round(std, 4),
        'ndvi_cv': round(cv, 2),
        'ndvi_min': round(float(np.min(ndvi_values)), 4),
        'ndvi_max': round(float(np.max(ndvi_values)), 4),
        'ndvi_range': round(float(np.max(ndvi_values) - np.min(ndvi_values)), 4),
    }
689
+
690
+
691
def ndvi_to_image_array(ndvi: xr.DataArray) -> np.ndarray:
    """
    Convert an NDVI grid to an RGB uint8 image (brown -> yellow -> green).

    Values are linearly rescaled from the display range [-0.1, 0.8] to
    [0, 1] and clipped before the colour ramp is applied.
    """
    # FIX: dropped the unused `import matplotlib.pyplot as plt` -- pulling
    # in pyplot initialises backend machinery this function never uses.
    from matplotlib.colors import LinearSegmentedColormap

    colors = ['#8B4513', '#D2B48C', '#FFFF00', '#90EE90', '#228B22', '#006400']
    cmap = LinearSegmentedColormap.from_list('ndvi', colors)

    ndvi_normalized = (ndvi.values - (-0.1)) / (0.8 - (-0.1))
    ndvi_normalized = np.clip(ndvi_normalized, 0, 1)

    rgba = cmap(ndvi_normalized)
    rgb = (rgba[:, :, :3] * 255).astype(np.uint8)

    return rgb
706
+
707
+
708
def change_to_image_array(change: xr.DataArray) -> np.ndarray:
    """
    Convert an NDVI-change grid to an RGB uint8 image (red -> white -> green).

    Values are linearly rescaled from the display range [-0.3, 0.3] to
    [0, 1] and clipped before the diverging colour ramp is applied.
    """
    # FIX: dropped the unused `import matplotlib.pyplot as plt` -- pulling
    # in pyplot initialises backend machinery this function never uses.
    from matplotlib.colors import LinearSegmentedColormap

    colors = ['#B71C1C', '#EF9A9A', '#FFFFFF', '#A5D6A7', '#1B5E20']
    cmap = LinearSegmentedColormap.from_list('change', colors)

    change_normalized = (change.values - (-0.3)) / (0.3 - (-0.3))
    change_normalized = np.clip(change_normalized, 0, 1)

    rgba = cmap(change_normalized)
    rgb = (rgba[:, :, :3] * 255).astype(np.uint8)

    return rgb
723
+
724
+
725
def index_to_image_array(
    data: xr.DataArray,
    colormap: str = 'viridis',
    vmin: float = -1,
    vmax: float = 1
) -> np.ndarray:
    """
    Render an arbitrary index grid as an RGB uint8 image.

    Values are linearly rescaled from [vmin, vmax] to [0, 1] and clipped
    before the named matplotlib colormap is applied.
    """
    import matplotlib.pyplot as plt

    ramp = plt.get_cmap(colormap)

    scaled = np.clip((data.values - vmin) / (vmax - vmin), 0, 1)

    rgba = ramp(scaled)
    return (rgba[:, :, :3] * 255).astype(np.uint8)
src/report.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Report generation module for RehabWatch.
3
+ Creates PDF reports and CSV exports.
4
+ """
5
+
6
+ from fpdf import FPDF
7
+ from datetime import datetime
8
+ from typing import Dict, Any, Optional
9
+ import io
10
+
11
+
12
class RehabWatchPDF(FPDF):
    """FPDF subclass providing the RehabWatch report chrome.

    Supplies a branded page header, a centered page-number footer, and two
    small helpers (`chapter_title`, `chapter_body`) used to lay out report
    sections with consistent fonts and colors.
    """

    def header(self):
        """Draw the branded header (logo text, subtitle, rule) on each page."""
        brand_green = (46, 125, 50)
        muted_grey = (100, 100, 100)

        self.set_font('Helvetica', 'B', 16)
        self.set_text_color(*brand_green)
        self.cell(0, 10, 'RehabWatch', 0, 0, 'L')

        self.set_font('Helvetica', '', 10)
        self.set_text_color(*muted_grey)
        self.cell(0, 10, 'Mining Rehabilitation Assessment', 0, 1, 'R')

        # Horizontal rule separating the header from the page body.
        self.line(10, 25, 200, 25)
        self.ln(10)

    def footer(self):
        """Draw a centered 'Page x/N' footer on each page."""
        self.set_y(-15)
        self.set_font('Helvetica', 'I', 8)
        self.set_text_color(128, 128, 128)
        # {nb} is replaced by the total page count via alias_nb_pages().
        self.cell(0, 10, f'Page {self.page_no()}/{{nb}}', 0, 0, 'C')

    def chapter_title(self, title: str):
        """Render a bold section heading."""
        self.set_font('Helvetica', 'B', 14)
        self.set_text_color(33, 33, 33)
        self.cell(0, 10, title, 0, 1, 'L')
        self.ln(2)

    def chapter_body(self, body: str):
        """Render wrapped body text beneath a section heading."""
        self.set_font('Helvetica', '', 11)
        self.set_text_color(66, 66, 66)
        self.multi_cell(0, 6, body)
        self.ln(4)
46
+
47
+
48
def generate_pdf_report(
    tenement_id: str,
    stats: Dict[str, float],
    rehab_score: int,
    interpretation: str,
    date_before: str,
    date_after: str,
    mine_name: Optional[str] = None
) -> bytes:
    """
    Generate a PDF report of the rehabilitation assessment.

    The report is laid out page-by-page via sequential FPDF drawing calls, so
    statement order below is significant.

    Args:
        tenement_id: Mining tenement identifier
        stats: Statistics dictionary from analysis (some keys optional,
            accessed with .get() and a 0 fallback)
        rehab_score: Rehabilitation score (0-100)
        interpretation: Plain-language interpretation
        date_before: Analysis start date
        date_after: Analysis end date
        mine_name: Optional name of the mine

    Returns:
        PDF as bytes for download
    """
    pdf = RehabWatchPDF()
    # Enables the {nb} total-page-count placeholder used in the footer.
    pdf.alias_nb_pages()
    pdf.add_page()

    # Title
    pdf.set_font('Helvetica', 'B', 24)
    pdf.set_text_color(33, 33, 33)
    pdf.cell(0, 15, 'Rehabilitation Assessment Report', 0, 1, 'C')
    pdf.ln(5)

    # Site Information
    pdf.set_font('Helvetica', '', 12)
    pdf.set_text_color(66, 66, 66)

    if mine_name:
        pdf.cell(0, 8, f'Site: {mine_name}', 0, 1, 'C')
    pdf.cell(0, 8, f'Tenement ID: {tenement_id}', 0, 1, 'C')
    pdf.cell(0, 8, f'Analysis Period: {date_before} to {date_after}', 0, 1, 'C')
    pdf.cell(0, 8, f'Report Generated: {datetime.now().strftime("%Y-%m-%d %H:%M")}', 0, 1, 'C')
    pdf.ln(10)

    # Rehabilitation Score Box: filled rectangle drawn first, text over it.
    pdf.set_fill_color(240, 240, 240)
    pdf.rect(60, pdf.get_y(), 90, 35, 'F')

    pdf.set_font('Helvetica', 'B', 12)
    pdf.set_text_color(66, 66, 66)
    pdf.cell(0, 8, 'Rehabilitation Score', 0, 1, 'C')

    # Score color based on value (traffic-light thresholds at 40 and 60).
    if rehab_score >= 60:
        pdf.set_text_color(46, 125, 50)  # Green
    elif rehab_score >= 40:
        pdf.set_text_color(255, 152, 0)  # Orange
    else:
        pdf.set_text_color(183, 28, 28)  # Red

    pdf.set_font('Helvetica', 'B', 36)
    pdf.cell(0, 15, f'{rehab_score}/100', 0, 1, 'C')
    pdf.ln(15)

    # Interpretation
    pdf.chapter_title('Summary')
    pdf.chapter_body(interpretation)

    # Statistics Table
    pdf.chapter_title('Detailed Statistics')

    # Table header (white text on brand green)
    pdf.set_font('Helvetica', 'B', 10)
    pdf.set_fill_color(46, 125, 50)
    pdf.set_text_color(255, 255, 255)
    pdf.cell(90, 8, 'Metric', 1, 0, 'C', True)
    pdf.cell(50, 8, 'Value', 1, 0, 'C', True)
    pdf.cell(50, 8, 'Unit', 1, 1, 'C', True)

    # Table data
    pdf.set_font('Helvetica', '', 10)
    pdf.set_text_color(66, 66, 66)

    # Optional indices fall back to 0 so older analysis results still render.
    table_data = [
        ('NDVI Before (mean)', f"{stats['ndvi_before_mean']:.4f}", 'index'),
        ('NDVI After (mean)', f"{stats['ndvi_after_mean']:.4f}", 'index'),
        ('NDVI Change (mean)', f"{stats['ndvi_change_mean']:.4f}", 'index'),
        ('Relative Change', f"{stats['percent_change']:.2f}", '%'),
        ('SAVI (mean)', f"{stats.get('savi_after_mean', 0):.4f}", 'index'),
        ('EVI (mean)', f"{stats.get('evi_after_mean', 0):.4f}", 'index'),
        ('NDMI (moisture)', f"{stats.get('ndmi_after_mean', 0):.4f}", 'index'),
        ('BSI (bare soil)', f"{stats.get('bsi_after_mean', 0):.4f}", 'index'),
        ('Water Presence', f"{stats.get('percent_water', 0):.2f}", '%'),
        ('Bare Soil Extent', f"{stats.get('percent_bare_soil', 0):.2f}", '%'),
        ('Moisture Stressed', f"{stats.get('percent_moisture_stressed', 0):.2f}", '%'),
        ('Area Improved', f"{stats['area_improved_ha']:.2f}", 'hectares'),
        ('Area Stable', f"{stats['area_stable_ha']:.2f}", 'hectares'),
        ('Area Degraded', f"{stats['area_degraded_ha']:.2f}", 'hectares'),
        ('Total Area', f"{stats['total_area_ha']:.2f}", 'hectares'),
        ('Percentage Improved', f"{stats['percent_improved']:.2f}", '%'),
        ('Percentage Degraded', f"{stats['percent_degraded']:.2f}", '%'),
    ]

    # Zebra-striped rows: alternate white / light grey fill.
    fill = False
    for row in table_data:
        if fill:
            pdf.set_fill_color(245, 245, 245)
        else:
            pdf.set_fill_color(255, 255, 255)
        pdf.cell(90, 7, row[0], 1, 0, 'L', fill)
        pdf.cell(50, 7, row[1], 1, 0, 'C', fill)
        pdf.cell(50, 7, row[2], 1, 1, 'C', fill)
        fill = not fill

    pdf.ln(10)

    # Methodology
    pdf.chapter_title('Methodology')
    methodology_text = """This assessment uses multiple vegetation and soil indices derived from Sentinel-2 satellite imagery, Copernicus DEM, and IO-LULC land cover data.

Vegetation Indices:
- NDVI (Normalized Difference Vegetation Index): Overall vegetation health
- SAVI (Soil Adjusted Vegetation Index): Better for sparse vegetation
- EVI (Enhanced Vegetation Index): Better for dense vegetation

Soil & Water Indices:
- BSI (Bare Soil Index): Identifies exposed soil areas
- NDWI (Normalized Difference Water Index): Water body detection
- NDMI (Normalized Difference Moisture Index): Vegetation moisture content

Terrain Analysis:
- Slope and aspect from Copernicus DEM GLO-30 (30m resolution)
- Erosion risk combining slope steepness and bare soil exposure

Land Cover:
- IO-LULC annual land cover classification (2017-2023)

The Rehabilitation Score combines vegetation health, improvement trends, soil stability, and moisture status compared to reference conditions."""

    pdf.chapter_body(methodology_text)

    # Data Sources and Disclaimers (start a fresh page)
    pdf.add_page()
    pdf.chapter_title('Data Sources')
    sources_text = """- Satellite Imagery: Copernicus Sentinel-2 L2A (Surface Reflectance)
- Spatial Resolution: 10 meters
- Temporal Resolution: ~5 days revisit
- Cloud Masking: Applied using Scene Classification Layer (SCL)
- Compositing Method: Median composite over 30-day windows
- Digital Elevation: Copernicus DEM GLO-30 (30m resolution)
- Land Cover: IO-LULC Annual v02 (10m resolution, 2017-2023)
- Data Access: Microsoft Planetary Computer (free, open access)"""

    pdf.chapter_body(sources_text)

    pdf.chapter_title('Disclaimer')
    disclaimer_text = """This report is generated automatically using satellite remote sensing data and should be used for preliminary assessment purposes only. Results may be affected by:

- Cloud cover and atmospheric conditions
- Seasonal vegetation variations
- Sensor calibration differences
- Topographic effects

For regulatory compliance or detailed rehabilitation assessment, ground-based verification is recommended. This analysis does not constitute professional advice and should be interpreted by qualified personnel.

RehabWatch is a demonstration tool and the developers assume no liability for decisions made based on this analysis."""

    pdf.chapter_body(disclaimer_text)

    # Output PDF bytes (fpdf2's output() returns a bytearray).
    return bytes(pdf.output())
220
+
221
+
222
def stats_to_csv(
    stats: Dict[str, float],
    tenement_id: str,
    rehab_score: int,
    date_before: str,
    date_after: str,
    mine_name: Optional[str] = None
) -> str:
    """
    Convert statistics to CSV format.

    User-supplied free-text fields (site name, tenement id) are quoted per
    RFC 4180 when they contain a comma, quote or newline, so that e.g. a
    site named "Kanowna, Belle" no longer corrupts the column layout.

    Args:
        stats: Statistics dictionary (optional keys read with .get() and 0)
        tenement_id: Mining tenement identifier
        rehab_score: Rehabilitation score
        date_before: Analysis start date
        date_after: Analysis end date
        mine_name: Optional mine name

    Returns:
        CSV string for download
    """
    def esc(value: Any) -> str:
        # Minimal RFC 4180 quoting: wrap in quotes and double any embedded
        # quotes when the field contains a delimiter, quote or newline.
        text = str(value)
        if any(ch in text for ch in (',', '"', '\n', '\r')):
            return '"' + text.replace('"', '""') + '"'
        return text

    lines = []

    # Header
    lines.append("RehabWatch Rehabilitation Assessment Export")
    lines.append(f"Generated,{datetime.now().strftime('%Y-%m-%d %H:%M')}")
    if mine_name:
        lines.append(f"Site Name,{esc(mine_name)}")
    lines.append(f"Tenement ID,{esc(tenement_id)}")
    lines.append(f"Analysis Start Date,{date_before}")
    lines.append(f"Analysis End Date,{date_after}")
    lines.append("")

    # Statistics
    lines.append("Metric,Value,Unit")
    lines.append(f"Rehabilitation Score,{rehab_score},/100")
    lines.append(f"NDVI Before (mean),{stats['ndvi_before_mean']:.4f},index")
    lines.append(f"NDVI After (mean),{stats['ndvi_after_mean']:.4f},index")
    lines.append(f"NDVI Change (mean),{stats['ndvi_change_mean']:.4f},index")
    lines.append(f"NDVI Change (std dev),{stats['ndvi_change_std']:.4f},index")
    lines.append(f"Relative Change,{stats['percent_change']:.2f},%")
    lines.append(f"SAVI Before,{stats.get('savi_before_mean', 0):.4f},index")
    lines.append(f"SAVI After,{stats.get('savi_after_mean', 0):.4f},index")
    lines.append(f"EVI Before,{stats.get('evi_before_mean', 0):.4f},index")
    lines.append(f"EVI After,{stats.get('evi_after_mean', 0):.4f},index")
    lines.append(f"NDWI After,{stats.get('ndwi_after_mean', 0):.4f},index")
    lines.append(f"NDMI After,{stats.get('ndmi_after_mean', 0):.4f},index")
    lines.append(f"BSI After,{stats.get('bsi_after_mean', 0):.4f},index")
    lines.append(f"Water Presence,{stats.get('percent_water', 0):.2f},%")
    lines.append(f"Bare Soil Extent,{stats.get('percent_bare_soil', 0):.2f},%")
    lines.append(f"Moisture Stressed,{stats.get('percent_moisture_stressed', 0):.2f},%")
    lines.append(f"Sparse Vegetation,{stats.get('percent_sparse_veg', 0):.2f},%")
    lines.append(f"Low Vegetation,{stats.get('percent_low_veg', 0):.2f},%")
    lines.append(f"Moderate Vegetation,{stats.get('percent_moderate_veg', 0):.2f},%")
    lines.append(f"Dense Vegetation,{stats.get('percent_dense_veg', 0):.2f},%")
    lines.append(f"Area Improved,{stats['area_improved_ha']:.2f},hectares")
    lines.append(f"Area Stable,{stats['area_stable_ha']:.2f},hectares")
    lines.append(f"Area Degraded,{stats['area_degraded_ha']:.2f},hectares")
    lines.append(f"Total Area,{stats['total_area_ha']:.2f},hectares")
    lines.append(f"Percentage Improved,{stats['percent_improved']:.2f},%")
    lines.append(f"Percentage Stable,{stats['percent_stable']:.2f},%")
    lines.append(f"Percentage Degraded,{stats['percent_degraded']:.2f},%")

    return "\n".join(lines)
287
+
288
+
289
def generate_summary_text(
    tenement_id: str,
    stats: Dict[str, float],
    rehab_score: int,
    interpretation: str,
    date_before: str,
    date_after: str
) -> str:
    """
    Generate a plain text summary of the assessment.

    The output is a fixed-width (80-column rules) report suitable for a
    .txt download or console display.

    Args:
        tenement_id: Mining tenement identifier
        stats: Statistics dictionary (all keys below are required)
        rehab_score: Rehabilitation score
        interpretation: Interpretation text
        date_before: Analysis start date
        date_after: Analysis end date

    Returns:
        Formatted text summary
    """
    # Template lines are intentionally flush-left: the string content is
    # literal, so indenting them would indent the rendered report.
    summary = f"""
================================================================================
REHABWATCH REHABILITATION ASSESSMENT
================================================================================

Tenement: {tenement_id}
Analysis Period: {date_before} to {date_after}
Report Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}

--------------------------------------------------------------------------------
REHABILITATION SCORE
--------------------------------------------------------------------------------

{rehab_score} / 100

--------------------------------------------------------------------------------
KEY FINDINGS
--------------------------------------------------------------------------------

{interpretation}

--------------------------------------------------------------------------------
DETAILED STATISTICS
--------------------------------------------------------------------------------

Vegetation Index (NDVI):
- Before: {stats['ndvi_before_mean']:.4f}
- After: {stats['ndvi_after_mean']:.4f}
- Change: {stats['ndvi_change_mean']:.4f} ({stats['percent_change']:.2f}%)

Area Analysis:
- Total Area: {stats['total_area_ha']:.2f} ha
- Area Improved: {stats['area_improved_ha']:.2f} ha ({stats['percent_improved']:.2f}%)
- Area Stable: {stats['area_stable_ha']:.2f} ha ({stats['percent_stable']:.2f}%)
- Area Degraded: {stats['area_degraded_ha']:.2f} ha ({stats['percent_degraded']:.2f}%)

================================================================================
DATA SOURCES
================================================================================

Satellite: Copernicus Sentinel-2 L2A
Resolution: 10 meters
Analysis: NDVI vegetation index

================================================================================
"""
    return summary
src/stac_utils.py ADDED
@@ -0,0 +1,752 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ STAC/Planetary Computer utilities for RehabWatch.
3
+ Handles satellite data access via Microsoft Planetary Computer.
4
+
5
+ Data Sources:
6
+ - Sentinel-2 L2A: Multispectral imagery for vegetation indices
7
+ - Copernicus DEM GLO-30: Digital elevation model for terrain analysis
8
+ - IO-LULC: Land cover classification (2017-2023)
9
+ - ESA WorldCover: Land cover classification (2020-2021)
10
+ """
11
+
12
+ import numpy as np
13
+ import xarray as xr
14
+ import rioxarray
15
+ import stackstac
16
+ import planetary_computer
17
+ from pystac_client import Client
18
+ from shapely.geometry import box, shape, mapping
19
+ from datetime import datetime, timedelta
20
+ from typing import Optional, List, Dict, Any, Tuple
21
+ import warnings
22
+
23
+ warnings.filterwarnings('ignore')
24
+
25
# Planetary Computer STAC endpoint (public, no API key required)
STAC_URL = "https://planetarycomputer.microsoft.com/api/stac/v1"

# Collection IDs as published in the Planetary Computer catalog
SENTINEL2_COLLECTION = "sentinel-2-l2a"
COPERNICUS_DEM_COLLECTION = "cop-dem-glo-30"
IO_LULC_COLLECTION = "io-lulc-annual-v02"
ESA_WORLDCOVER_COLLECTION = "esa-worldcover"

# Land cover class mappings for IO-LULC
# NOTE(review): class IDs 0, 3 and 6 are deliberately absent here —
# presumably unused/no-data codes in this product; confirm against the
# io-lulc-annual-v02 collection documentation.
LULC_CLASSES = {
    1: "Water",
    2: "Trees",
    4: "Flooded Vegetation",
    5: "Crops",
    7: "Built Area",
    8: "Bare Ground",
    9: "Snow/Ice",
    10: "Clouds",
    11: "Rangeland"
}

# ESA WorldCover class mappings (class codes are multiples of 10, plus 95)
WORLDCOVER_CLASSES = {
    10: "Tree cover",
    20: "Shrubland",
    30: "Grassland",
    40: "Cropland",
    50: "Built-up",
    60: "Bare / sparse vegetation",
    70: "Snow and ice",
    80: "Permanent water bodies",
    90: "Herbaceous wetland",
    95: "Mangroves",
    100: "Moss and lichen"
}
61
+
62
+
63
def get_stac_client() -> Client:
    """Open a STAC client against the Planetary Computer endpoint.

    The ``sign_inplace`` modifier attaches short-lived SAS tokens to asset
    URLs so returned items can be read without extra authentication.

    Returns:
        pystac_client.Client instance
    """
    signed_client = Client.open(STAC_URL, modifier=planetary_computer.sign_inplace)
    return signed_client
71
+
72
+
73
+ # =============================================================================
74
+ # SENTINEL-2 DATA ACCESS
75
+ # =============================================================================
76
+
77
def search_sentinel2(
    bbox: Tuple[float, float, float, float],
    start_date: str,
    end_date: str,
    cloud_cover: int = 20
) -> List[Any]:
    """
    Query the Planetary Computer catalog for Sentinel-2 L2A scenes.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        start_date: Start date (YYYY-MM-DD)
        end_date: End date (YYYY-MM-DD)
        cloud_cover: Maximum cloud cover percentage

    Returns:
        List of STAC items (possibly empty)
    """
    catalog = get_stac_client()

    results = catalog.search(
        collections=[SENTINEL2_COLLECTION],
        bbox=bbox,
        datetime=f"{start_date}/{end_date}",
        # Server-side filter on scene-level cloud cover metadata.
        query={"eo:cloud_cover": {"lt": cloud_cover}}
    )

    return list(results.items())
106
+
107
+
108
def get_sentinel_composite(
    bbox: Tuple[float, float, float, float],
    start_date: str,
    end_date: str,
    cloud_threshold: int = 20,
    resolution: int = 20,
    epsg: int = 32750
) -> xr.DataArray:
    """
    Get a cloud-free Sentinel-2 composite for a given bbox and date range.
    Includes all bands needed for comprehensive vegetation analysis.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        start_date: Start date string (YYYY-MM-DD)
        end_date: End date string (YYYY-MM-DD)
        cloud_threshold: Maximum cloud cover percentage (0-100)
        resolution: Output resolution in meters (default 20m for memory efficiency)
        epsg: Output projection (default 32750 = UTM zone 50S, Western Australia)

    Returns:
        xarray DataArray with median composite, scaled to 0-1 reflectance

    Raises:
        ValueError: If no images found for the specified criteria
    """
    items = search_sentinel2(bbox, start_date, end_date, cloud_threshold)

    if len(items) == 0:
        raise ValueError(
            f"No Sentinel-2 images found for the specified location and date range "
            f"({start_date} to {end_date}) with cloud cover below {cloud_threshold}%. "
            "Try expanding the date range or increasing the cloud threshold."
        )

    # Keep only the 5 least-cloudy scenes to bound memory usage.
    if len(items) > 5:
        items = sorted(items, key=lambda x: x.properties.get('eo:cloud_cover', 100))[:5]

    # Select all bands needed for indices:
    # B02 (Blue), B03 (Green), B04 (Red), B05 (Red Edge 1),
    # B06 (Red Edge 2), B07 (Red Edge 3), B08 (NIR),
    # B8A (NIR narrow), B11 (SWIR1), B12 (SWIR2), SCL (Scene Classification)
    bands = ["B02", "B03", "B04", "B05", "B06", "B07", "B08", "B8A", "B11", "B12", "SCL"]

    stack = stackstac.stack(
        items,
        assets=bands,
        bounds_latlon=bbox,
        resolution=resolution,
        epsg=epsg,
        dtype="float64",
        rescale=False,
        fill_value=np.nan,
        chunksize=1024  # Smaller chunks for memory efficiency
    )

    # Apply cloud masking using SCL (Scene Classification Layer).
    # SCL classes: 3 = cloud shadow, 7 = unclassified, 8 = cloud medium
    # probability, 9 = cloud high probability, 10 = thin cirrus.
    # BUGFIX: the previous mask (7 <= SCL <= 10) never removed cloud
    # shadows (class 3), which darken the median composite.
    scl = stack.sel(band="SCL")
    cloud_mask = (scl == 3) | ((scl >= 7) & (scl <= 10))

    # Apply mask to reflectance bands
    masked = stack.where(~cloud_mask)

    # Calculate median composite across the time dimension
    composite = masked.median(dim="time", skipna=True)

    # Scale to 0-1 reflectance (Sentinel-2 L2A is in 0-10000)
    composite = composite / 10000.0

    return composite.compute()
177
+
178
+
179
+ # =============================================================================
180
+ # VEGETATION INDICES
181
+ # =============================================================================
182
+
183
+ def calculate_ndvi(data: xr.DataArray) -> xr.DataArray:
184
+ """
185
+ Calculate NDVI (Normalized Difference Vegetation Index).
186
+
187
+ NDVI = (NIR - Red) / (NIR + Red)
188
+
189
+ Range: -1 to 1 (higher = more vegetation)
190
+ """
191
+ red = data.sel(band="B04")
192
+ nir = data.sel(band="B08")
193
+
194
+ ndvi = (nir - red) / (nir + red + 1e-10)
195
+ return ndvi.clip(-1, 1)
196
+
197
+
198
+ def calculate_savi(data: xr.DataArray, L: float = 0.5) -> xr.DataArray:
199
+ """
200
+ Calculate SAVI (Soil Adjusted Vegetation Index).
201
+
202
+ SAVI = ((NIR - Red) / (NIR + Red + L)) * (1 + L)
203
+
204
+ Better than NDVI for areas with sparse vegetation.
205
+ L = 0.5 works well for most conditions.
206
+
207
+ Range: -1 to 1
208
+ """
209
+ red = data.sel(band="B04")
210
+ nir = data.sel(band="B08")
211
+
212
+ savi = ((nir - red) / (nir + red + L + 1e-10)) * (1 + L)
213
+ return savi.clip(-1, 1)
214
+
215
+
216
+ def calculate_evi(data: xr.DataArray) -> xr.DataArray:
217
+ """
218
+ Calculate EVI (Enhanced Vegetation Index).
219
+
220
+ EVI = 2.5 * ((NIR - Red) / (NIR + 6*Red - 7.5*Blue + 1))
221
+
222
+ More sensitive in high biomass regions, corrects for atmospheric influences.
223
+
224
+ Range: approximately -1 to 1
225
+ """
226
+ blue = data.sel(band="B02")
227
+ red = data.sel(band="B04")
228
+ nir = data.sel(band="B08")
229
+
230
+ evi = 2.5 * ((nir - red) / (nir + 6 * red - 7.5 * blue + 1 + 1e-10))
231
+ return evi.clip(-1, 1)
232
+
233
+
234
+ def calculate_ndwi(data: xr.DataArray) -> xr.DataArray:
235
+ """
236
+ Calculate NDWI (Normalized Difference Water Index).
237
+
238
+ NDWI = (Green - NIR) / (Green + NIR)
239
+
240
+ Detects water bodies. Higher values indicate water presence.
241
+
242
+ Range: -1 to 1
243
+ """
244
+ green = data.sel(band="B03")
245
+ nir = data.sel(band="B08")
246
+
247
+ ndwi = (green - nir) / (green + nir + 1e-10)
248
+ return ndwi.clip(-1, 1)
249
+
250
+
251
+ def calculate_ndmi(data: xr.DataArray) -> xr.DataArray:
252
+ """
253
+ Calculate NDMI (Normalized Difference Moisture Index).
254
+
255
+ NDMI = (NIR - SWIR1) / (NIR + SWIR1)
256
+
257
+ Measures vegetation water content/moisture stress.
258
+
259
+ Range: -1 to 1 (higher = more moisture)
260
+ """
261
+ nir = data.sel(band="B08")
262
+ swir1 = data.sel(band="B11")
263
+
264
+ ndmi = (nir - swir1) / (nir + swir1 + 1e-10)
265
+ return ndmi.clip(-1, 1)
266
+
267
+
268
+ def calculate_bsi(data: xr.DataArray) -> xr.DataArray:
269
+ """
270
+ Calculate BSI (Bare Soil Index).
271
+
272
+ BSI = ((SWIR1 + Red) - (NIR + Blue)) / ((SWIR1 + Red) + (NIR + Blue))
273
+
274
+ Identifies bare soil areas. Higher values indicate more bare soil.
275
+
276
+ Range: -1 to 1
277
+ """
278
+ blue = data.sel(band="B02")
279
+ red = data.sel(band="B04")
280
+ nir = data.sel(band="B08")
281
+ swir1 = data.sel(band="B11")
282
+
283
+ bsi = ((swir1 + red) - (nir + blue)) / ((swir1 + red) + (nir + blue) + 1e-10)
284
+ return bsi.clip(-1, 1)
285
+
286
+
287
+ def calculate_nbr(data: xr.DataArray) -> xr.DataArray:
288
+ """
289
+ Calculate NBR (Normalized Burn Ratio).
290
+
291
+ NBR = (NIR - SWIR2) / (NIR + SWIR2)
292
+
293
+ Useful for detecting burned areas and vegetation disturbance.
294
+
295
+ Range: -1 to 1
296
+ """
297
+ nir = data.sel(band="B08")
298
+ swir2 = data.sel(band="B12")
299
+
300
+ nbr = (nir - swir2) / (nir + swir2 + 1e-10)
301
+ return nbr.clip(-1, 1)
302
+
303
+
304
def calculate_all_indices(data: xr.DataArray) -> Dict[str, xr.DataArray]:
    """
    Compute every supported vegetation/soil/water index for a composite.

    Returns:
        Mapping of index name ('ndvi', 'savi', 'evi', 'ndwi', 'ndmi',
        'bsi', 'nbr') to its DataArray.
    """
    # Dispatch table keeps name->function pairing in one place; insertion
    # order matches the original return order.
    index_functions = {
        'ndvi': calculate_ndvi,
        'savi': calculate_savi,
        'evi': calculate_evi,
        'ndwi': calculate_ndwi,
        'ndmi': calculate_ndmi,
        'bsi': calculate_bsi,
        'nbr': calculate_nbr,
    }
    return {name: fn(data) for name, fn in index_functions.items()}
320
+
321
+
322
+ def calculate_vegetation_heterogeneity(ndvi: xr.DataArray, window_size: int = 5) -> xr.DataArray:
323
+ """
324
+ Calculate vegetation heterogeneity as local standard deviation of NDVI.
325
+
326
+ Higher values indicate more diverse/heterogeneous vegetation.
327
+ This serves as a proxy for species diversity.
328
+
329
+ Args:
330
+ ndvi: NDVI DataArray
331
+ window_size: Size of the moving window (default 5 = 50m at 10m resolution)
332
+
333
+ Returns:
334
+ DataArray with heterogeneity values
335
+ """
336
+ # Use rolling window to calculate local std
337
+ heterogeneity = ndvi.rolling(x=window_size, y=window_size, center=True).std()
338
+ return heterogeneity
339
+
340
+
341
+ # =============================================================================
342
+ # COPERNICUS DEM DATA ACCESS
343
+ # =============================================================================
344
+
345
def get_dem_data(
    bbox: Tuple[float, float, float, float],
    resolution: int = 30
) -> xr.DataArray:
    """
    Fetch Copernicus DEM GLO-30 elevation for a bounding box.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        resolution: Output resolution in meters (default 30m)

    Returns:
        xarray DataArray with elevation values in meters

    Raises:
        ValueError: If the catalog returns no DEM tiles for the bbox.
    """
    catalog = get_stac_client()
    found = list(
        catalog.search(collections=[COPERNICUS_DEM_COLLECTION], bbox=bbox).items()
    )

    if not found:
        raise ValueError("No DEM data found for the specified location.")

    stack = stackstac.stack(
        found,
        assets=["data"],
        bounds_latlon=bbox,
        resolution=resolution,
        epsg=32750,
        dtype="float32",
        fill_value=np.nan,
        chunksize=2048
    )

    # Median over time merges overlapping tiles into a single surface.
    elevation = stack.median(dim="time", skipna=True).squeeze()
    return elevation.compute()
386
+
387
+
388
def calculate_slope(dem: xr.DataArray, resolution: float = 30.0) -> xr.DataArray:
    """
    Compute terrain slope from a DEM, in degrees.

    Args:
        dem: Elevation DataArray
        resolution: Pixel resolution in meters

    Returns:
        Slope in degrees (0-90), on the same grid as the input DEM
    """
    # np.gradient returns derivatives along axis 0 (rows) then axis 1 (cols).
    grad_y, grad_x = np.gradient(dem.values, resolution)

    # Slope angle from the magnitude of the elevation gradient.
    rise = np.sqrt(grad_x ** 2 + grad_y ** 2)
    slope_deg = np.degrees(np.arctan(rise))

    return xr.DataArray(
        slope_deg,
        dims=dem.dims,
        coords=dem.coords,
        name='slope'
    )
414
+
415
+
416
def calculate_aspect(dem: xr.DataArray, resolution: float = 30.0) -> xr.DataArray:
    """
    Calculate aspect from DEM in degrees.

    Args:
        dem: Elevation DataArray
        resolution: Pixel resolution in meters

    Returns:
        Aspect in degrees (0-360, 0=North, 90=East)
    """
    # np.gradient returns derivatives along axis 0 (rows) then axis 1 (cols).
    dy, dx = np.gradient(dem.values, resolution)

    # Calculate aspect
    # NOTE(review): arctan2(-dx, dy) assumes rows increase northward; many
    # rasters store row 0 at the north edge, which would flip the sign of
    # dy — verify the 0=North convention against a known hillside.
    aspect = np.degrees(np.arctan2(-dx, dy))
    # Fold negative angles into the 0-360 range.
    aspect = np.where(aspect < 0, aspect + 360, aspect)

    aspect_da = xr.DataArray(
        aspect,
        dims=dem.dims,
        coords=dem.coords,
        name='aspect'
    )

    return aspect_da
441
+
442
+
443
+ def calculate_terrain_ruggedness(dem: xr.DataArray, window_size: int = 3) -> xr.DataArray:
444
+ """
445
+ Calculate Terrain Ruggedness Index (TRI).
446
+
447
+ TRI is the mean of the absolute differences between the center cell
448
+ and its surrounding cells.
449
+
450
+ Args:
451
+ dem: Elevation DataArray
452
+ window_size: Size of the moving window
453
+
454
+ Returns:
455
+ TRI values (higher = more rugged terrain)
456
+ """
457
+ # Calculate local range as a proxy for ruggedness
458
+ rolling = dem.rolling(x=window_size, y=window_size, center=True)
459
+ tri = rolling.max() - rolling.min()
460
+
461
+ return tri
462
+
463
+
464
+ def calculate_erosion_risk(
465
+ slope: xr.DataArray,
466
+ bsi: xr.DataArray,
467
+ slope_weight: float = 0.6,
468
+ bare_soil_weight: float = 0.4
469
+ ) -> xr.DataArray:
470
+ """
471
+ Calculate erosion risk index combining slope and bare soil.
472
+
473
+ Higher values indicate greater erosion risk.
474
+
475
+ Args:
476
+ slope: Slope in degrees
477
+ bsi: Bare Soil Index
478
+ slope_weight: Weight for slope component
479
+ bare_soil_weight: Weight for bare soil component
480
+
481
+ Returns:
482
+ Erosion risk index (0-1)
483
+ """
484
+ # Normalize slope to 0-1 (assuming max slope of 45 degrees)
485
+ slope_norm = (slope / 45.0).clip(0, 1)
486
+
487
+ # Normalize BSI to 0-1
488
+ bsi_norm = ((bsi + 1) / 2).clip(0, 1)
489
+
490
+ # Combined erosion risk
491
+ erosion_risk = slope_weight * slope_norm + bare_soil_weight * bsi_norm
492
+
493
+ return erosion_risk.clip(0, 1)
494
+
495
+
496
+ # =============================================================================
497
+ # LAND COVER DATA ACCESS
498
+ # =============================================================================
499
+
500
def get_land_cover(
    bbox: Tuple[float, float, float, float],
    year: int = 2023,
    resolution: int = 10
) -> xr.DataArray:
    """
    Fetch IO-LULC annual land cover for a bounding box.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        year: Year of land cover data (2017-2023)
        resolution: Output resolution in meters

    Returns:
        xarray DataArray with land cover class codes (see LULC_CLASSES)

    Raises:
        ValueError: If the catalog returns no items for the given year.
    """
    catalog = get_stac_client()
    found = list(
        catalog.search(
            collections=[IO_LULC_COLLECTION],
            bbox=bbox,
            datetime=f"{year}-01-01/{year}-12-31"
        ).items()
    )

    if not found:
        raise ValueError(f"No land cover data found for year {year}.")

    stack = stackstac.stack(
        found,
        assets=["data"],
        bounds_latlon=bbox,
        resolution=resolution,
        epsg=32750,
        dtype="uint8",
        fill_value=0,
        chunksize=2048
    )

    # max over time collapses overlapping tiles; class codes are categorical
    # so this simply prefers any valid (non-zero fill) value.
    lulc = stack.max(dim="time").squeeze()
    return lulc.compute()
543
+
544
+
545
def get_worldcover(
    bbox: Tuple[float, float, float, float],
    year: int = 2021,
    resolution: int = 10,
    epsg: int = 32750
) -> xr.DataArray:
    """
    Get ESA WorldCover land cover data.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        year: Year (2020 or 2021)
        resolution: Output resolution in meters
        epsg: Target projection EPSG code. Defaults to 32750 (UTM zone 50S,
            suited to Western Australia); pass another code for other regions.

    Returns:
        xarray DataArray with land cover classes

    Raises:
        ValueError: If no WorldCover items exist for the requested year/bbox.
    """
    client = get_stac_client()

    search = client.search(
        collections=[ESA_WORLDCOVER_COLLECTION],
        bbox=bbox,
        datetime=f"{year}-01-01/{year}-12-31"
    )

    items = list(search.items())

    if not items:
        raise ValueError(f"No WorldCover data found for year {year}.")

    stack = stackstac.stack(
        items,
        assets=["map"],
        bounds_latlon=bbox,
        resolution=resolution,
        epsg=epsg,
        dtype="uint8",
        fill_value=0,
        chunksize=2048
    )

    # Mosaic overlapping tiles (max class value per pixel) and materialize.
    worldcover = stack.max(dim="time").squeeze()

    return worldcover.compute()
588
+
589
+
590
def calculate_land_cover_change(
    lulc_before: xr.DataArray,
    lulc_after: xr.DataArray
) -> Dict[str, Any]:
    """
    Calculate land cover change statistics between two periods.

    Args:
        lulc_before: Land cover data for earlier period
        lulc_after: Land cover data for later period

    Returns:
        Dictionary with keys 'before' and 'after' (pixel counts per class name)
        and 'changes' (per-class dicts with 'before', 'after', 'change' and
        'percent_change').
    """
    # Pixel counts for each known class in both periods.
    before_counts = {}
    after_counts = {}

    for class_id, class_name in LULC_CLASSES.items():
        before_counts[class_name] = int((lulc_before == class_id).sum().values)
        after_counts[class_name] = int((lulc_after == class_id).sum().values)

    changes = {}
    for class_name in LULC_CLASSES.values():
        before = before_counts.get(class_name, 0)
        after = after_counts.get(class_name, 0)
        delta = after - before
        # Percent change relative to the true baseline. The previous
        # (before + 1) smoothing understated change for small classes and
        # produced misleading percentages when 'before' was zero; report
        # 100% growth from an empty baseline (0% if both periods are empty).
        if before > 0:
            percent_change = (delta / before) * 100
        else:
            percent_change = 100.0 if after > 0 else 0.0
        changes[class_name] = {
            'before': before,
            'after': after,
            'change': delta,
            'percent_change': percent_change
        }

    return {
        'before': before_counts,
        'after': after_counts,
        'changes': changes
    }
629
+
630
+
631
def calculate_vegetation_cover_percent(
    lulc: xr.DataArray,
    source: str = 'io-lulc'
) -> float:
    """
    Percentage of the analysed area classified as vegetation.

    Args:
        lulc: Land cover DataArray
        source: 'io-lulc' or 'worldcover' (class-ID scheme to use)

    Returns:
        Vegetation cover percentage (0-100)
    """
    if source == 'io-lulc':
        # Trees (2), Flooded Vegetation (4), Crops (5), Rangeland (11)
        vegetation_ids = [2, 4, 5, 11]
    else:
        # WorldCover: Tree cover (10), Shrubland (20), Grassland (30),
        # Cropland (40), Herbaceous wetland (90), Mangroves (95)
        vegetation_ids = [10, 20, 30, 40, 90, 95]

    covered = 0
    for class_id in vegetation_ids:
        covered += int((lulc == class_id).sum().values)

    return (covered / lulc.size) * 100
658
+
659
+
660
def calculate_bare_ground_percent(
    lulc: xr.DataArray,
    source: str = 'io-lulc'
) -> float:
    """
    Percentage of the analysed area classified as bare ground.

    Args:
        lulc: Land cover DataArray
        source: 'io-lulc' or 'worldcover' (class-ID scheme to use)

    Returns:
        Bare ground percentage (0-100)
    """
    # Class IDs differ between the two products.
    bare_ids = [8] if source == 'io-lulc' else [60]

    exposed = 0
    for class_id in bare_ids:
        exposed += int((lulc == class_id).sum().values)

    return (exposed / lulc.size) * 100
684
+
685
+
686
+ # =============================================================================
687
+ # UTILITY FUNCTIONS
688
+ # =============================================================================
689
+
690
def get_image_count(
    bbox: Tuple[float, float, float, float],
    start_date: str,
    end_date: str,
    cloud_threshold: int = 20
) -> int:
    """Return the number of Sentinel-2 scenes available for the query."""
    return len(search_sentinel2(bbox, start_date, end_date, cloud_threshold))
699
+
700
+
701
def get_image_dates(
    bbox: Tuple[float, float, float, float],
    start_date: str,
    end_date: str,
    cloud_threshold: int = 30
) -> List[str]:
    """Return the sorted, de-duplicated Sentinel-2 acquisition dates for a location."""
    matching = search_sentinel2(bbox, start_date, end_date, cloud_threshold)
    # Set comprehension removes duplicate acquisitions on the same day.
    unique_dates = {
        item.datetime.strftime("%Y-%m-%d")
        for item in matching
        if item.datetime
    }
    return sorted(unique_dates)
711
+
712
+
713
def geometry_to_bbox(geometry: Dict[str, Any]) -> Tuple[float, float, float, float]:
    """Return the (min_lon, min_lat, max_lon, max_lat) bounds of a GeoJSON geometry."""
    return shape(geometry).bounds
718
+
719
+
720
def bbox_to_geometry(bbox: Tuple[float, float, float, float]) -> Dict[str, Any]:
    """Return a GeoJSON polygon geometry covering the given bounding box."""
    west, south, east, north = bbox
    return mapping(box(west, south, east, north))
723
+
724
+
725
def get_bbox_center(bbox: Tuple[float, float, float, float]) -> Tuple[float, float]:
    """Return the (lat, lon) midpoint of a (min_lon, min_lat, max_lon, max_lat) box."""
    west, south, east, north = bbox
    # Note the (lat, lon) ordering of the result, as expected by Folium.
    return ((south + north) / 2, (west + east) / 2)
731
+
732
+
733
def expand_bbox(
    bbox: Tuple[float, float, float, float],
    buffer_deg: float = 0.01
) -> Tuple[float, float, float, float]:
    """Return a new bounding box grown outward by ``buffer_deg`` degrees on each side."""
    west, south, east, north = bbox
    return (west - buffer_deg, south - buffer_deg, east + buffer_deg, north + buffer_deg)
745
+
746
+
747
def create_reference_bbox(
    bbox: Tuple[float, float, float, float],
    buffer_deg: float = 0.01
) -> Tuple[float, float, float, float]:
    """Create a reference bounding box around the site.

    Thin semantic wrapper over :func:`expand_bbox`: the buffered area is used
    as the undisturbed reference region surrounding the analysis site.

    Args:
        bbox: Site bounding box (min_lon, min_lat, max_lon, max_lat)
        buffer_deg: Buffer to add on every side, in degrees

    Returns:
        The expanded (min_lon, min_lat, max_lon, max_lat) tuple.
    """
    return expand_bbox(bbox, buffer_deg)
src/visualization.py ADDED
@@ -0,0 +1,1103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Visualization module for RehabWatch.
3
+ Creates maps and charts using Folium and Plotly.
4
+ """
5
+
6
+ import numpy as np
7
+ import xarray as xr
8
+ import folium
9
+ from folium import plugins
10
+ from folium.raster_layers import ImageOverlay
11
+ import plotly.graph_objects as go
12
+ import streamlit as st
13
+ from typing import Dict, Any, List, Optional, Tuple
14
+ from matplotlib.colors import LinearSegmentedColormap
15
+ import base64
16
+ from io import BytesIO
17
+ from PIL import Image
18
+
19
+
20
# NDVI color palette (brown to green): bare soil -> dense, healthy vegetation
NDVI_COLORS = ['#8B4513', '#D2B48C', '#FFFF00', '#90EE90', '#228B22', '#006400']

# Change color palette (red-white-green diverging): decline -> no change -> growth
CHANGE_COLORS = ['#B71C1C', '#EF9A9A', '#FFFFFF', '#A5D6A7', '#1B5E20']
25
+
26
+
27
def array_to_colored_image(
    data: np.ndarray,
    colors: List[str],
    vmin: float,
    vmax: float
) -> np.ndarray:
    """
    Convert a 2D array to a colored RGBA image.

    Args:
        data: 2D numpy array
        colors: List of hex color strings for colormap
        vmin: Minimum value for normalization
        vmax: Maximum value for normalization

    Returns:
        RGBA numpy array (H, W, 4) with values 0-255; NaN pixels transparent.
    """
    cmap = LinearSegmentedColormap.from_list('custom', colors)

    # Normalize data into [0, 1]. Guard against a degenerate range
    # (vmin == vmax), which would otherwise divide by zero and fill
    # the image with NaN.
    span = vmax - vmin
    if span == 0:
        normalized = np.zeros_like(data, dtype=float)
    else:
        normalized = (data - vmin) / span
    normalized = np.clip(normalized, 0, 1)

    # Remember NaN locations so they can be made transparent below.
    mask = np.isnan(data)

    # Apply colormap and rescale from [0, 1] floats to 0-255 bytes.
    rgba = cmap(normalized)
    rgba = (rgba * 255).astype(np.uint8)

    # Set NaN pixels fully transparent (alpha channel = 0).
    rgba[mask, 3] = 0

    return rgba
62
+
63
+
64
def create_image_overlay(
    data: xr.DataArray,
    colors: List[str],
    vmin: float,
    vmax: float,
    bounds: List[List[float]]
) -> str:
    """
    Render a DataArray as a base64-encoded PNG data URI for a Folium overlay.

    Args:
        data: xarray DataArray
        colors: Color palette
        vmin: Min value for normalization
        vmax: Max value for normalization
        bounds: [[south, west], [north, east]] — accepted for API symmetry;
            the caller supplies the same bounds to the ImageOverlay itself.

    Returns:
        Base64 encoded PNG string (``data:image/png;base64,...``)
    """
    # Reduce to a plain 2D array.
    raw = data.values
    if raw.ndim > 2:
        raw = raw.squeeze()

    # Colorize, then flip vertically: image rows run top-to-bottom while
    # raster y-coordinates typically increase upward.
    colored = np.flipud(array_to_colored_image(raw, colors, vmin, vmax))

    # Serialize to PNG bytes in memory.
    png_buffer = BytesIO()
    Image.fromarray(colored, mode='RGBA').save(png_buffer, format='PNG')

    # Wrap in a data URI that Folium's ImageOverlay accepts directly.
    encoded = base64.b64encode(png_buffer.getvalue()).decode()
    return f"data:image/png;base64,{encoded}"
104
+
105
+
106
def create_comparison_map(
    bbox: Tuple[float, float, float, float],
    ndvi_before: xr.DataArray,
    ndvi_after: xr.DataArray,
    ndvi_change: xr.DataArray,
    center_coords: Tuple[float, float],
    zoom: int = 12
) -> folium.Map:
    """
    Create an interactive comparison map with multiple layers.

    Builds a Folium map with two switchable basemaps (Esri satellite,
    OpenStreetMap), three toggleable raster overlays (NDVI before/after and
    the change map, only the latter shown by default), the rectangular
    analysis boundary, a layer control, and the NDVI/change legends.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        ndvi_before: NDVI xarray at start date
        ndvi_after: NDVI xarray at end date
        ndvi_change: NDVI change xarray
        center_coords: Map center (lat, lon)
        zoom: Initial zoom level

    Returns:
        Folium Map object with all layers
    """
    # Create base map; tiles=None so only the basemaps added below appear.
    m = folium.Map(
        location=center_coords,
        zoom_start=zoom,
        tiles=None
    )

    # Add satellite basemap
    folium.TileLayer(
        tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
        attr='Esri',
        name='Satellite Imagery',
        overlay=False
    ).add_to(m)

    # Add OpenStreetMap as alternative
    folium.TileLayer(
        tiles='openstreetmap',
        name='OpenStreetMap',
        overlay=False
    ).add_to(m)

    # Calculate bounds for image overlay ([[south, west], [north, east]])
    min_lon, min_lat, max_lon, max_lat = bbox
    bounds = [[min_lat, min_lon], [max_lat, max_lon]]

    # Add NDVI Before layer (hidden by default); a failed render is logged
    # but does not abort building the rest of the map.
    try:
        ndvi_before_img = create_image_overlay(
            ndvi_before, NDVI_COLORS, -0.1, 0.8, bounds
        )
        ImageOverlay(
            image=ndvi_before_img,
            bounds=bounds,
            opacity=0.7,
            name='NDVI Before',
            show=False
        ).add_to(m)
    except Exception as e:
        print(f"Error adding NDVI Before layer: {e}")

    # Add NDVI After layer (hidden by default)
    try:
        ndvi_after_img = create_image_overlay(
            ndvi_after, NDVI_COLORS, -0.1, 0.8, bounds
        )
        ImageOverlay(
            image=ndvi_after_img,
            bounds=bounds,
            opacity=0.7,
            name='NDVI After',
            show=False
        ).add_to(m)
    except Exception as e:
        print(f"Error adding NDVI After layer: {e}")

    # Add Change Map layer (shown by default)
    try:
        change_img = create_image_overlay(
            ndvi_change, CHANGE_COLORS, -0.3, 0.3, bounds
        )
        ImageOverlay(
            image=change_img,
            bounds=bounds,
            opacity=0.7,
            name='Vegetation Change',
            show=True
        ).add_to(m)
    except Exception as e:
        print(f"Error adding Change layer: {e}")

    # Add tenement boundary (closed rectangle; first corner repeated last)
    boundary_coords = [
        [min_lat, min_lon],
        [min_lat, max_lon],
        [max_lat, max_lon],
        [max_lat, min_lon],
        [min_lat, min_lon]
    ]
    folium.PolyLine(
        locations=boundary_coords,
        color='#000000',
        weight=3,
        fill=False,
        popup='Analysis Boundary'
    ).add_to(m)

    # Add layer control
    folium.LayerControl(position='topright').add_to(m)

    # Add legends
    _add_legends(m)

    return m
222
+
223
+
224
def _add_legends(m: folium.Map) -> None:
    """Attach a fixed-position HTML legend (NDVI and change ramps) to the map.

    The legend is injected as raw HTML into the map's root element, so it is
    rendered by the browser outside Folium's layer system and is always visible.
    """
    # NOTE: gradient stops below must stay in sync with NDVI_COLORS / CHANGE_COLORS.
    legend_html = '''
    <div style="position: fixed; bottom: 50px; left: 50px; z-index: 1000;
                background-color: white; padding: 10px; border-radius: 5px;
                border: 2px solid grey; font-size: 12px; max-width: 150px;">
        <p style="margin: 0 0 5px 0; font-weight: bold;">NDVI Scale</p>
        <div style="background: linear-gradient(to right, #8B4513, #D2B48C, #FFFF00, #90EE90, #228B22, #006400);
                    width: 100%; height: 15px; border-radius: 3px;"></div>
        <div style="display: flex; justify-content: space-between;">
            <span>-0.1</span><span>0.8</span>
        </div>
        <hr style="margin: 8px 0;">
        <p style="margin: 0 0 5px 0; font-weight: bold;">Change</p>
        <div style="background: linear-gradient(to right, #B71C1C, #EF9A9A, #FFFFFF, #A5D6A7, #1B5E20);
                    width: 100%; height: 15px; border-radius: 3px;"></div>
        <div style="display: flex; justify-content: space-between;">
            <span style="color: #B71C1C;">-0.3</span>
            <span style="color: #1B5E20;">+0.3</span>
        </div>
        <p style="margin: 5px 0 0 0; font-size: 10px; text-align: center;">
            Red=Decline | Green=Growth
        </p>
    </div>
    '''
    m.get_root().html.add_child(folium.Element(legend_html))
250
+
251
+
252
def create_simple_map(
    center_coords: Tuple[float, float],
    zoom: int = 10,
    bbox: Optional[Tuple[float, float, float, float]] = None
) -> folium.Map:
    """
    Build a lightweight preview map, optionally outlining the analysis area.

    Args:
        center_coords: Map center (lat, lon)
        zoom: Zoom level
        bbox: Optional bounding box to display as a shaded polygon

    Returns:
        Folium Map object
    """
    preview = folium.Map(location=center_coords, zoom_start=zoom)

    # Esri world imagery as an alternative basemap.
    folium.TileLayer(
        tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
        attr='Esri',
        name='Satellite',
        overlay=False
    ).add_to(preview)

    if bbox is not None:
        west, south, east, north = bbox
        # Closed rectangle: first corner repeated at the end.
        corners = [
            [south, west],
            [south, east],
            [north, east],
            [north, west],
            [south, west]
        ]
        folium.Polygon(
            locations=corners,
            color='#1B5E20',
            weight=3,
            fill=True,
            fillColor='#2E7D32',
            fillOpacity=0.2,
            popup='Analysis Area'
        ).add_to(preview)

    folium.LayerControl().add_to(preview)
    return preview
299
+
300
+
301
def create_time_series_chart(
    timeseries_data: List[Dict[str, Any]],
    title: str = "NDVI Time Series"
) -> go.Figure:
    """
    Build an interactive NDVI time-series chart.

    Args:
        timeseries_data: List of dicts with 'date' and 'ndvi' keys
        title: Chart title

    Returns:
        Plotly Figure object (a placeholder annotation when no data is given)
    """
    if not timeseries_data:
        # No observations: return a figure with an explanatory annotation.
        empty_fig = go.Figure()
        empty_fig.add_annotation(
            text="No time series data available",
            xref="paper", yref="paper",
            x=0.5, y=0.5, showarrow=False,
            font=dict(size=16)
        )
        return empty_fig

    chart = go.Figure()

    # Main NDVI trace.
    chart.add_trace(go.Scatter(
        x=[point['date'] for point in timeseries_data],
        y=[point['ndvi'] for point in timeseries_data],
        mode='lines+markers',
        name='NDVI',
        line=dict(color='#2E7D32', width=2),
        marker=dict(size=6),
        hovertemplate='Date: %{x}<br>NDVI: %{y:.3f}<extra></extra>'
    ))

    # Horizontal reference thresholds for interpreting NDVI levels.
    chart.add_hline(y=0.6, line_dash="dash", line_color="#4CAF50",
                    annotation_text="Healthy Vegetation", annotation_position="right")
    chart.add_hline(y=0.2, line_dash="dash", line_color="#FF9800",
                    annotation_text="Sparse Vegetation", annotation_position="right")

    chart.update_layout(
        title=dict(text=title, font=dict(size=18)),
        xaxis_title="Date",
        yaxis_title="NDVI",
        yaxis=dict(range=[0, 1]),
        template="plotly_white",
        hovermode="x unified",
        height=400,
        margin=dict(l=60, r=40, t=60, b=60)
    )

    return chart
359
+
360
+
361
def create_stats_display(stats: Dict[str, float], rehab_score: int) -> None:
    """
    Display statistics using Streamlit components.

    Renders (as a side effect, nothing is returned): a large color-coded
    rehabilitation score, a progress bar, and six key metrics in three columns.

    Args:
        stats: Statistics dictionary (expects ndvi_*_mean, area_*_ha,
            percent_* and total_area_ha keys)
        rehab_score: Rehabilitation score (0-100)
    """
    # Rehabilitation Score with large display
    st.markdown("### Rehabilitation Score")

    # Color tracks the score band; the trailing "20" adds hex alpha for the
    # translucent background tint.
    score_color = _get_score_color(rehab_score)
    st.markdown(f"""
    <div style="text-align: center; padding: 20px; background-color: {score_color}20;
                border-radius: 10px; margin-bottom: 20px;">
        <span style="font-size: 72px; font-weight: bold; color: {score_color};">
            {rehab_score}
        </span>
        <span style="font-size: 24px; color: {score_color};">/100</span>
    </div>
    """, unsafe_allow_html=True)

    # Progress bar
    st.progress(rehab_score / 100)

    # Key Metrics in columns
    # Logic: arrow direction = numeric delta; color = good/bad for nature
    st.markdown("### Key Metrics")
    col1, col2, col3 = st.columns(3)

    ndvi_change = stats.get('ndvi_change_mean', 0)
    percent_change = stats.get('percent_change', 0)

    with col1:
        st.metric(
            label="NDVI Before",
            value=f"{stats['ndvi_before_mean']:.3f}",
            help="Normalized Difference Vegetation Index: measures vegetation health (-1 to 1)"
        )
        st.metric(
            label="Area Improved",
            value=f"{stats['area_improved_ha']:.1f} ha",
            delta=f"+{stats['percent_improved']:.1f}%",
            delta_color="normal"  # improvement is always good
        )

    with col2:
        # NDVI: increase = good (green), decrease = bad (red)
        st.metric(
            label="NDVI After",
            value=f"{stats['ndvi_after_mean']:.3f}",
            delta=f"{ndvi_change:+.3f}" if ndvi_change != 0 else None,
            delta_color="normal",  # green for +, red for -
            help="Current vegetation index value"
        )
        st.metric(
            label="Area Degraded",
            value=f"{stats['area_degraded_ha']:.1f} ha",
            delta=f"-{stats['percent_degraded']:.1f}%",
            delta_color="inverse"  # degradation showing as negative is correct
        )

    with col3:
        # Vegetation Change: increase = good (green), decrease = bad (red)
        st.metric(
            label="Vegetation Change",
            value=f"{percent_change:+.1f}%",
            delta=f"{percent_change:+.1f}%" if percent_change != 0 else None,
            delta_color="normal",  # green for +, red for -
            help="Percentage change in vegetation cover"
        )
        st.metric(
            label="Total Area",
            value=f"{stats['total_area_ha']:.1f} ha"
        )
436
+
437
+
438
+ def _get_score_color(score: int) -> str:
439
+ """Get color based on rehabilitation score."""
440
+ if score >= 80:
441
+ return "#1B5E20"
442
+ elif score >= 60:
443
+ return "#4CAF50"
444
+ elif score >= 40:
445
+ return "#FF9800"
446
+ elif score >= 20:
447
+ return "#F57C00"
448
+ else:
449
+ return "#B71C1C"
450
+
451
+
452
def create_area_breakdown_chart(stats: Dict[str, float]) -> go.Figure:
    """
    Build a donut chart of improved / stable / degraded area.

    Args:
        stats: Statistics dictionary with area values (hectares)

    Returns:
        Plotly Figure object
    """
    # (label, stats key, slice color) for each condition class.
    segments = [
        ('Improved', 'area_improved_ha', '#4CAF50'),
        ('Stable', 'area_stable_ha', '#FFC107'),
        ('Degraded', 'area_degraded_ha', '#F44336'),
    ]

    figure = go.Figure(data=[go.Pie(
        labels=[label for label, _, _ in segments],
        values=[stats[key] for _, key, _ in segments],
        marker_colors=[color for _, _, color in segments],
        hole=0.4,
        textinfo='label+percent',
        hovertemplate='%{label}<br>%{value:.1f} ha<br>%{percent}<extra></extra>'
    )])

    figure.update_layout(
        title="Area Breakdown",
        annotations=[dict(text='Area', x=0.5, y=0.5, font_size=16, showarrow=False)],
        showlegend=True,
        height=350
    )

    return figure
487
+
488
+
489
def create_ndvi_comparison_chart(stats: Dict[str, float]) -> go.Figure:
    """
    Build a two-bar chart comparing mean NDVI before and after.

    Args:
        stats: Statistics dictionary

    Returns:
        Plotly Figure object
    """
    before_mean = stats['ndvi_before_mean']
    after_mean = stats['ndvi_after_mean']

    figure = go.Figure()
    figure.add_trace(go.Bar(
        x=['Before', 'After'],
        y=[before_mean, after_mean],
        marker_color=['#8B4513', '#228B22'],
        text=[f"{before_mean:.3f}", f"{after_mean:.3f}"],
        textposition='outside'
    ))

    # 30% headroom above the taller bar so the outside labels fit.
    figure.update_layout(
        title="NDVI Comparison",
        yaxis_title="NDVI",
        yaxis=dict(range=[0, max(after_mean, before_mean) * 1.3]),
        template="plotly_white",
        height=350
    )

    return figure
518
+
519
+
520
def create_statistics_table(stats: Dict[str, float]) -> None:
    """
    Display full statistics as a formatted table.

    Builds a three-column (Metric / Value / Description) DataFrame from the
    stats dictionary and renders it with Streamlit as a side effect.

    Args:
        stats: Statistics dictionary (all keys referenced below must be present)
    """
    import pandas as pd

    # The three lists below are positionally aligned: row i of 'Metric'
    # corresponds to row i of 'Value' and 'Description'.
    data = {
        'Metric': [
            'NDVI Before (mean)',
            'NDVI After (mean)',
            'NDVI Change (mean)',
            'NDVI Change (std dev)',
            'Relative Change',
            'Area Improved',
            'Area Stable',
            'Area Degraded',
            'Total Area',
            '% Improved',
            '% Stable',
            '% Degraded'
        ],
        'Value': [
            f"{stats['ndvi_before_mean']:.4f}",
            f"{stats['ndvi_after_mean']:.4f}",
            f"{stats['ndvi_change_mean']:.4f}",
            f"{stats['ndvi_change_std']:.4f}",
            f"{stats['percent_change']:.2f}%",
            f"{stats['area_improved_ha']:.2f} ha",
            f"{stats['area_stable_ha']:.2f} ha",
            f"{stats['area_degraded_ha']:.2f} ha",
            f"{stats['total_area_ha']:.2f} ha",
            f"{stats['percent_improved']:.2f}%",
            f"{stats['percent_stable']:.2f}%",
            f"{stats['percent_degraded']:.2f}%"
        ],
        'Description': [
            'Mean vegetation index at analysis start',
            'Mean vegetation index at analysis end',
            'Average change in vegetation index',
            'Variation in vegetation change',
            'Percentage change in mean NDVI',
            'Area with NDVI increase > 0.05',
            'Area with NDVI change between -0.05 and 0.05',
            'Area with NDVI decrease > 0.05',
            'Total analyzed area',
            'Percentage of area showing improvement',
            'Percentage of area remaining stable',
            'Percentage of area showing degradation'
        ]
    }

    df = pd.DataFrame(data)
    st.dataframe(df, use_container_width=True, hide_index=True)
576
+
577
+
578
+ # =============================================================================
579
+ # NEW EXTENDED VISUALIZATIONS
580
+ # =============================================================================
581
+
582
# Color palettes for different indices (each ramps low -> high values)
BSI_COLORS = ['#228B22', '#90EE90', '#FFFF00', '#D2B48C', '#8B4513']  # Green to brown
WATER_COLORS = ['#8B4513', '#D2B48C', '#87CEEB', '#4169E1', '#000080']  # Brown to blue
MOISTURE_COLORS = ['#B71C1C', '#FF5722', '#FFEB3B', '#8BC34A', '#1B5E20']  # Dry to wet
SLOPE_COLORS = ['#1B5E20', '#4CAF50', '#FFEB3B', '#FF9800', '#B71C1C']  # Flat to steep
EROSION_COLORS = ['#1B5E20', '#4CAF50', '#FFEB3B', '#FF5722', '#B71C1C']  # Low to high risk

# Land cover color mapping: IO-LULC class id -> display hex color
LULC_COLORS = {
    1: '#0000FF',   # Water - Blue
    2: '#228B22',   # Trees - Forest Green
    4: '#006400',   # Flooded Vegetation - Dark Green
    5: '#FFD700',   # Crops - Gold
    7: '#808080',   # Built Area - Gray
    8: '#D2691E',   # Bare Ground - Chocolate
    9: '#FFFFFF',   # Snow/Ice - White
    10: '#C0C0C0',  # Clouds - Silver
    11: '#9ACD32'   # Rangeland - Yellow Green
}
601
+
602
+
603
def create_multi_index_map(
    bbox: Tuple[float, float, float, float],
    indices_after: Dict[str, xr.DataArray],
    index_changes: Dict[str, xr.DataArray],
    center_coords: Tuple[float, float],
    zoom: int = 12
) -> folium.Map:
    """
    Create an interactive map with multiple index layers.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        indices_after: Index key (e.g. 'ndvi') -> current-state DataArray
        index_changes: Index key -> change DataArray
        center_coords: Map center (lat, lon)
        zoom: Initial zoom level

    Returns:
        Folium Map with one toggleable overlay per available index
        (NDVI current shown by default).
    """
    m = folium.Map(location=center_coords, zoom_start=zoom, tiles=None)

    # Add basemaps
    folium.TileLayer(
        tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
        attr='Esri', name='Satellite', overlay=False
    ).add_to(m)

    folium.TileLayer(tiles='openstreetmap', name='OpenStreetMap', overlay=False).add_to(m)

    min_lon, min_lat, max_lon, max_lat = bbox
    bounds = [[min_lat, min_lon], [max_lat, max_lon]]

    # Index configurations: (data key, colors, vmin, vmax, display name)
    index_configs = [
        ('ndvi', NDVI_COLORS, -0.1, 0.8, 'NDVI'),
        ('savi', NDVI_COLORS, -0.1, 0.8, 'SAVI'),
        ('evi', NDVI_COLORS, -0.1, 0.8, 'EVI'),
        ('bsi', BSI_COLORS, -0.5, 0.5, 'Bare Soil Index'),
        ('ndwi', WATER_COLORS, -0.5, 0.5, 'Water Index (NDWI)'),
        ('ndmi', MOISTURE_COLORS, -0.5, 0.5, 'Moisture Index (NDMI)'),
    ]

    # Add current state layers. A failed layer is reported (consistent with
    # create_comparison_map) rather than silently swallowed, and does not
    # abort the remaining layers.
    for idx_key, colors, vmin, vmax, name in index_configs:
        if idx_key in indices_after:
            try:
                img = create_image_overlay(indices_after[idx_key], colors, vmin, vmax, bounds)
                ImageOverlay(
                    image=img, bounds=bounds, opacity=0.7,
                    name=f'{name} (Current)', show=(idx_key == 'ndvi')
                ).add_to(m)
            except Exception as e:
                print(f"Error adding {name} (Current) layer: {e}")

    # Add change layers (all hidden by default)
    for idx_key, _, _, _, name in index_configs:
        if idx_key in index_changes:
            try:
                img = create_image_overlay(index_changes[idx_key], CHANGE_COLORS, -0.3, 0.3, bounds)
                ImageOverlay(
                    image=img, bounds=bounds, opacity=0.7,
                    name=f'{name} Change', show=False
                ).add_to(m)
            except Exception as e:
                print(f"Error adding {name} Change layer: {e}")

    # Add boundary (closed rectangle)
    boundary_coords = [
        [min_lat, min_lon], [min_lat, max_lon],
        [max_lat, max_lon], [max_lat, min_lon], [min_lat, min_lon]
    ]
    folium.PolyLine(locations=boundary_coords, color='#000000', weight=3).add_to(m)

    folium.LayerControl(position='topright').add_to(m)
    _add_legends(m)

    return m
671
+
672
+
673
def create_terrain_map(
    bbox: Tuple[float, float, float, float],
    slope: xr.DataArray,
    aspect: Optional[xr.DataArray],
    erosion_risk: Optional[xr.DataArray],
    center_coords: Tuple[float, float],
    zoom: int = 12
) -> folium.Map:
    """
    Create an interactive terrain analysis map.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        slope: Slope DataArray in degrees
        aspect: Aspect DataArray — currently not rendered; accepted for API
            stability with callers that compute it
        erosion_risk: Optional erosion risk DataArray (0-1)
        center_coords: Map center (lat, lon)
        zoom: Initial zoom level

    Returns:
        Folium Map with a slope overlay (shown) and, when provided,
        an erosion-risk overlay (hidden by default).
    """
    m = folium.Map(location=center_coords, zoom_start=zoom, tiles=None)

    folium.TileLayer(
        tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
        attr='Esri', name='Satellite', overlay=False
    ).add_to(m)

    min_lon, min_lat, max_lon, max_lat = bbox
    bounds = [[min_lat, min_lon], [max_lat, max_lon]]

    # Slope layer on a 0-45 degree ramp. A failed render is reported
    # (consistent with create_comparison_map) rather than silently swallowed.
    try:
        slope_img = create_image_overlay(slope, SLOPE_COLORS, 0, 45, bounds)
        ImageOverlay(
            image=slope_img, bounds=bounds, opacity=0.7,
            name='Slope (degrees)', show=True
        ).add_to(m)
    except Exception as e:
        print(f"Error adding Slope layer: {e}")

    # Optional erosion risk layer (0-1 ramp, hidden by default)
    if erosion_risk is not None:
        try:
            erosion_img = create_image_overlay(erosion_risk, EROSION_COLORS, 0, 1, bounds)
            ImageOverlay(
                image=erosion_img, bounds=bounds, opacity=0.7,
                name='Erosion Risk', show=False
            ).add_to(m)
        except Exception as e:
            print(f"Error adding Erosion Risk layer: {e}")

    folium.LayerControl(position='topright').add_to(m)

    return m
718
+
719
+
720
def create_land_cover_map(
    bbox: Tuple[float, float, float, float],
    lulc: xr.DataArray,
    center_coords: Tuple[float, float],
    zoom: int = 12,
    year: int = 2023
) -> folium.Map:
    """
    Create a land cover classification map.

    Args:
        bbox: Bounding box (min_lon, min_lat, max_lon, max_lat)
        lulc: Land cover DataArray with IO-LULC class ids
        center_coords: Map center (lat, lon)
        zoom: Initial zoom level
        year: Year used in the overlay's layer name

    Returns:
        Folium Map with a categorical land-cover overlay.
    """
    # NOTE: removed an unused `from matplotlib.colors import ListedColormap`;
    # the categorical coloring below is done directly via LULC_COLORS.
    m = folium.Map(location=center_coords, zoom_start=zoom, tiles=None)

    folium.TileLayer(
        tiles='https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
        attr='Esri', name='Satellite', overlay=False
    ).add_to(m)

    min_lon, min_lat, max_lon, max_lat = bbox
    bounds = [[min_lat, min_lon], [max_lat, max_lon]]

    # Build a categorical RGBA image: each known class id gets its palette
    # color at alpha 200; unknown ids stay fully transparent. A failure is
    # reported (consistent with create_comparison_map) rather than silently
    # swallowed.
    try:
        arr = lulc.values.squeeze()
        rgba = np.zeros((*arr.shape, 4), dtype=np.uint8)

        for class_id, color in LULC_COLORS.items():
            mask = arr == class_id
            r = int(color[1:3], 16)
            g = int(color[3:5], 16)
            b = int(color[5:7], 16)
            rgba[mask] = [r, g, b, 200]

        # Flip vertically: image rows run top-to-bottom.
        rgba = np.flipud(rgba)
        img = Image.fromarray(rgba, mode='RGBA')
        buffer = BytesIO()
        img.save(buffer, format='PNG')
        buffer.seek(0)
        img_base64 = base64.b64encode(buffer.getvalue()).decode()
        img_url = f"data:image/png;base64,{img_base64}"

        ImageOverlay(
            image=img_url, bounds=bounds, opacity=0.7,
            name=f'Land Cover {year}', show=True
        ).add_to(m)
    except Exception as e:
        print(f"Error adding Land Cover layer: {e}")

    folium.LayerControl(position='topright').add_to(m)

    return m
772
+
773
+
774
def create_multi_index_chart(stats: Dict[str, float]) -> go.Figure:
    """
    Create a grouped bar chart comparing all indices before/after.

    Args:
        stats: Mapping containing '<index>_before_mean' and
            '<index>_after_mean' entries for ndvi/savi/evi/ndwi/ndmi/bsi.

    Returns:
        A plotly grouped bar figure with one 'Before' and one 'After'
        series per index; missing entries default to 0.
    """
    # (display label, stats key prefix) pairs, in display order.
    index_pairs = [
        ('NDVI', 'ndvi'), ('SAVI', 'savi'), ('EVI', 'evi'),
        ('NDWI', 'ndwi'), ('NDMI', 'ndmi'), ('BSI', 'bsi'),
    ]
    labels = [label for label, _ in index_pairs]
    before_vals = [stats.get(f'{key}_before_mean', 0) for _, key in index_pairs]
    after_vals = [stats.get(f'{key}_after_mean', 0) for _, key in index_pairs]

    fig = go.Figure()

    fig.add_trace(go.Bar(
        name='Before', x=labels, y=before_vals,
        marker_color='#8B4513', text=[f'{v:.3f}' for v in before_vals],
        textposition='outside'
    ))

    fig.add_trace(go.Bar(
        name='After', x=labels, y=after_vals,
        marker_color='#228B22', text=[f'{v:.3f}' for v in after_vals],
        textposition='outside'
    ))

    fig.update_layout(
        title='Multi-Index Comparison',
        barmode='group',
        yaxis_title='Index Value',
        template='plotly_white',
        height=400,
        legend=dict(yanchor="top", y=0.99, xanchor="right", x=0.99)
    )

    return fig
810
+
811
+
812
def create_terrain_stats_chart(terrain_stats: Dict[str, float]) -> go.Figure:
    """
    Create a chart showing terrain slope distribution.

    Args:
        terrain_stats: Mapping with 'percent_flat', 'percent_gentle',
            'percent_moderate' and 'percent_steep' percentages
            (missing keys default to 0).

    Returns:
        A plotly donut chart of the four slope classes.
    """
    # (label, stats key, slice color) per slope class, in display order.
    slope_classes = [
        ('Flat (<5°)', 'percent_flat', '#1B5E20'),
        ('Gentle (5-15°)', 'percent_gentle', '#4CAF50'),
        ('Moderate (15-30°)', 'percent_moderate', '#FF9800'),
        ('Steep (>30°)', 'percent_steep', '#B71C1C'),
    ]

    fig = go.Figure(data=[go.Pie(
        labels=[label for label, _, _ in slope_classes],
        values=[terrain_stats.get(key, 0) for _, key, _ in slope_classes],
        marker_colors=[color for _, _, color in slope_classes],
        hole=0.4,
        textinfo='label+percent'
    )])

    fig.update_layout(
        title='Slope Distribution',
        height=350
    )

    return fig
836
+
837
+
838
def create_land_cover_chart(land_cover_stats: Dict[str, Any]) -> go.Figure:
    """
    Create a grouped bar chart showing land cover change.

    Args:
        land_cover_stats: Mapping expected to carry a 'class_changes' dict
            of {class_name: {'before': count, 'after': count}}, plus
            optional 'year_before' / 'year_after' labels.

    Returns:
        A plotly grouped bar figure with per-class percentages for each
        epoch, or an empty figure when 'class_changes' is absent.
    """
    if 'class_changes' not in land_cover_stats:
        return go.Figure()

    changes = land_cover_stats['class_changes']
    class_names = list(changes.keys())
    counts_before = [changes[name].get('before', 0) for name in class_names]
    counts_after = [changes[name].get('after', 0) for name in class_names]

    def _as_percent(counts):
        # Normalise one epoch's counts to percentages; `or 1` guards
        # against division by zero when every count is 0.
        total = sum(counts) or 1
        return [c / total * 100 for c in counts]

    fig = go.Figure()

    fig.add_trace(go.Bar(
        name=f"Year {land_cover_stats.get('year_before', 'Before')}",
        x=class_names, y=_as_percent(counts_before), marker_color='#8B4513'
    ))

    fig.add_trace(go.Bar(
        name=f"Year {land_cover_stats.get('year_after', 'After')}",
        x=class_names, y=_as_percent(counts_after), marker_color='#228B22'
    ))

    fig.update_layout(
        title='Land Cover Change',
        barmode='group',
        yaxis_title='Percentage (%)',
        template='plotly_white',
        height=400
    )

    return fig
877
+
878
+
879
def create_vegetation_health_chart(stats: Dict[str, float]) -> go.Figure:
    """
    Create a chart showing vegetation health distribution.

    Args:
        stats: Mapping with 'percent_sparse_veg', 'percent_low_veg',
            'percent_moderate_veg' and 'percent_dense_veg' percentages
            (missing keys default to 0).

    Returns:
        A plotly donut chart of the four NDVI-based vegetation classes.
    """
    # (label, stats key, slice color) per vegetation class, in display order.
    veg_classes = [
        ('Sparse (0-0.2)', 'percent_sparse_veg', '#D2B48C'),
        ('Low (0.2-0.4)', 'percent_low_veg', '#90EE90'),
        ('Moderate (0.4-0.6)', 'percent_moderate_veg', '#228B22'),
        ('Dense (>0.6)', 'percent_dense_veg', '#006400'),
    ]

    fig = go.Figure(data=[go.Pie(
        labels=[label for label, _, _ in veg_classes],
        values=[stats.get(key, 0) for _, key, _ in veg_classes],
        marker_colors=[color for _, _, color in veg_classes],
        hole=0.4,
        textinfo='label+percent'
    )])

    fig.update_layout(
        title='Vegetation Health Distribution',
        height=350
    )

    return fig
903
+
904
+
905
def create_environmental_indicators_chart(stats: Dict[str, float]) -> go.Figure:
    """
    Create a radar chart showing environmental indicators.

    Args:
        stats: Mapping of index/percentage statistics; each axis below is
            derived from one entry and clamped onto a common 0-100 scale.

    Returns:
        A plotly radar (Scatterpolar) figure with five environmental axes.
    """
    # Axis name -> value normalised to 0-100.
    indicators = {
        # NDVI of 0.6 maps to a full score of 100.
        'Vegetation': min(100, stats.get('ndvi_after_mean', 0) * 100 / 0.6),
        # Inverted: less moisture stress means a healthier score.
        'Moisture': max(0, 100 - stats.get('percent_moisture_stressed', 50)),
        # Inverted: less bare soil means more stable ground.
        'Soil Stability': max(0, 100 - stats.get('percent_bare_soil', 50)),
        # 10% water coverage saturates the axis at 100.
        'Water Presence': min(100, stats.get('percent_water', 0) * 10),
        'Dense Veg': stats.get('percent_dense_veg', 0),
    }
    axis_names = list(indicators)
    axis_values = list(indicators.values())

    fig = go.Figure()

    fig.add_trace(go.Scatterpolar(
        # Repeat the first point so the radar polygon closes on itself.
        r=axis_values + axis_values[:1],
        theta=axis_names + axis_names[:1],
        fill='toself',
        fillcolor='rgba(46, 125, 50, 0.3)',
        line=dict(color='#2E7D32', width=2),
        name='Current State'
    ))

    fig.update_layout(
        polar=dict(
            radialaxis=dict(visible=True, range=[0, 100])
        ),
        title='Environmental Health Indicators',
        height=400,
        showlegend=False
    )

    return fig
941
+
942
+
943
def create_comprehensive_stats_display(
    stats: Dict[str, float],
    rehab_score: int,
    terrain_stats: Optional[Dict] = None,
    land_cover_stats: Optional[Dict] = None
) -> None:
    """
    Display comprehensive statistics with all new metrics.

    Renders (via Streamlit) the rehabilitation score, key spectral-index
    metrics with before/after deltas, and — when the optional inputs are
    provided — terrain and land-cover summary sections.

    Args:
        stats: Flat mapping of index statistics, e.g. 'ndvi_after_mean',
            'percent_bare_soil', 'savi_change'. Missing keys default to 0.
        rehab_score: Overall rehabilitation score on a 0-100 scale.
        terrain_stats: Optional slope/erosion statistics.
        land_cover_stats: Optional land-cover change statistics.
    """
    # Rehabilitation Score
    st.markdown("### Rehabilitation Score")
    score_color = _get_score_color(rehab_score)
    # The trailing "20" on the background color is a hex alpha suffix,
    # giving a translucent tint of the score color.
    st.markdown(f"""
    <div style="text-align: center; padding: 20px; background-color: {score_color}20;
                border-radius: 10px; margin-bottom: 20px;">
        <span style="font-size: 72px; font-weight: bold; color: {score_color};">
            {rehab_score}
        </span>
        <span style="font-size: 24px; color: {score_color};">/100</span>
    </div>
    """, unsafe_allow_html=True)
    st.progress(rehab_score / 100)

    # Primary Metrics with tooltips
    # Logic:
    # - Arrow direction: based on numeric delta (positive=up, negative=down)
    # - Color: "normal"  = green for increase (good), red for decrease (bad)
    #          "inverse" = red for increase (bad), green for decrease (good)
    st.markdown("### Key Metrics")
    col1, col2, col3, col4 = st.columns(4)

    # Get change values for proper arrow direction
    ndvi_change = stats.get('ndvi_change_mean', 0)
    percent_change = stats.get('percent_change', 0)

    with col1:
        # NDVI: increase = good (green), decrease = bad (red)
        st.metric(
            "NDVI",
            f"{stats.get('ndvi_after_mean', 0):.3f}",
            delta=f"{ndvi_change:+.3f}" if ndvi_change != 0 else None,
            delta_color="normal",  # green for +, red for -
            help="Normalized Difference Vegetation Index: measures vegetation health. Values range from -1 to 1, with >0.4 indicating healthy vegetation."
        )

    with col2:
        # Vegetation Change: increase = good (green), decrease = bad (red)
        # Use numeric delta for correct arrow direction
        st.metric(
            "Vegetation Change",
            f"{percent_change:+.1f}%",
            delta=f"{percent_change:+.1f}%" if percent_change != 0 else None,
            delta_color="normal",  # green for +, red for -
            help="Percentage change in vegetation cover between analysis dates."
        )

    with col3:
        bsi_change = stats.get('bsi_change', 0)
        # Bare Soil: increase = bad (red), decrease = good (green)
        st.metric(
            "Bare Soil",
            f"{stats.get('percent_bare_soil', 0):.1f}%",
            delta=f"{bsi_change:+.3f}" if bsi_change != 0 else None,
            delta_color="inverse",  # red for +, green for -
            help="Percentage of area with exposed bare soil. Lower values indicate better vegetation cover."
        )

    with col4:
        st.metric(
            "Water Presence",
            f"{stats.get('percent_water', 0):.1f}%",
            help="Percentage of area with water bodies or saturated soil."
        )

    # Secondary Metrics with tooltips
    st.markdown("### Additional Indices")
    col1, col2, col3 = st.columns(3)

    with col1:
        savi_change = stats.get('savi_change', 0)
        # SAVI: increase = good (green), decrease = bad (red)
        st.metric(
            "SAVI",
            f"{stats.get('savi_after_mean', 0):.3f}",
            delta=f"{savi_change:+.3f}" if savi_change != 0 else None,
            delta_color="normal",  # green for +, red for -
            help="Soil Adjusted Vegetation Index: better for sparse vegetation as it accounts for soil brightness."
        )
        evi_change = stats.get('evi_change', 0)
        # EVI: increase = good (green), decrease = bad (red)
        st.metric(
            "EVI",
            f"{stats.get('evi_after_mean', 0):.3f}",
            delta=f"{evi_change:+.3f}" if evi_change != 0 else None,
            delta_color="normal",  # green for +, red for -
            help="Enhanced Vegetation Index: more sensitive in high-biomass areas and corrects for atmospheric effects."
        )

    with col2:
        ndmi_change = stats.get('ndmi_change', 0)
        # NDMI: increase = good (green), decrease = bad (red)
        st.metric(
            "NDMI",
            f"{stats.get('ndmi_after_mean', 0):.3f}",
            delta=f"{ndmi_change:+.3f}" if ndmi_change != 0 else None,
            delta_color="normal",  # green for +, red for -
            help="Normalized Difference Moisture Index: measures vegetation water content. Higher values = more moisture."
        )
        bsi_val_change = stats.get('bsi_change', 0)
        # BSI: increase = bad (red), decrease = good (green)
        st.metric(
            "BSI",
            f"{stats.get('bsi_after_mean', 0):.3f}",
            delta=f"{bsi_val_change:+.3f}" if bsi_val_change != 0 else None,
            delta_color="inverse",  # red for +, green for -
            help="Bare Soil Index: identifies bare soil areas. Higher values indicate more exposed soil (negative for rehab)."
        )

    with col3:
        st.metric(
            "Moisture Stressed",
            f"{stats.get('percent_moisture_stressed', 0):.1f}%",
            help="Percentage of vegetation showing signs of water stress."
        )
        st.metric(
            "Dense Vegetation",
            f"{stats.get('percent_dense_veg', 0):.1f}%",
            help="Percentage of area with dense, healthy vegetation (NDVI > 0.6)."
        )

    # Terrain stats if available.
    # Fix: use `is not None` — the old truthiness check skipped this section
    # when slope_mean was legitimately 0.0 (perfectly flat terrain).
    if terrain_stats and terrain_stats.get('slope_mean') is not None:
        st.markdown("### Terrain Analysis")
        col1, col2, col3 = st.columns(3)

        with col1:
            st.metric("Mean Slope", f"{terrain_stats.get('slope_mean', 0):.1f}°")

        with col2:
            st.metric("Steep Areas", f"{terrain_stats.get('percent_steep', 0):.1f}%")

        with col3:
            if 'percent_high_erosion_risk' in terrain_stats:
                st.metric("High Erosion Risk", f"{terrain_stats.get('percent_high_erosion_risk', 0):.1f}%",
                          delta_color="inverse")

    # Land cover stats if available.
    # Fix: use `is not None` — the old truthiness check skipped this section
    # when vegetation cover was legitimately 0.0%.
    if land_cover_stats and land_cover_stats.get('vegetation_cover_after') is not None:
        st.markdown("### Land Cover")
        col1, col2 = st.columns(2)

        with col1:
            st.metric("Vegetation Cover",
                      f"{land_cover_stats.get('vegetation_cover_after', 0):.1f}%",
                      delta=f"{land_cover_stats.get('vegetation_cover_change', 0):.1f}%")

        with col2:
            st.metric("Bare Ground",
                      f"{land_cover_stats.get('bare_ground_after', 0):.1f}%",
                      delta=f"{land_cover_stats.get('bare_ground_change', 0):.1f}%",
                      delta_color="inverse")