refactor: strip to 4 real EO indicators, remove all tests and mock fallbacks
Browse filesKeep only NDVI, Water (MNDWI), SAR, and Built-up — the four indicators
with full openEO batch pipelines. Remove 8 unused indicators, all test
files, and every placeholder/fallback path. Failures now propagate as
errors instead of silently returning synthetic data.
- Delete: cropland, fires, food_security, lst, nightlights, no2, rainfall, vegetation
- Delete: entire tests/ directory
- Remove _fallback() from all indicators — process() raises on failure
- Remove harvest() fallback-to-placeholder on current data failure
- Worker no longer falls back to process() when batch submit fails
- Remove placeholder caveat from narrative engine and PDF report
- Re-weight overview scores for 4 indicators (ndvi 0.30, sar 0.25, water 0.25, buildup 0.20)
- Remove build_lst_graph from openeo_client
Generated with [Claude Code](https://claude.ai/code)
via [Happy](https://happy.engineering)
Co-Authored-By: Claude <noreply@anthropic.com>
Co-Authored-By: Happy <yesreply@happy.engineering>
- app/config.py +4 -9
- app/indicators/__init__.py +1 -17
- app/indicators/buildup.py +2 -30
- app/indicators/cropland.py +0 -343
- app/indicators/fires.py +0 -205
- app/indicators/food_security.py +0 -152
- app/indicators/lst.py +0 -268
- app/indicators/ndvi.py +2 -32
- app/indicators/nightlights.py +0 -229
- app/indicators/no2.py +0 -256
- app/indicators/rainfall.py +0 -304
- app/indicators/sar.py +8 -28
- app/indicators/vegetation.py +0 -324
- app/indicators/water.py +2 -30
- app/main.py +1 -1
- app/models.py +1 -1
- app/openeo_client.py +0 -27
- app/outputs/narrative.py +0 -28
- app/outputs/overview.py +2 -7
- app/outputs/report.py +0 -11
- app/outputs/thresholds.py +3 -44
- app/worker.py +60 -76
- tests/__init__.py +0 -0
- tests/conftest.py +0 -95
- tests/fixtures/create_fixtures.py +0 -70
- tests/fixtures/ndvi_monthly.tif +0 -0
- tests/fixtures/true_color.tif +0 -0
- tests/test_api_auth.py +0 -103
- tests/test_api_indicators.py +0 -38
- tests/test_api_jobs.py +0 -99
- tests/test_charts.py +0 -102
- tests/test_config.py +0 -34
- tests/test_database.py +0 -107
- tests/test_indicator_base.py +0 -173
- tests/test_indicator_buildup.py +0 -226
- tests/test_indicator_cropland.py +0 -227
- tests/test_indicator_fires.py +0 -91
- tests/test_indicator_lst.py +0 -117
- tests/test_indicator_ndvi.py +0 -285
- tests/test_indicator_nightlights.py +0 -91
- tests/test_indicator_no2.py +0 -55
- tests/test_indicator_rainfall.py +0 -97
- tests/test_indicator_sar.py +0 -235
- tests/test_indicator_vegetation.py +0 -90
- tests/test_indicator_water.py +0 -215
- tests/test_maps.py +0 -74
- tests/test_models.py +0 -151
- tests/test_narrative.py +0 -83
- tests/test_ndvi_e2e.py +0 -217
- tests/test_openeo_client.py +0 -190
|
@@ -23,13 +23,8 @@ OPENEO_CLIENT_SECRET: str | None = os.environ.get("OPENEO_CLIENT_SECRET")
|
|
| 23 |
# Normalized to 1.0. Indicators not selected or skipped are excluded
|
| 24 |
# and weights are re-normalized.
|
| 25 |
OVERVIEW_WEIGHTS: dict[str, float] = {
|
| 26 |
-
"
|
| 27 |
-
"sar": 0.
|
| 28 |
-
"
|
| 29 |
-
"
|
| 30 |
-
"water": 0.10,
|
| 31 |
-
"rainfall": 0.10,
|
| 32 |
-
"lst": 0.08,
|
| 33 |
-
"no2": 0.05,
|
| 34 |
-
"nightlights": 0.05,
|
| 35 |
}
|
|
|
|
| 23 |
# Normalized to 1.0. Indicators not selected or skipped are excluded
|
| 24 |
# and weights are re-normalized.
|
| 25 |
OVERVIEW_WEIGHTS: dict[str, float] = {
|
| 26 |
+
"ndvi": 0.30,
|
| 27 |
+
"sar": 0.25,
|
| 28 |
+
"water": 0.25,
|
| 29 |
+
"buildup": 0.20,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
}
|
|
@@ -1,27 +1,11 @@
|
|
| 1 |
from app.indicators.base import IndicatorRegistry
|
| 2 |
-
from app.indicators.fires import FiresIndicator
|
| 3 |
-
from app.indicators.cropland import CroplandIndicator
|
| 4 |
-
from app.indicators.vegetation import VegetationIndicator
|
| 5 |
-
from app.indicators.rainfall import RainfallIndicator
|
| 6 |
-
from app.indicators.water import WaterIndicator
|
| 7 |
-
from app.indicators.no2 import NO2Indicator
|
| 8 |
-
from app.indicators.lst import LSTIndicator
|
| 9 |
-
from app.indicators.nightlights import NightlightsIndicator
|
| 10 |
-
from app.indicators.food_security import FoodSecurityIndicator
|
| 11 |
from app.indicators.ndvi import NdviIndicator
|
|
|
|
| 12 |
from app.indicators.sar import SarIndicator
|
| 13 |
from app.indicators.buildup import BuiltupIndicator
|
| 14 |
|
| 15 |
registry = IndicatorRegistry()
|
| 16 |
registry.register(NdviIndicator())
|
| 17 |
-
registry.register(FiresIndicator())
|
| 18 |
-
registry.register(CroplandIndicator())
|
| 19 |
-
registry.register(VegetationIndicator())
|
| 20 |
-
registry.register(RainfallIndicator())
|
| 21 |
registry.register(WaterIndicator())
|
| 22 |
-
registry.register(NO2Indicator())
|
| 23 |
-
registry.register(LSTIndicator())
|
| 24 |
-
registry.register(NightlightsIndicator())
|
| 25 |
-
registry.register(FoodSecurityIndicator())
|
| 26 |
registry.register(SarIndicator())
|
| 27 |
registry.register(BuiltupIndicator())
|
|
|
|
| 1 |
from app.indicators.base import IndicatorRegistry
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
from app.indicators.ndvi import NdviIndicator
|
| 3 |
+
from app.indicators.water import WaterIndicator
|
| 4 |
from app.indicators.sar import SarIndicator
|
| 5 |
from app.indicators.buildup import BuiltupIndicator
|
| 6 |
|
| 7 |
registry = IndicatorRegistry()
|
| 8 |
registry.register(NdviIndicator())
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
registry.register(WaterIndicator())
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
registry.register(SarIndicator())
|
| 11 |
registry.register(BuiltupIndicator())
|
|
@@ -97,8 +97,7 @@ class BuiltupIndicator(BaseIndicator):
|
|
| 97 |
paths = current_job.download_results(current_dir)
|
| 98 |
current_path = self._find_tif(paths, current_dir)
|
| 99 |
except Exception as exc:
|
| 100 |
-
|
| 101 |
-
return self._fallback(aoi, time_range)
|
| 102 |
|
| 103 |
# Download baseline — optional (degrades gracefully)
|
| 104 |
baseline_path = None
|
|
@@ -260,11 +259,7 @@ class BuiltupIndicator(BaseIndicator):
|
|
| 260 |
async def process(
|
| 261 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 262 |
) -> IndicatorResult:
|
| 263 |
-
|
| 264 |
-
return await self._process_openeo(aoi, time_range, season_months)
|
| 265 |
-
except Exception as exc:
|
| 266 |
-
logger.warning("Built-up openEO processing failed, using placeholder: %s", exc)
|
| 267 |
-
return self._fallback(aoi, time_range)
|
| 268 |
|
| 269 |
async def _process_openeo(
|
| 270 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
@@ -503,27 +498,4 @@ class BuiltupIndicator(BaseIndicator):
|
|
| 503 |
with rasterio.open(output_path, "w", **profile) as dst:
|
| 504 |
dst.write(change, 1)
|
| 505 |
|
| 506 |
-
def _fallback(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 507 |
-
rng = np.random.default_rng(11)
|
| 508 |
-
baseline = float(rng.uniform(5, 20))
|
| 509 |
-
current = baseline * float(rng.uniform(0.85, 1.15))
|
| 510 |
-
change = current - baseline
|
| 511 |
-
|
| 512 |
-
return IndicatorResult(
|
| 513 |
-
indicator_id=self.id,
|
| 514 |
-
headline=f"Settlement data degraded ({current:.1f}% extent)",
|
| 515 |
-
status=StatusLevel.GREEN if abs(change) < 5 else StatusLevel.AMBER,
|
| 516 |
-
trend=TrendDirection.STABLE,
|
| 517 |
-
confidence=ConfidenceLevel.LOW,
|
| 518 |
-
map_layer_path="",
|
| 519 |
-
chart_data={
|
| 520 |
-
"dates": [str(time_range.start.year), str(time_range.end.year)],
|
| 521 |
-
"values": [round(baseline, 1), round(current, 1)],
|
| 522 |
-
"label": "Built-up area (ha)",
|
| 523 |
-
},
|
| 524 |
-
data_source="placeholder",
|
| 525 |
-
summary="openEO processing unavailable. Showing placeholder values.",
|
| 526 |
-
methodology="Placeholder — no satellite data processed.",
|
| 527 |
-
limitations=["Data is synthetic. openEO backend was unreachable."],
|
| 528 |
-
)
|
| 529 |
|
|
|
|
| 97 |
paths = current_job.download_results(current_dir)
|
| 98 |
current_path = self._find_tif(paths, current_dir)
|
| 99 |
except Exception as exc:
|
| 100 |
+
raise RuntimeError(f"Built-up current period data unavailable: {exc}") from exc
|
|
|
|
| 101 |
|
| 102 |
# Download baseline — optional (degrades gracefully)
|
| 103 |
baseline_path = None
|
|
|
|
| 259 |
async def process(
|
| 260 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 261 |
) -> IndicatorResult:
|
| 262 |
+
return await self._process_openeo(aoi, time_range, season_months)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 263 |
|
| 264 |
async def _process_openeo(
|
| 265 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
|
|
| 498 |
with rasterio.open(output_path, "w", **profile) as dst:
|
| 499 |
dst.write(change, 1)
|
| 500 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 501 |
|
|
@@ -1,343 +0,0 @@
|
|
| 1 |
-
from __future__ import annotations
|
| 2 |
-
|
| 3 |
-
from collections import defaultdict
|
| 4 |
-
from datetime import date
|
| 5 |
-
from typing import Any
|
| 6 |
-
|
| 7 |
-
import numpy as np
|
| 8 |
-
|
| 9 |
-
from app.indicators.base import BaseIndicator
|
| 10 |
-
from app.models import (
|
| 11 |
-
AOI,
|
| 12 |
-
TimeRange,
|
| 13 |
-
IndicatorResult,
|
| 14 |
-
StatusLevel,
|
| 15 |
-
TrendDirection,
|
| 16 |
-
ConfidenceLevel,
|
| 17 |
-
)
|
| 18 |
-
|
| 19 |
-
BASELINE_YEARS = 3
|
| 20 |
-
MAX_ITEMS = 100
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
class CroplandIndicator(BaseIndicator):
|
| 24 |
-
id = "cropland"
|
| 25 |
-
name = "Cropland Productivity"
|
| 26 |
-
category = "D1"
|
| 27 |
-
question = "Is farmland being cultivated or abandoned?"
|
| 28 |
-
estimated_minutes = 5
|
| 29 |
-
|
| 30 |
-
async def process(self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None) -> IndicatorResult:
|
| 31 |
-
baseline_mean, current_mean, n_months = await self._fetch_comparison(aoi, time_range, season_months)
|
| 32 |
-
|
| 33 |
-
# Use percentage-point change rather than ratio — more meaningful
|
| 34 |
-
# for scene-level vegetation cover which can swing widely
|
| 35 |
-
change_pp = current_mean - baseline_mean # positive = greening
|
| 36 |
-
abs_change = abs(change_pp)
|
| 37 |
-
|
| 38 |
-
status = self._classify(change_pp)
|
| 39 |
-
trend = self._compute_trend(change_pp)
|
| 40 |
-
confidence = (
|
| 41 |
-
ConfidenceLevel.HIGH if n_months >= 6
|
| 42 |
-
else ConfidenceLevel.MODERATE if n_months >= 3
|
| 43 |
-
else ConfidenceLevel.LOW
|
| 44 |
-
)
|
| 45 |
-
|
| 46 |
-
current_monthly = getattr(self, '_current_monthly_medians', None)
|
| 47 |
-
baseline_per_year_monthly = getattr(self, '_baseline_per_year_monthly', None)
|
| 48 |
-
if current_monthly and baseline_per_year_monthly:
|
| 49 |
-
chart_data = self._build_monthly_chart_data(
|
| 50 |
-
current_monthly=current_monthly,
|
| 51 |
-
baseline_per_year_monthly=baseline_per_year_monthly,
|
| 52 |
-
time_range=time_range,
|
| 53 |
-
season_months=season_months,
|
| 54 |
-
)
|
| 55 |
-
else:
|
| 56 |
-
chart_data = self._build_chart_data(baseline_mean, current_mean, time_range, getattr(self, '_baseline_yearly_means', None))
|
| 57 |
-
|
| 58 |
-
if abs_change <= 5:
|
| 59 |
-
headline = f"Cropland vegetation stable ({current_mean:.0f}% cover, ±{abs_change:.0f}pp vs baseline)"
|
| 60 |
-
elif change_pp > 0:
|
| 61 |
-
headline = f"Cropland vegetation increased (+{change_pp:.0f}pp vs baseline — possible greening)"
|
| 62 |
-
else:
|
| 63 |
-
headline = f"Cropland vegetation declined ({change_pp:.0f}pp vs baseline — possible abandonment)"
|
| 64 |
-
|
| 65 |
-
# Store spatial data for map rendering
|
| 66 |
-
from app.indicators.base import SpatialData
|
| 67 |
-
tile_geojson = await self._fetch_tile_footprints(aoi, time_range, season_months)
|
| 68 |
-
if tile_geojson["features"]:
|
| 69 |
-
self._spatial_data = SpatialData(
|
| 70 |
-
geojson=tile_geojson,
|
| 71 |
-
map_type="choropleth",
|
| 72 |
-
label="Vegetation cover (%)",
|
| 73 |
-
colormap="YlGn",
|
| 74 |
-
)
|
| 75 |
-
else:
|
| 76 |
-
self._spatial_data = None
|
| 77 |
-
|
| 78 |
-
return IndicatorResult(
|
| 79 |
-
indicator_id=self.id,
|
| 80 |
-
headline=headline,
|
| 81 |
-
status=status,
|
| 82 |
-
trend=trend,
|
| 83 |
-
confidence=confidence,
|
| 84 |
-
map_layer_path="",
|
| 85 |
-
chart_data=chart_data,
|
| 86 |
-
data_source="placeholder" if getattr(self, '_is_placeholder', False) else "satellite",
|
| 87 |
-
summary=(
|
| 88 |
-
f"Mean vegetation cover is {current_mean:.1f}% compared to a {BASELINE_YEARS}-year "
|
| 89 |
-
f"baseline of {baseline_mean:.1f}% ({change_pp:+.1f} percentage points). "
|
| 90 |
-
f"Month-matched comparison using {n_months} overlapping months. "
|
| 91 |
-
f"Status: {status.value}. Trend: {trend.value}."
|
| 92 |
-
),
|
| 93 |
-
methodology=(
|
| 94 |
-
"Sentinel-2 scene-level vegetation percentage (s2:vegetation_percentage) "
|
| 95 |
-
"is extracted from STAC metadata for cloud-free scenes during the growing "
|
| 96 |
-
"season only. Restricting to growing season isolates "
|
| 97 |
-
"cropland activity from year-round forest/shrub cover. Monthly medians "
|
| 98 |
-
"are compared between the current and baseline periods. "
|
| 99 |
-
f"Baseline: {BASELINE_YEARS} years. No pixel data is downloaded."
|
| 100 |
-
),
|
| 101 |
-
limitations=[
|
| 102 |
-
"Uses scene-level vegetation percentage, not field-level NDVI.",
|
| 103 |
-
"Growing season months are user-configurable (defaults to all months).",
|
| 104 |
-
"Cloud cover during peak season can reduce observation count.",
|
| 105 |
-
"Vegetation percentage includes non-crop vegetation within scenes.",
|
| 106 |
-
"Sentinel-2 data availability may be limited to after 2017.",
|
| 107 |
-
],
|
| 108 |
-
)
|
| 109 |
-
|
| 110 |
-
async def _fetch_tile_footprints(self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None) -> dict:
|
| 111 |
-
"""Fetch S2 tile footprints with median vegetation % per tile."""
|
| 112 |
-
try:
|
| 113 |
-
import asyncio
|
| 114 |
-
import pystac_client
|
| 115 |
-
except ImportError:
|
| 116 |
-
return {"type": "FeatureCollection", "features": []}
|
| 117 |
-
|
| 118 |
-
try:
|
| 119 |
-
catalog = pystac_client.Client.open(
|
| 120 |
-
"https://earth-search.aws.element84.com/v1"
|
| 121 |
-
)
|
| 122 |
-
current_year = time_range.end.year
|
| 123 |
-
months = season_months or list(range(1, 13))
|
| 124 |
-
|
| 125 |
-
def _query():
|
| 126 |
-
from collections import defaultdict
|
| 127 |
-
from datetime import date as _date
|
| 128 |
-
start = _date(current_year, min(months), 1)
|
| 129 |
-
end = _date(current_year, max(months), 30)
|
| 130 |
-
items = catalog.search(
|
| 131 |
-
collections=["sentinel-2-l2a"],
|
| 132 |
-
bbox=aoi.bbox,
|
| 133 |
-
datetime=f"{start.isoformat()}/{end.isoformat()}",
|
| 134 |
-
query={"eo:cloud_cover": {"lt": 30}},
|
| 135 |
-
max_items=MAX_ITEMS,
|
| 136 |
-
).item_collection()
|
| 137 |
-
|
| 138 |
-
tile_vals: dict[str, list[float]] = defaultdict(list)
|
| 139 |
-
tile_geom: dict[str, dict] = {}
|
| 140 |
-
for item in items:
|
| 141 |
-
grid = item.properties.get("grid:code", item.id)
|
| 142 |
-
veg = item.properties.get("s2:vegetation_percentage")
|
| 143 |
-
if veg is not None:
|
| 144 |
-
tile_vals[grid].append(float(veg))
|
| 145 |
-
if grid not in tile_geom:
|
| 146 |
-
tile_geom[grid] = item.geometry
|
| 147 |
-
return tile_vals, tile_geom
|
| 148 |
-
|
| 149 |
-
loop = asyncio.get_event_loop()
|
| 150 |
-
tile_vals, tile_geom = await loop.run_in_executor(None, _query)
|
| 151 |
-
|
| 152 |
-
features = []
|
| 153 |
-
for grid, vals in tile_vals.items():
|
| 154 |
-
if grid in tile_geom:
|
| 155 |
-
features.append({
|
| 156 |
-
"type": "Feature",
|
| 157 |
-
"geometry": tile_geom[grid],
|
| 158 |
-
"properties": {
|
| 159 |
-
"value": float(np.median(vals)),
|
| 160 |
-
"grid_code": grid,
|
| 161 |
-
},
|
| 162 |
-
})
|
| 163 |
-
return {"type": "FeatureCollection", "features": features}
|
| 164 |
-
except Exception:
|
| 165 |
-
return {"type": "FeatureCollection", "features": []}
|
| 166 |
-
|
| 167 |
-
async def _fetch_comparison(
|
| 168 |
-
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 169 |
-
) -> tuple[float, float, int]:
|
| 170 |
-
"""Returns (baseline_mean, current_mean, n_overlapping_months)."""
|
| 171 |
-
self._is_placeholder = False
|
| 172 |
-
try:
|
| 173 |
-
import pystac_client # noqa: F401
|
| 174 |
-
except ImportError as exc:
|
| 175 |
-
import logging
|
| 176 |
-
logging.getLogger(__name__).warning(
|
| 177 |
-
"Cropland missing dependencies, using placeholder: %s", exc
|
| 178 |
-
)
|
| 179 |
-
self._is_placeholder = True
|
| 180 |
-
return self._synthetic()
|
| 181 |
-
|
| 182 |
-
try:
|
| 183 |
-
return await self._stac_comparison(aoi, time_range, season_months)
|
| 184 |
-
except Exception as exc:
|
| 185 |
-
import logging
|
| 186 |
-
logging.getLogger(__name__).warning(
|
| 187 |
-
"Cropland STAC query failed, using placeholder: %s", exc
|
| 188 |
-
)
|
| 189 |
-
self._is_placeholder = True
|
| 190 |
-
return self._synthetic()
|
| 191 |
-
|
| 192 |
-
async def _stac_comparison(
|
| 193 |
-
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 194 |
-
) -> tuple[float, float, int]:
|
| 195 |
-
import asyncio
|
| 196 |
-
import pystac_client
|
| 197 |
-
|
| 198 |
-
catalog = pystac_client.Client.open(
|
| 199 |
-
"https://earth-search.aws.element84.com/v1"
|
| 200 |
-
)
|
| 201 |
-
current_year = time_range.end.year
|
| 202 |
-
baseline_start_year = current_year - BASELINE_YEARS
|
| 203 |
-
months = season_months or list(range(1, 13))
|
| 204 |
-
|
| 205 |
-
def _query_months(year: int) -> dict[int, list[float]]:
|
| 206 |
-
"""Return {month: [vegetation_pct, ...]} for specified months."""
|
| 207 |
-
start = date(year, 1, 1)
|
| 208 |
-
end = date(year, 12, 31)
|
| 209 |
-
items = catalog.search(
|
| 210 |
-
collections=["sentinel-2-l2a"],
|
| 211 |
-
bbox=aoi.bbox,
|
| 212 |
-
datetime=f"{start.isoformat()}/{end.isoformat()}",
|
| 213 |
-
query={"eo:cloud_cover": {"lt": 30}},
|
| 214 |
-
max_items=MAX_ITEMS,
|
| 215 |
-
).item_collection()
|
| 216 |
-
by_month: dict[int, list[float]] = defaultdict(list)
|
| 217 |
-
for item in items:
|
| 218 |
-
veg = item.properties.get("s2:vegetation_percentage")
|
| 219 |
-
if veg is not None and item.datetime:
|
| 220 |
-
month = item.datetime.month
|
| 221 |
-
if month in months:
|
| 222 |
-
by_month[month].append(float(veg))
|
| 223 |
-
return dict(by_month)
|
| 224 |
-
|
| 225 |
-
loop = asyncio.get_event_loop()
|
| 226 |
-
|
| 227 |
-
# Current year monthly medians
|
| 228 |
-
current_monthly = await loop.run_in_executor(None, _query_months, current_year)
|
| 229 |
-
|
| 230 |
-
# Baseline: pool all years by month, then median per month
|
| 231 |
-
baseline_pool: dict[int, list[float]] = defaultdict(list)
|
| 232 |
-
baseline_yearly_means: list[float] = []
|
| 233 |
-
baseline_per_year_monthly: dict[int, list[float]] = defaultdict(list)
|
| 234 |
-
for yr in range(baseline_start_year, current_year):
|
| 235 |
-
yr_monthly = await loop.run_in_executor(None, _query_months, yr)
|
| 236 |
-
yr_medians = []
|
| 237 |
-
for month, vals in yr_monthly.items():
|
| 238 |
-
baseline_pool[month].extend(vals)
|
| 239 |
-
if vals:
|
| 240 |
-
month_median = float(np.median(vals))
|
| 241 |
-
yr_medians.append(month_median)
|
| 242 |
-
baseline_per_year_monthly[month].append(month_median)
|
| 243 |
-
if yr_medians:
|
| 244 |
-
baseline_yearly_means.append(float(np.mean(yr_medians)))
|
| 245 |
-
|
| 246 |
-
# Month-matched comparison: only months with data in BOTH periods
|
| 247 |
-
baseline_medians = []
|
| 248 |
-
current_medians = []
|
| 249 |
-
for month in months:
|
| 250 |
-
b_vals = baseline_pool.get(month, [])
|
| 251 |
-
c_vals = current_monthly.get(month, [])
|
| 252 |
-
if b_vals and c_vals:
|
| 253 |
-
baseline_medians.append(float(np.median(b_vals)))
|
| 254 |
-
current_medians.append(float(np.median(c_vals)))
|
| 255 |
-
|
| 256 |
-
n_months = len(baseline_medians)
|
| 257 |
-
if n_months == 0:
|
| 258 |
-
self._is_placeholder = True
|
| 259 |
-
return self._synthetic()
|
| 260 |
-
|
| 261 |
-
self._baseline_yearly_means = baseline_yearly_means
|
| 262 |
-
self._current_monthly_medians = {m: float(np.median(v)) for m, v in current_monthly.items() if v}
|
| 263 |
-
self._baseline_per_year_monthly = dict(baseline_per_year_monthly)
|
| 264 |
-
return (
|
| 265 |
-
float(np.mean(baseline_medians)),
|
| 266 |
-
float(np.mean(current_medians)),
|
| 267 |
-
n_months,
|
| 268 |
-
)
|
| 269 |
-
|
| 270 |
-
@staticmethod
|
| 271 |
-
def _synthetic() -> tuple[float, float, int]:
|
| 272 |
-
rng = np.random.default_rng(42)
|
| 273 |
-
baseline = float(rng.uniform(30, 50))
|
| 274 |
-
current = baseline * float(rng.uniform(0.88, 1.02))
|
| 275 |
-
return baseline, current, 6
|
| 276 |
-
|
| 277 |
-
@staticmethod
|
| 278 |
-
def _classify(change_pp: float) -> StatusLevel:
|
| 279 |
-
"""Classify based on percentage-point change. Decline is concern."""
|
| 280 |
-
if change_pp >= -5:
|
| 281 |
-
return StatusLevel.GREEN
|
| 282 |
-
if change_pp >= -15:
|
| 283 |
-
return StatusLevel.AMBER
|
| 284 |
-
return StatusLevel.RED
|
| 285 |
-
|
| 286 |
-
@staticmethod
|
| 287 |
-
def _compute_trend(change_pp: float) -> TrendDirection:
|
| 288 |
-
if abs(change_pp) <= 5:
|
| 289 |
-
return TrendDirection.STABLE
|
| 290 |
-
if change_pp > 0:
|
| 291 |
-
return TrendDirection.IMPROVING
|
| 292 |
-
return TrendDirection.DETERIORATING
|
| 293 |
-
|
| 294 |
-
@staticmethod
|
| 295 |
-
def _build_chart_data(
|
| 296 |
-
baseline: float, current: float, time_range: TimeRange,
|
| 297 |
-
baseline_yearly_means: list[float] | None = None,
|
| 298 |
-
) -> dict[str, Any]:
|
| 299 |
-
data: dict[str, Any] = {
|
| 300 |
-
"dates": [str(time_range.start.year - 1), str(time_range.end.year)],
|
| 301 |
-
"values": [round(baseline, 1), round(current, 1)],
|
| 302 |
-
"label": "Vegetation cover (%)",
|
| 303 |
-
}
|
| 304 |
-
if baseline_yearly_means and len(baseline_yearly_means) >= 2:
|
| 305 |
-
data["baseline_range_mean"] = round(float(np.mean(baseline_yearly_means)), 1)
|
| 306 |
-
data["baseline_range_min"] = round(float(min(baseline_yearly_means)), 1)
|
| 307 |
-
data["baseline_range_max"] = round(float(max(baseline_yearly_means)), 1)
|
| 308 |
-
return data
|
| 309 |
-
|
| 310 |
-
@staticmethod
|
| 311 |
-
def _build_monthly_chart_data(
|
| 312 |
-
current_monthly: dict[int, float],
|
| 313 |
-
baseline_per_year_monthly: dict[int, list[float]],
|
| 314 |
-
time_range: TimeRange,
|
| 315 |
-
season_months: list[int] | None = None,
|
| 316 |
-
) -> dict[str, Any]:
|
| 317 |
-
months = season_months or sorted(current_monthly.keys())
|
| 318 |
-
yr = time_range.end.year
|
| 319 |
-
dates = [f"{yr}-{m:02d}" for m in months if m in current_monthly]
|
| 320 |
-
values = [round(current_monthly[m], 1) for m in months if m in current_monthly]
|
| 321 |
-
|
| 322 |
-
b_mean, b_min, b_max = [], [], []
|
| 323 |
-
for m in months:
|
| 324 |
-
if m not in current_monthly:
|
| 325 |
-
continue
|
| 326 |
-
yr_vals = baseline_per_year_monthly.get(m, [])
|
| 327 |
-
if yr_vals:
|
| 328 |
-
b_mean.append(round(float(np.mean(yr_vals)), 1))
|
| 329 |
-
b_min.append(round(float(min(yr_vals)), 1))
|
| 330 |
-
b_max.append(round(float(max(yr_vals)), 1))
|
| 331 |
-
else:
|
| 332 |
-
b_mean.append(round(current_monthly[m], 1))
|
| 333 |
-
b_min.append(round(current_monthly[m], 1))
|
| 334 |
-
b_max.append(round(current_monthly[m], 1))
|
| 335 |
-
|
| 336 |
-
return {
|
| 337 |
-
"dates": dates,
|
| 338 |
-
"values": values,
|
| 339 |
-
"label": "Vegetation cover (%)",
|
| 340 |
-
"baseline_mean": b_mean,
|
| 341 |
-
"baseline_min": b_min,
|
| 342 |
-
"baseline_max": b_max,
|
| 343 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,205 +0,0 @@
|
|
| 1 |
-
from __future__ import annotations
|
| 2 |
-
|
| 3 |
-
import io
|
| 4 |
-
import csv
|
| 5 |
-
from collections import defaultdict
|
| 6 |
-
from datetime import date, timedelta
|
| 7 |
-
from typing import Any
|
| 8 |
-
|
| 9 |
-
import httpx
|
| 10 |
-
|
| 11 |
-
from app.indicators.base import BaseIndicator
|
| 12 |
-
from app.models import (
|
| 13 |
-
AOI,
|
| 14 |
-
TimeRange,
|
| 15 |
-
IndicatorResult,
|
| 16 |
-
StatusLevel,
|
| 17 |
-
TrendDirection,
|
| 18 |
-
ConfidenceLevel,
|
| 19 |
-
)
|
| 20 |
-
|
| 21 |
-
FIRMS_URL = "https://firms.modaps.eosdis.nasa.gov/api/area/csv"
|
| 22 |
-
FIRMS_MAP_KEY = "DEMO_KEY" # override via env if needed
|
| 23 |
-
CHUNK_DAYS = 10
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
class FiresIndicator(BaseIndicator):
|
| 27 |
-
id = "fires"
|
| 28 |
-
name = "Active Fires"
|
| 29 |
-
category = "R3"
|
| 30 |
-
question = "Where are fires burning?"
|
| 31 |
-
estimated_minutes = 2
|
| 32 |
-
|
| 33 |
-
async def process(self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None) -> IndicatorResult:
|
| 34 |
-
rows = await self._fetch_firms(aoi, time_range)
|
| 35 |
-
|
| 36 |
-
count = len(rows)
|
| 37 |
-
status = self._classify(count)
|
| 38 |
-
trend = self._compute_trend(rows, time_range)
|
| 39 |
-
confidence = self._compute_confidence(rows)
|
| 40 |
-
chart_data = self._build_chart_data(rows, season_months)
|
| 41 |
-
|
| 42 |
-
if count == 0:
|
| 43 |
-
headline = "No active fires detected"
|
| 44 |
-
else:
|
| 45 |
-
headline = f"{count} active fire detection{'s' if count != 1 else ''} in period"
|
| 46 |
-
|
| 47 |
-
# Store spatial data for map rendering
|
| 48 |
-
if rows:
|
| 49 |
-
from app.indicators.base import SpatialData
|
| 50 |
-
self._spatial_data = SpatialData(
|
| 51 |
-
geojson=self._build_geojson(rows),
|
| 52 |
-
map_type="points",
|
| 53 |
-
label="Fire detections",
|
| 54 |
-
)
|
| 55 |
-
else:
|
| 56 |
-
self._spatial_data = None
|
| 57 |
-
|
| 58 |
-
return IndicatorResult(
|
| 59 |
-
indicator_id=self.id,
|
| 60 |
-
headline=headline,
|
| 61 |
-
status=status,
|
| 62 |
-
trend=trend,
|
| 63 |
-
confidence=confidence,
|
| 64 |
-
map_layer_path="",
|
| 65 |
-
chart_data=chart_data,
|
| 66 |
-
summary=(
|
| 67 |
-
f"{count} VIIRS fire detection{'s' if count != 1 else ''} recorded "
|
| 68 |
-
f"between {time_range.start} and {time_range.end}. "
|
| 69 |
-
f"Status: {status.value}. Trend: {trend.value}."
|
| 70 |
-
),
|
| 71 |
-
methodology=(
|
| 72 |
-
"Fire detections sourced from the NASA FIRMS (Fire Information for "
|
| 73 |
-
"Resource Management System) VIIRS 375m active fire product."
|
| 74 |
-
),
|
| 75 |
-
limitations=[
|
| 76 |
-
"VIIRS has a 375m spatial resolution — small fires may be missed.",
|
| 77 |
-
"Cloud cover can obscure fire detections.",
|
| 78 |
-
"Detections represent thermal anomalies, not confirmed fires.",
|
| 79 |
-
],
|
| 80 |
-
)
|
| 81 |
-
|
| 82 |
-
async def _fetch_firms(self, aoi: AOI, time_range: TimeRange) -> list[dict]:
|
| 83 |
-
min_lon, min_lat, max_lon, max_lat = aoi.bbox
|
| 84 |
-
bbox_str = f"{min_lon},{min_lat},{max_lon},{max_lat}"
|
| 85 |
-
|
| 86 |
-
all_rows: list[dict] = []
|
| 87 |
-
current = time_range.start
|
| 88 |
-
|
| 89 |
-
async with httpx.AsyncClient(timeout=30) as client:
|
| 90 |
-
while current < time_range.end:
|
| 91 |
-
chunk_end = min(current + timedelta(days=CHUNK_DAYS - 1), time_range.end)
|
| 92 |
-
days = (chunk_end - current).days + 1
|
| 93 |
-
url = (
|
| 94 |
-
f"{FIRMS_URL}/{FIRMS_MAP_KEY}/VIIRS_SNPP_NRT/"
|
| 95 |
-
f"{bbox_str}/{days}/{current.isoformat()}"
|
| 96 |
-
)
|
| 97 |
-
response = await client.get(url)
|
| 98 |
-
if response.status_code == 200 and response.text.strip():
|
| 99 |
-
reader = csv.DictReader(io.StringIO(response.text))
|
| 100 |
-
for row in reader:
|
| 101 |
-
acq = row.get("acq_date", "")
|
| 102 |
-
if acq:
|
| 103 |
-
try:
|
| 104 |
-
row_date = date.fromisoformat(acq)
|
| 105 |
-
if current <= row_date <= chunk_end:
|
| 106 |
-
all_rows.append(row)
|
| 107 |
-
except ValueError:
|
| 108 |
-
pass
|
| 109 |
-
current = chunk_end + timedelta(days=1)
|
| 110 |
-
|
| 111 |
-
return all_rows
|
| 112 |
-
|
| 113 |
-
@staticmethod
|
| 114 |
-
def _build_geojson(rows: list[dict]) -> dict:
|
| 115 |
-
features = []
|
| 116 |
-
for row in rows:
|
| 117 |
-
try:
|
| 118 |
-
lat = float(row["latitude"])
|
| 119 |
-
lon = float(row["longitude"])
|
| 120 |
-
except (KeyError, ValueError):
|
| 121 |
-
continue
|
| 122 |
-
features.append({
|
| 123 |
-
"type": "Feature",
|
| 124 |
-
"geometry": {"type": "Point", "coordinates": [lon, lat]},
|
| 125 |
-
"properties": {
|
| 126 |
-
"confidence": row.get("confidence", "unknown"),
|
| 127 |
-
"acq_date": row.get("acq_date", ""),
|
| 128 |
-
"bright_ti4": row.get("bright_ti4", ""),
|
| 129 |
-
},
|
| 130 |
-
})
|
| 131 |
-
return {"type": "FeatureCollection", "features": features}
|
| 132 |
-
|
| 133 |
-
@staticmethod
|
| 134 |
-
def _classify(count: int) -> StatusLevel:
|
| 135 |
-
if count == 0:
|
| 136 |
-
return StatusLevel.GREEN
|
| 137 |
-
if count <= 5:
|
| 138 |
-
return StatusLevel.AMBER
|
| 139 |
-
return StatusLevel.RED
|
| 140 |
-
|
| 141 |
-
@staticmethod
|
| 142 |
-
def _compute_trend(rows: list[dict], time_range: TimeRange) -> TrendDirection:
|
| 143 |
-
if not rows:
|
| 144 |
-
return TrendDirection.STABLE
|
| 145 |
-
|
| 146 |
-
total_days = (time_range.end - time_range.start).days
|
| 147 |
-
mid = time_range.start + timedelta(days=total_days // 2)
|
| 148 |
-
|
| 149 |
-
first_half = 0
|
| 150 |
-
second_half = 0
|
| 151 |
-
for row in rows:
|
| 152 |
-
try:
|
| 153 |
-
row_date = date.fromisoformat(row["acq_date"])
|
| 154 |
-
except (KeyError, ValueError):
|
| 155 |
-
continue
|
| 156 |
-
if row_date < mid:
|
| 157 |
-
first_half += 1
|
| 158 |
-
else:
|
| 159 |
-
second_half += 1
|
| 160 |
-
|
| 161 |
-
if first_half == 0 and second_half == 0:
|
| 162 |
-
return TrendDirection.STABLE
|
| 163 |
-
if first_half == 0:
|
| 164 |
-
return TrendDirection.DETERIORATING
|
| 165 |
-
ratio = second_half / first_half
|
| 166 |
-
if ratio > 1.25:
|
| 167 |
-
return TrendDirection.DETERIORATING
|
| 168 |
-
if ratio < 0.8:
|
| 169 |
-
return TrendDirection.IMPROVING
|
| 170 |
-
return TrendDirection.STABLE
|
| 171 |
-
|
| 172 |
-
@staticmethod
def _compute_confidence(rows: list[dict]) -> ConfidenceLevel:
    """Derive overall confidence from per-detection confidence flags.

    An empty row set means "no fires detected", which is treated as a
    high-confidence result. Otherwise confidence is the fraction of
    detections flagged "nominal" or "high"/"h":
    >= 80% -> HIGH, >= 50% -> MODERATE, else LOW.
    A missing "confidence" field defaults to "nominal".
    """
    if not rows:
        return ConfidenceLevel.HIGH
    confidences = [r.get("confidence", "nominal").lower() for r in rows]
    nominal_count = sum(1 for c in confidences if c == "nominal")
    high_count = sum(1 for c in confidences if c in ("high", "h"))
    # rows is non-empty here, so len(confidences) >= 1 — no division guard
    # needed (the previous `total == 0` -> MODERATE branch was unreachable).
    high_frac = (nominal_count + high_count) / len(confidences)
    if high_frac >= 0.8:
        return ConfidenceLevel.HIGH
    if high_frac >= 0.5:
        return ConfidenceLevel.MODERATE
    return ConfidenceLevel.LOW
|
| 188 |
-
|
| 189 |
-
@staticmethod
|
| 190 |
-
def _build_chart_data(rows: list[dict], season_months: list[int] | None = None) -> dict[str, Any]:
|
| 191 |
-
monthly: dict[str, int] = defaultdict(int)
|
| 192 |
-
for row in rows:
|
| 193 |
-
acq = row.get("acq_date", "")
|
| 194 |
-
if acq and len(acq) >= 7:
|
| 195 |
-
month_key = acq[:7] # "YYYY-MM"
|
| 196 |
-
month_num = int(month_key[5:7])
|
| 197 |
-
if season_months is None or month_num in season_months:
|
| 198 |
-
monthly[month_key] += 1
|
| 199 |
-
|
| 200 |
-
sorted_months = sorted(monthly.keys())
|
| 201 |
-
return {
|
| 202 |
-
"dates": sorted_months,
|
| 203 |
-
"values": [monthly[m] for m in sorted_months],
|
| 204 |
-
"label": "Fire detections per month",
|
| 205 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,152 +0,0 @@
|
|
| 1 |
-
from __future__ import annotations
|
| 2 |
-
|
| 3 |
-
from typing import Any
|
| 4 |
-
|
| 5 |
-
from app.indicators.base import BaseIndicator
|
| 6 |
-
from app.models import (
|
| 7 |
-
AOI,
|
| 8 |
-
TimeRange,
|
| 9 |
-
IndicatorResult,
|
| 10 |
-
StatusLevel,
|
| 11 |
-
TrendDirection,
|
| 12 |
-
ConfidenceLevel,
|
| 13 |
-
)
|
| 14 |
-
|
| 15 |
-
# Ranking tables used for worst-case aggregation across sub-indicators.
# Higher rank = worse status/trend; higher rank = better confidence.
_STATUS_RANK: dict[StatusLevel, int] = {
    level: rank
    for rank, level in enumerate((StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED))
}

_TREND_RANK: dict[TrendDirection, int] = {
    TrendDirection.IMPROVING: -1,
    TrendDirection.STABLE: 0,
    TrendDirection.DETERIORATING: 1,
}

_CONFIDENCE_RANK: dict[ConfidenceLevel, int] = {
    ConfidenceLevel.HIGH: 2,
    ConfidenceLevel.MODERATE: 1,
    ConfidenceLevel.LOW: 0,
}
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
class FoodSecurityIndicator(BaseIndicator):
    """F2 Food Security Composite.

    Aggregates the D1 (cropland), D5 (rainfall), and D6 (land surface
    temperature) indicators into a single signal: worst-case status and
    trend across components, minimum confidence among them.
    """

    id = "food_security"
    name = "Food Security Composite"
    category = "F2"
    question = "Combined crop, rain, and temperature signals"
    estimated_minutes = 20

    async def process(self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None) -> IndicatorResult:
        """Run each component indicator and fold the results together.

        A failing component is skipped with a warning; if every component
        fails, a low-confidence RED placeholder result is returned.
        NOTE(review): season_months is accepted for interface parity but
        is not forwarded to the components — confirm this is intended.
        """
        import warnings  # hoisted: was re-imported inside the except handler on every failure

        # Import here to avoid circular import at module load time
        from app.indicators import registry  # type: ignore[import]

        component_ids = ["cropland", "rainfall", "lst"]
        component_results: list[IndicatorResult] = []

        for cid in component_ids:
            try:
                indicator = registry.get(cid)
                result = await indicator.process(aoi, time_range)
                component_results.append(result)
            except Exception as exc:
                # If a sub-indicator fails, degrade confidence but continue
                warnings.warn(f"Food security sub-indicator '{cid}' failed: {exc}")

        if not component_results:
            # Nothing worked — return a low-confidence red result
            return self._fallback_result()

        # Worst-case status and trend across components; minimum confidence.
        worst_status = max(
            (r.status for r in component_results),
            key=lambda s: _STATUS_RANK[s],
        )
        worst_trend = max(
            (r.trend for r in component_results),
            key=lambda t: _TREND_RANK[t],
        )
        min_confidence = min(
            (r.confidence for r in component_results),
            key=lambda c: _CONFIDENCE_RANK[c],
        )

        chart_data = self._build_chart_data(component_results)
        any_placeholder = any(r.data_source == "placeholder" for r in component_results)
        summary_parts = [f"{r.indicator_id.upper()}: {r.headline}" for r in component_results]

        status_label = worst_status.value.upper()
        if worst_status == StatusLevel.GREEN:
            headline = "Food security indicators within normal range"
        elif worst_status == StatusLevel.AMBER:
            headline = "Some food security stress signals detected"
        else:
            headline = "Critical food security stress signals detected"

        return IndicatorResult(
            indicator_id=self.id,
            headline=headline,
            status=worst_status,
            trend=worst_trend,
            confidence=min_confidence,
            map_layer_path="",
            chart_data=chart_data,
            data_source="placeholder" if any_placeholder else "satellite",
            summary=(
                f"Composite food security assessment [{status_label}] based on "
                f"{len(component_results)} sub-indicators. "
                + " | ".join(summary_parts)
            ),
            methodology=(
                "The F2 Food Security Composite aggregates D1 (Cropland Productivity), "
                "D5 (Rainfall Adequacy), and D6 (Land Surface Temperature) indicators. "
                "The composite status reflects the worst-case signal across components; "
                "confidence reflects the minimum confidence of any component."
            ),
            limitations=[
                "Composite takes worst-case status — a single stressed component drives the result.",
                "Each component carries its own limitations (see D1, D5, D6 indicators).",
                "Food security depends on access and market factors not captured by remote sensing.",
                "Composite does not account for adaptive coping mechanisms.",
            ],
        )

    @staticmethod
    def _build_chart_data(results: list[IndicatorResult]) -> dict[str, Any]:
        """Build a chart comparing component status values."""
        labels = [r.indicator_id for r in results]
        # Map status to numeric severity for chart display
        severity = [_STATUS_RANK[r.status] for r in results]
        return {
            "dates": labels,
            "values": severity,
            "label": "Component stress level (0=green, 1=amber, 2=red)",
        }

    @staticmethod
    def _fallback_result() -> IndicatorResult:
        """Low-confidence RED result used when every component indicator failed."""
        return IndicatorResult(
            indicator_id="food_security",
            headline="Food security assessment unavailable — sub-indicators failed",
            status=StatusLevel.RED,
            trend=TrendDirection.STABLE,
            confidence=ConfidenceLevel.LOW,
            map_layer_path="",
            chart_data={"dates": [], "values": [], "label": ""},
            data_source="placeholder",
            summary="No sub-indicator data could be retrieved.",
            methodology=(
                "The F2 Food Security Composite aggregates D1 (Cropland Productivity), "
                "D5 (Rainfall Adequacy), and D6 (Land Surface Temperature) indicators."
            ),
            limitations=[
                "All sub-indicators failed — results are unreliable.",
            ],
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,268 +0,0 @@
|
|
| 1 |
-
"""Land Surface Temperature Indicator — Sentinel-3 SLSTR via CDSE openEO.
|
| 2 |
-
|
| 3 |
-
Retrieves monthly mean LST from Sentinel-3 SLSTR, compares to 5-year
|
| 4 |
-
baseline, and classifies using Z-score anomaly thresholds.
|
| 5 |
-
"""
|
| 6 |
-
from __future__ import annotations
|
| 7 |
-
|
| 8 |
-
import logging
|
| 9 |
-
import os
|
| 10 |
-
import tempfile
|
| 11 |
-
from datetime import date
|
| 12 |
-
from typing import Any
|
| 13 |
-
|
| 14 |
-
import numpy as np
|
| 15 |
-
import rasterio
|
| 16 |
-
|
| 17 |
-
from app.config import RESOLUTION_M
|
| 18 |
-
from app.indicators.base import BaseIndicator, SpatialData
|
| 19 |
-
from app.models import (
|
| 20 |
-
AOI,
|
| 21 |
-
TimeRange,
|
| 22 |
-
IndicatorResult,
|
| 23 |
-
StatusLevel,
|
| 24 |
-
TrendDirection,
|
| 25 |
-
ConfidenceLevel,
|
| 26 |
-
)
|
| 27 |
-
from app.openeo_client import get_connection, build_lst_graph, build_true_color_graph, _bbox_dict
|
| 28 |
-
|
| 29 |
-
logger = logging.getLogger(__name__)
|
| 30 |
-
|
| 31 |
-
BASELINE_YEARS = 5
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
class LSTIndicator(BaseIndicator):
|
| 35 |
-
id = "lst"
|
| 36 |
-
name = "Land Surface Temperature"
|
| 37 |
-
category = "D6"
|
| 38 |
-
question = "Unusual heat patterns?"
|
| 39 |
-
estimated_minutes = 8
|
| 40 |
-
|
| 41 |
-
_true_color_path: str | None = None
|
| 42 |
-
|
| 43 |
-
async def process(
    self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
) -> IndicatorResult:
    """Run the openEO LST pipeline; degrade to placeholder data on any failure."""
    try:
        result = await self._process_openeo(aoi, time_range, season_months)
    except Exception as exc:
        logger.warning("LST openEO processing failed, using placeholder: %s", exc)
        return self._fallback(aoi, time_range)
    return result
|
| 51 |
-
|
| 52 |
-
async def _process_openeo(
    self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
) -> IndicatorResult:
    """Download current + baseline LST composites via openEO and score the anomaly.

    Builds three openEO graphs (current LST, baseline LST, true-color),
    downloads them to a temp directory, then classifies the current mean
    temperature against the baseline as a z-score.
    NOTE(review): season_months is accepted but never read here — confirm.
    """
    import asyncio

    conn = get_connection()
    bbox = _bbox_dict(aoi.bbox)

    # Current window, plus a baseline window ending where current starts.
    current_start = time_range.start.isoformat()
    current_end = time_range.end.isoformat()
    baseline_start = date(
        time_range.start.year - BASELINE_YEARS,
        time_range.start.month,
        time_range.start.day,
    ).isoformat()
    baseline_end = time_range.start.isoformat()

    results_dir = tempfile.mkdtemp(prefix="aperture_lst_")

    # LST at 1km (SLSTR native)
    lst_resolution = max(RESOLUTION_M, 1000)

    current_cube = build_lst_graph(
        conn=conn, bbox=bbox,
        temporal_extent=[current_start, current_end],
        resolution_m=lst_resolution,
    )
    baseline_cube = build_lst_graph(
        conn=conn, bbox=bbox,
        temporal_extent=[baseline_start, baseline_end],
        resolution_m=lst_resolution,
    )
    true_color_cube = build_true_color_graph(
        conn=conn, bbox=bbox,
        temporal_extent=[current_start, current_end],
        resolution_m=RESOLUTION_M,
    )

    # Downloads are blocking openEO client calls, so run them in the
    # default executor to avoid stalling the event loop.
    # NOTE(review): asyncio.get_event_loop() is deprecated inside
    # coroutines — get_running_loop() is the modern equivalent; confirm
    # target Python version before changing.
    loop = asyncio.get_event_loop()
    current_path = os.path.join(results_dir, "lst_current.tif")
    baseline_path = os.path.join(results_dir, "lst_baseline.tif")
    true_color_path = os.path.join(results_dir, "true_color.tif")

    await loop.run_in_executor(None, current_cube.download, current_path)
    await loop.run_in_executor(None, baseline_cube.download, baseline_path)
    await loop.run_in_executor(None, true_color_cube.download, true_color_path)

    self._true_color_path = true_color_path

    current_stats = self._compute_stats(current_path)
    baseline_stats = self._compute_stats(baseline_path)

    current_temp = current_stats["overall_mean_celsius"]
    baseline_temp = baseline_stats["overall_mean_celsius"]
    # Spread of the baseline monthly means; defaults to 1.0 with no data,
    # and the divisor is floored at 0.1 to avoid a near-zero division.
    baseline_std = float(np.std(baseline_stats["monthly_means_celsius"])) if baseline_stats["monthly_means_celsius"] else 1.0
    z_score = (current_temp - baseline_temp) / max(baseline_std, 0.1)

    status = self._classify(abs(z_score))
    trend = self._compute_trend(z_score)
    # Confidence scales with how many months had valid pixels.
    confidence = (
        ConfidenceLevel.HIGH if current_stats["valid_months"] >= 6
        else ConfidenceLevel.MODERATE if current_stats["valid_months"] >= 3
        else ConfidenceLevel.LOW
    )

    chart_data = self._build_chart_data(
        current_stats["monthly_means_celsius"],
        baseline_stats["monthly_means_celsius"],
        time_range,
    )

    anomaly = current_temp - baseline_temp
    if abs(z_score) < 1.0:
        headline = f"Temperature normal ({current_temp:.1f}\u00b0C, z={z_score:+.1f})"
    elif z_score > 0:
        headline = f"Above-normal temperature ({current_temp:.1f}\u00b0C, +{anomaly:.1f}\u00b0C)"
    else:
        headline = f"Below-normal temperature ({current_temp:.1f}\u00b0C, {anomaly:.1f}\u00b0C)"

    # Rendering hints consumed by the map/report layer.
    self._spatial_data = SpatialData(
        map_type="raster",
        label="LST (\u00b0C)",
        colormap="coolwarm",
        vmin=current_temp - 10,
        vmax=current_temp + 10,
    )
    self._indicator_raster_path = current_path
    # NOTE(review): _true_color_path was already assigned above — this
    # repeat assignment is redundant (kept for byte-identical behavior).
    self._true_color_path = true_color_path
    self._render_band = current_stats.get("hottest_band", 1)

    return IndicatorResult(
        indicator_id=self.id,
        headline=headline,
        status=status,
        trend=trend,
        confidence=confidence,
        map_layer_path=current_path,
        chart_data=chart_data,
        data_source="satellite",
        summary=(
            f"Mean LST is {current_temp:.1f}\u00b0C compared to "
            f"{baseline_temp:.1f}\u00b0C baseline (z-score: {z_score:+.2f}). "
            f"Sentinel-3 SLSTR at 1km resolution."
        ),
        methodology=(
            f"Sentinel-3 SLSTR Level-2 LST product. "
            f"Monthly mean composites. "
            f"Baseline: {BASELINE_YEARS}-year monthly means. "
            f"Z-score anomaly classification. "
            f"Processed via CDSE openEO."
        ),
        limitations=[
            "Sentinel-3 SLSTR resolution is ~1km \u2014 urban heat islands may be smoothed.",
            "Cloud cover creates data gaps in monthly composites.",
            "LST varies with land cover; change may reflect land use, not climate.",
            "Daytime overpass only \u2014 nighttime temperatures not captured.",
        ],
    )
|
| 170 |
-
|
| 171 |
-
@staticmethod
def _compute_stats(tif_path: str) -> dict[str, Any]:
    """Extract monthly LST statistics, converting Kelvin to Celsius.

    Each band is treated as one month. Returns per-month means, the
    overall mean over months with data, the count of valid months, and
    the 1-based index of the hottest band (used for rendering).
    Months with no valid pixels are recorded as 0.0 and excluded from
    valid_months and the overall mean — a genuine 0.0 °C month would be
    miscounted; acceptable for the hot regions this targets, presumably.
    """
    with rasterio.open(tif_path) as src:
        n_bands = src.count
        monthly_means_c = []
        hottest = -999.0
        hottest_band = 1
        for band in range(1, n_bands + 1):
            data = src.read(band).astype(np.float32)
            nodata = src.nodata
            if nodata is not None:
                valid = data[data != nodata]
            else:
                valid = data.ravel()
            if len(valid) > 0:
                mean_k = float(np.nanmean(valid))
                # Convert Kelvin to Celsius if needed (values > 100 assumed Kelvin)
                mean_c = mean_k - 273.15 if mean_k > 100 else mean_k
                monthly_means_c.append(mean_c)
                if mean_c > hottest:
                    hottest = mean_c
                    hottest_band = band
            else:
                monthly_means_c.append(0.0)

    # 0.0 doubles as the "no data" sentinel set above.
    valid_months = sum(1 for m in monthly_means_c if m != 0.0)
    overall = float(np.mean([m for m in monthly_means_c if m != 0.0])) if valid_months > 0 else 0.0

    return {
        "monthly_means_celsius": monthly_means_c,
        "overall_mean_celsius": overall,
        "valid_months": valid_months,
        "hottest_band": hottest_band,
    }
|
| 206 |
-
|
| 207 |
-
@staticmethod
def _classify(abs_z: float) -> StatusLevel:
    """Traffic-light status from the absolute temperature z-score.

    Below 1 sigma is GREEN, below 2 sigma AMBER, otherwise RED.
    """
    if abs_z >= 2.0:
        return StatusLevel.RED
    return StatusLevel.AMBER if abs_z >= 1.0 else StatusLevel.GREEN
|
| 214 |
-
|
| 215 |
-
@staticmethod
def _compute_trend(z_score: float) -> TrendDirection:
    """Map the signed z-score to a trend: warm anomalies deteriorate."""
    if -1.0 < z_score < 1.0:
        return TrendDirection.STABLE
    return TrendDirection.DETERIORATING if z_score > 0 else TrendDirection.IMPROVING
|
| 222 |
-
|
| 223 |
-
@staticmethod
|
| 224 |
-
def _build_chart_data(
|
| 225 |
-
current_monthly: list[float],
|
| 226 |
-
baseline_monthly: list[float],
|
| 227 |
-
time_range: TimeRange,
|
| 228 |
-
) -> dict[str, Any]:
|
| 229 |
-
year = time_range.end.year
|
| 230 |
-
n = min(len(current_monthly), len(baseline_monthly))
|
| 231 |
-
dates = [f"{year}-{m + 1:02d}" for m in range(n)]
|
| 232 |
-
values = [round(v, 1) for v in current_monthly[:n]]
|
| 233 |
-
b_mean = [round(v, 1) for v in baseline_monthly[:n]]
|
| 234 |
-
b_min = [round(v - 3.0, 1) for v in baseline_monthly[:n]]
|
| 235 |
-
b_max = [round(v + 3.0, 1) for v in baseline_monthly[:n]]
|
| 236 |
-
|
| 237 |
-
return {
|
| 238 |
-
"dates": dates,
|
| 239 |
-
"values": values,
|
| 240 |
-
"baseline_mean": b_mean,
|
| 241 |
-
"baseline_min": b_min,
|
| 242 |
-
"baseline_max": b_max,
|
| 243 |
-
"label": "Temperature (\u00b0C)",
|
| 244 |
-
}
|
| 245 |
-
|
| 246 |
-
def _fallback(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
    """Build a seeded synthetic LST result when openEO is unreachable.

    The RNG seed is fixed (6) so repeated failures produce the same
    placeholder numbers; draw order matches the original implementation.
    """
    generator = np.random.default_rng(6)
    base_temp = float(generator.uniform(30, 38))
    now_temp = base_temp + float(generator.uniform(-2, 3))
    anomaly_z = (now_temp - base_temp) / 2.0

    placeholder_chart = {
        "dates": [str(time_range.start.year), str(time_range.end.year)],
        "values": [round(base_temp, 1), round(now_temp, 1)],
        "label": "Temperature (\u00b0C)",
    }
    return IndicatorResult(
        indicator_id=self.id,
        headline=f"Temperature data degraded ({now_temp:.1f}\u00b0C)",
        status=self._classify(abs(anomaly_z)),
        trend=self._compute_trend(anomaly_z),
        confidence=ConfidenceLevel.LOW,
        map_layer_path="",
        chart_data=placeholder_chart,
        data_source="placeholder",
        summary="openEO processing unavailable. Showing placeholder values.",
        methodology="Placeholder \u2014 no satellite data processed.",
        limitations=["Data is synthetic. openEO backend was unreachable."],
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -103,9 +103,7 @@ class NdviIndicator(BaseIndicator):
|
|
| 103 |
current_path = self._find_tif(paths, current_dir)
|
| 104 |
print(f"[Aperture] NDVI current_path: {current_path} (exists={os.path.exists(current_path)}, size={os.path.getsize(current_path) if os.path.exists(current_path) else 'N/A'})")
|
| 105 |
except Exception as exc:
|
| 106 |
-
|
| 107 |
-
print(f"[Aperture] NDVI current download EXCEPTION: {type(exc).__name__}: {exc}")
|
| 108 |
-
return self._fallback(aoi, time_range)
|
| 109 |
|
| 110 |
# Download baseline — optional (degrades gracefully)
|
| 111 |
baseline_path = None
|
|
@@ -217,11 +215,7 @@ class NdviIndicator(BaseIndicator):
|
|
| 217 |
async def process(
|
| 218 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 219 |
) -> IndicatorResult:
|
| 220 |
-
|
| 221 |
-
return await self._process_openeo(aoi, time_range, season_months)
|
| 222 |
-
except Exception as exc:
|
| 223 |
-
logger.warning("NDVI openEO processing failed, using placeholder: %s", exc)
|
| 224 |
-
return self._fallback(aoi, time_range)
|
| 225 |
|
| 226 |
async def _process_openeo(
|
| 227 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
@@ -429,27 +423,3 @@ class NdviIndicator(BaseIndicator):
|
|
| 429 |
"label": "NDVI",
|
| 430 |
}
|
| 431 |
|
| 432 |
-
def _fallback(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 433 |
-
"""Return a placeholder result when openEO processing fails."""
|
| 434 |
-
rng = np.random.default_rng(7)
|
| 435 |
-
baseline = float(rng.uniform(0.25, 0.45))
|
| 436 |
-
current = baseline * float(rng.uniform(0.90, 1.02))
|
| 437 |
-
change = current - baseline
|
| 438 |
-
|
| 439 |
-
return IndicatorResult(
|
| 440 |
-
indicator_id=self.id,
|
| 441 |
-
headline=f"Vegetation data degraded (NDVI \u2248{current:.2f})",
|
| 442 |
-
status=self._classify(change),
|
| 443 |
-
trend=self._compute_trend(change),
|
| 444 |
-
confidence=ConfidenceLevel.LOW,
|
| 445 |
-
map_layer_path="",
|
| 446 |
-
chart_data={
|
| 447 |
-
"dates": [str(time_range.start.year), str(time_range.end.year)],
|
| 448 |
-
"values": [round(baseline, 3), round(current, 3)],
|
| 449 |
-
"label": "NDVI",
|
| 450 |
-
},
|
| 451 |
-
data_source="placeholder",
|
| 452 |
-
summary="openEO processing unavailable. Showing placeholder values.",
|
| 453 |
-
methodology="Placeholder \u2014 no satellite data processed.",
|
| 454 |
-
limitations=["Data is synthetic. openEO backend was unreachable."],
|
| 455 |
-
)
|
|
|
|
| 103 |
current_path = self._find_tif(paths, current_dir)
|
| 104 |
print(f"[Aperture] NDVI current_path: {current_path} (exists={os.path.exists(current_path)}, size={os.path.getsize(current_path) if os.path.exists(current_path) else 'N/A'})")
|
| 105 |
except Exception as exc:
|
| 106 |
+
raise RuntimeError(f"NDVI current period data unavailable: {exc}") from exc
|
|
|
|
|
|
|
| 107 |
|
| 108 |
# Download baseline — optional (degrades gracefully)
|
| 109 |
baseline_path = None
|
|
|
|
| 215 |
async def process(
|
| 216 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 217 |
) -> IndicatorResult:
|
| 218 |
+
return await self._process_openeo(aoi, time_range, season_months)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 219 |
|
| 220 |
async def _process_openeo(
|
| 221 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
|
|
| 423 |
"label": "NDVI",
|
| 424 |
}
|
| 425 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,229 +0,0 @@
|
|
| 1 |
-
"""Nighttime Lights Indicator — VIIRS DNB via EOG direct download.
|
| 2 |
-
|
| 3 |
-
Downloads annual VIIRS DNB composites from Colorado School of Mines EOG,
|
| 4 |
-
compares current-year radiance to a 3-year baseline.
|
| 5 |
-
"""
|
| 6 |
-
from __future__ import annotations
|
| 7 |
-
|
| 8 |
-
import logging
|
| 9 |
-
import os
|
| 10 |
-
import tempfile
|
| 11 |
-
from datetime import date
|
| 12 |
-
from typing import Any
|
| 13 |
-
|
| 14 |
-
import numpy as np
|
| 15 |
-
import rasterio
|
| 16 |
-
import httpx
|
| 17 |
-
|
| 18 |
-
from app.indicators.base import BaseIndicator, SpatialData
|
| 19 |
-
from app.models import (
|
| 20 |
-
AOI,
|
| 21 |
-
TimeRange,
|
| 22 |
-
IndicatorResult,
|
| 23 |
-
StatusLevel,
|
| 24 |
-
TrendDirection,
|
| 25 |
-
ConfidenceLevel,
|
| 26 |
-
)
|
| 27 |
-
|
| 28 |
-
logger = logging.getLogger(__name__)
|
| 29 |
-
|
| 30 |
-
BASELINE_YEARS = 3
|
| 31 |
-
|
| 32 |
-
# EOG VIIRS DNB annual composites (public, COG format)
|
| 33 |
-
EOG_BASE = "https://eogdata.mines.edu/nighttime_light/annual/v22"
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
class NightlightsIndicator(BaseIndicator):
|
| 37 |
-
id = "nightlights"
|
| 38 |
-
name = "Nighttime Lights"
|
| 39 |
-
category = "D3"
|
| 40 |
-
question = "Is the local economy active?"
|
| 41 |
-
estimated_minutes = 10
|
| 42 |
-
|
| 43 |
-
_true_color_path: str | None = None
|
| 44 |
-
|
| 45 |
-
async def process(
    self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
) -> IndicatorResult:
    """Fetch and score VIIRS nighttime lights; degrade to placeholder on error."""
    try:
        outcome = await self._process_viirs(aoi, time_range)
    except Exception as exc:
        logger.warning("Nightlights download failed, using placeholder: %s", exc)
        outcome = self._fallback(aoi, time_range)
    return outcome
|
| 53 |
-
|
| 54 |
-
async def _process_viirs(
    self, aoi: AOI, time_range: TimeRange
) -> IndicatorResult:
    """Compare current-year VIIRS radiance against a 3-year-old baseline.

    Downloads two annual composites clipped to the AOI, computes the
    percent change in mean radiance, and classifies it.
    """
    results_dir = tempfile.mkdtemp(prefix="aperture_nightlights_")

    current_year = time_range.end.year
    current_path = os.path.join(results_dir, "viirs_current.tif")
    baseline_path = os.path.join(results_dir, "viirs_baseline.tif")

    await self._download_viirs(aoi.bbox, current_year, current_path)
    await self._download_viirs(aoi.bbox, current_year - BASELINE_YEARS, baseline_path)

    current_stats = self._compute_stats(current_path)
    baseline_stats = self._compute_stats(baseline_path)

    current_rad = current_stats["mean_radiance"]
    baseline_rad = baseline_stats["mean_radiance"]
    # Guard against a dark/empty baseline: report 0% change instead of dividing by zero.
    pct_change = ((current_rad - baseline_rad) / baseline_rad * 100) if baseline_rad > 0 else 0.0

    status = self._classify(pct_change)
    trend = self._compute_trend(pct_change)
    # Confidence scales with the share of usable pixels in the current composite.
    confidence = (
        ConfidenceLevel.HIGH if current_stats["valid_pixel_fraction"] >= 0.7
        else ConfidenceLevel.MODERATE if current_stats["valid_pixel_fraction"] >= 0.4
        else ConfidenceLevel.LOW
    )

    # Two-point chart: baseline year vs current year, with a +/-15% band
    # around the baseline radiance for visual context.
    chart_data = {
        "dates": [str(current_year - BASELINE_YEARS), str(current_year)],
        "values": [round(baseline_rad, 2), round(current_rad, 2)],
        "baseline_range_mean": round(baseline_rad, 2),
        "baseline_range_min": round(baseline_rad * 0.85, 2),
        "baseline_range_max": round(baseline_rad * 1.15, 2),
        "label": "Radiance (nW/cm\u00b2/sr)",
    }

    if abs(pct_change) <= 15:
        headline = f"Nighttime lights stable ({current_rad:.1f} nW, {pct_change:+.0f}%)"
    elif pct_change < 0:
        headline = f"Nighttime lights declining ({pct_change:.0f}%)"
    else:
        headline = f"Nighttime lights increasing (+{pct_change:.0f}%)"

    # Rendering hints consumed by the map/report layer.
    self._spatial_data = SpatialData(
        map_type="raster",
        label="Radiance (nW/cm\u00b2/sr)",
        colormap="inferno",
        vmin=0,
        vmax=max(current_rad * 2, 10),
    )
    self._indicator_raster_path = current_path
    self._true_color_path = None
    self._render_band = 1

    return IndicatorResult(
        indicator_id=self.id,
        headline=headline,
        status=status,
        trend=trend,
        confidence=confidence,
        map_layer_path=current_path,
        chart_data=chart_data,
        data_source="satellite",
        summary=(
            f"Mean radiance is {current_rad:.2f} nW/cm\u00b2/sr compared to "
            f"{baseline_rad:.2f} baseline ({pct_change:+.1f}%). "
            f"VIIRS DNB annual composite at ~500m resolution."
        ),
        methodology=(
            f"VIIRS Day-Night Band annual composites from Colorado School of Mines EOG. "
            f"Stray-light corrected, cloud-free composite. "
            f"Clipped to AOI bounding box. "
            f"Baseline: {BASELINE_YEARS}-year prior annual composite."
        ),
        limitations=[
            "Annual composites \u2014 cannot detect sub-annual changes.",
            "Moonlight, fires, and gas flaring inflate radiance values.",
            "~500m resolution smooths urban-rural boundaries.",
            "Most recent annual composite may lag by several months.",
        ],
    )
|
| 135 |
-
|
| 136 |
-
async def _download_viirs(
    self, bbox: list[float], year: int, output_path: str
) -> None:
    """Download VIIRS DNB annual composite and clip to AOI bbox.

    Uses COG (Cloud-Optimized GeoTIFF) with HTTP range requests to read
    only the AOI window from the full global file, then writes the clip
    to *output_path* as an EPSG:4326 GeoTIFF. The blocking rasterio work
    runs in the default executor so the event loop is not stalled.

    Raises whatever rasterio raises on network or format errors; the
    caller's process() wrapper handles the fallback.
    """
    import asyncio
    from rasterio.windows import from_bounds as window_from_bounds

    loop = asyncio.get_event_loop()

    def _read_cog():
        # EOG provides global annual composites as COGs
        url = f"{EOG_BASE}/{year}/VNP46A4_t{year}.average_masked.tif"

        with rasterio.open(url) as src:
            window = window_from_bounds(*bbox, transform=src.transform)
            data = src.read(1, window=window).astype(np.float32)
            # (removed: the window transform was computed here but never
            # used — the output transform is rebuilt from the bbox below)

        from rasterio.transform import from_bounds
        h, w = data.shape
        t = from_bounds(*bbox, w, h)

        with rasterio.open(
            output_path, "w", driver="GTiff",
            height=h, width=w, count=1,
            dtype="float32", crs="EPSG:4326",
            transform=t, nodata=-9999.0,
        ) as dst:
            dst.write(data, 1)

    await loop.run_in_executor(None, _read_cog)
|
| 171 |
-
|
| 172 |
-
@staticmethod
|
| 173 |
-
def _compute_stats(tif_path: str) -> dict[str, Any]:
|
| 174 |
-
"""Extract radiance statistics from VIIRS GeoTIFF."""
|
| 175 |
-
with rasterio.open(tif_path) as src:
|
| 176 |
-
data = src.read(1).astype(np.float32)
|
| 177 |
-
nodata = src.nodata
|
| 178 |
-
if nodata is not None:
|
| 179 |
-
valid = data[data != nodata]
|
| 180 |
-
else:
|
| 181 |
-
valid = data.ravel()
|
| 182 |
-
valid = valid[valid >= 0] # Remove negative radiance
|
| 183 |
-
total_pixels = data.size
|
| 184 |
-
valid_fraction = len(valid) / total_pixels if total_pixels > 0 else 0.0
|
| 185 |
-
|
| 186 |
-
return {
|
| 187 |
-
"mean_radiance": float(np.mean(valid)) if len(valid) > 0 else 0.0,
|
| 188 |
-
"valid_pixel_fraction": valid_fraction,
|
| 189 |
-
}
|
| 190 |
-
|
| 191 |
-
@staticmethod
|
| 192 |
-
def _classify(pct_change: float) -> StatusLevel:
|
| 193 |
-
if pct_change > -15:
|
| 194 |
-
return StatusLevel.GREEN
|
| 195 |
-
if pct_change > -40:
|
| 196 |
-
return StatusLevel.AMBER
|
| 197 |
-
return StatusLevel.RED
|
| 198 |
-
|
| 199 |
-
@staticmethod
|
| 200 |
-
def _compute_trend(pct_change: float) -> TrendDirection:
|
| 201 |
-
if abs(pct_change) <= 15:
|
| 202 |
-
return TrendDirection.STABLE
|
| 203 |
-
if pct_change < 0:
|
| 204 |
-
return TrendDirection.DETERIORATING
|
| 205 |
-
return TrendDirection.IMPROVING
|
| 206 |
-
|
| 207 |
-
def _fallback(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 208 |
-
rng = np.random.default_rng(3)
|
| 209 |
-
baseline = float(rng.uniform(2, 10))
|
| 210 |
-
current = baseline * float(rng.uniform(0.7, 1.1))
|
| 211 |
-
pct = ((current - baseline) / baseline) * 100
|
| 212 |
-
|
| 213 |
-
return IndicatorResult(
|
| 214 |
-
indicator_id=self.id,
|
| 215 |
-
headline=f"Nightlights data degraded ({current:.1f} nW)",
|
| 216 |
-
status=self._classify(pct),
|
| 217 |
-
trend=self._compute_trend(pct),
|
| 218 |
-
confidence=ConfidenceLevel.LOW,
|
| 219 |
-
map_layer_path="",
|
| 220 |
-
chart_data={
|
| 221 |
-
"dates": [str(time_range.start.year), str(time_range.end.year)],
|
| 222 |
-
"values": [round(baseline, 2), round(current, 2)],
|
| 223 |
-
"label": "Radiance (nW/cm\u00b2/sr)",
|
| 224 |
-
},
|
| 225 |
-
data_source="placeholder",
|
| 226 |
-
summary="VIIRS download unavailable. Showing placeholder values.",
|
| 227 |
-
methodology="Placeholder \u2014 no satellite data processed.",
|
| 228 |
-
limitations=["Data is synthetic. VIIRS data was unreachable."],
|
| 229 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,256 +0,0 @@
|
|
| 1 |
-
from __future__ import annotations
|
| 2 |
-
|
| 3 |
-
from collections import defaultdict
|
| 4 |
-
from datetime import date
|
| 5 |
-
from typing import Any
|
| 6 |
-
|
| 7 |
-
import numpy as np
|
| 8 |
-
|
| 9 |
-
import httpx
|
| 10 |
-
|
| 11 |
-
from app.indicators.base import BaseIndicator
|
| 12 |
-
from app.models import (
|
| 13 |
-
AOI,
|
| 14 |
-
TimeRange,
|
| 15 |
-
IndicatorResult,
|
| 16 |
-
StatusLevel,
|
| 17 |
-
TrendDirection,
|
| 18 |
-
ConfidenceLevel,
|
| 19 |
-
)
|
| 20 |
-
|
| 21 |
-
BASELINE_YEARS = 3
|
| 22 |
-
AIR_QUALITY_API = "https://air-quality-api.open-meteo.com/v1/air-quality"
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
class NO2Indicator(BaseIndicator):
|
| 26 |
-
id = "no2"
|
| 27 |
-
name = "Air Quality NO2"
|
| 28 |
-
category = "D7"
|
| 29 |
-
question = "Signs of industrial activity or destruction?"
|
| 30 |
-
estimated_minutes = 5
|
| 31 |
-
|
| 32 |
-
async def process(self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None) -> IndicatorResult:
|
| 33 |
-
current_no2, baseline_mean, baseline_std = await self._fetch_no2(aoi, time_range)
|
| 34 |
-
|
| 35 |
-
if baseline_std > 0:
|
| 36 |
-
z_score = (current_no2 - baseline_mean) / baseline_std
|
| 37 |
-
else:
|
| 38 |
-
z_score = 0.0
|
| 39 |
-
|
| 40 |
-
status = self._classify(abs(z_score))
|
| 41 |
-
trend = self._compute_trend(z_score)
|
| 42 |
-
confidence = ConfidenceLevel.MODERATE
|
| 43 |
-
|
| 44 |
-
if season_months and hasattr(self, '_current_monthly_means') and hasattr(self, '_baseline_per_year_monthly'):
|
| 45 |
-
chart_data = self._build_monthly_chart_data(
|
| 46 |
-
self._current_monthly_means,
|
| 47 |
-
self._baseline_per_year_monthly,
|
| 48 |
-
time_range,
|
| 49 |
-
season_months,
|
| 50 |
-
)
|
| 51 |
-
else:
|
| 52 |
-
chart_data = self._build_chart_data(
|
| 53 |
-
current_no2, baseline_mean, baseline_std, time_range,
|
| 54 |
-
getattr(self, '_baseline_yearly_means', None),
|
| 55 |
-
)
|
| 56 |
-
|
| 57 |
-
direction = "above" if z_score >= 0 else "below"
|
| 58 |
-
abs_z = abs(z_score)
|
| 59 |
-
|
| 60 |
-
if abs_z < 1:
|
| 61 |
-
headline = f"NO2 concentration normal — {abs_z:.1f} SD {direction} baseline"
|
| 62 |
-
elif abs_z < 2:
|
| 63 |
-
headline = f"Elevated NO2 — {abs_z:.1f} SD {direction} baseline"
|
| 64 |
-
else:
|
| 65 |
-
headline = f"Anomalous NO2 levels — {abs_z:.1f} SD {direction} baseline"
|
| 66 |
-
|
| 67 |
-
return IndicatorResult(
|
| 68 |
-
indicator_id=self.id,
|
| 69 |
-
headline=headline,
|
| 70 |
-
status=status,
|
| 71 |
-
trend=trend,
|
| 72 |
-
confidence=confidence,
|
| 73 |
-
map_layer_path="",
|
| 74 |
-
chart_data=chart_data,
|
| 75 |
-
data_source="placeholder" if getattr(self, '_is_placeholder', False) else "satellite",
|
| 76 |
-
summary=(
|
| 77 |
-
f"Mean NO2 concentration is {current_no2:.1f} µg/m³. "
|
| 78 |
-
f"Baseline mean: {baseline_mean:.1f} µg/m³, std: {baseline_std:.1f} µg/m³. "
|
| 79 |
-
f"Anomaly: {z_score:.2f} SD. "
|
| 80 |
-
f"Status: {status.value}. Trend: {trend.value}."
|
| 81 |
-
),
|
| 82 |
-
methodology=(
|
| 83 |
-
"Hourly NO2 concentrations from the CAMS global atmospheric composition "
|
| 84 |
-
"model are retrieved via the Open-Meteo Air Quality API. Daily means are "
|
| 85 |
-
f"computed and compared to a {BASELINE_YEARS}-year climatological distribution "
|
| 86 |
-
"using z-score anomaly detection. AOI centroid is used as the query point."
|
| 87 |
-
),
|
| 88 |
-
limitations=[
|
| 89 |
-
"Uses CAMS reanalysis at ~10 km resolution — local hotspots may be smoothed.",
|
| 90 |
-
"Model-derived, not direct satellite retrieval — depends on emissions inventory accuracy.",
|
| 91 |
-
"AOI centroid is used; large AOIs may miss spatial variability.",
|
| 92 |
-
"Seasonal biomass burning can be misinterpreted as industrial activity.",
|
| 93 |
-
],
|
| 94 |
-
)
|
| 95 |
-
|
| 96 |
-
async def _fetch_no2(
|
| 97 |
-
self, aoi: AOI, time_range: TimeRange
|
| 98 |
-
) -> tuple[float, float, float]:
|
| 99 |
-
"""Returns (current_no2_mean, baseline_mean, baseline_std) in µg/m³."""
|
| 100 |
-
self._is_placeholder = False
|
| 101 |
-
try:
|
| 102 |
-
return await self._api_query(aoi, time_range)
|
| 103 |
-
except Exception as exc:
|
| 104 |
-
import logging
|
| 105 |
-
logging.getLogger(__name__).warning(
|
| 106 |
-
"NO2 API query failed, using placeholder: %s", exc
|
| 107 |
-
)
|
| 108 |
-
self._is_placeholder = True
|
| 109 |
-
return self._synthetic_no2()
|
| 110 |
-
|
| 111 |
-
async def _api_query(
|
| 112 |
-
self, aoi: AOI, time_range: TimeRange
|
| 113 |
-
) -> tuple[float, float, float]:
|
| 114 |
-
lat = (aoi.bbox[1] + aoi.bbox[3]) / 2
|
| 115 |
-
lon = (aoi.bbox[0] + aoi.bbox[2]) / 2
|
| 116 |
-
current_year = time_range.end.year
|
| 117 |
-
baseline_start = current_year - BASELINE_YEARS
|
| 118 |
-
|
| 119 |
-
async with httpx.AsyncClient(timeout=30) as client:
|
| 120 |
-
# Current period
|
| 121 |
-
current_resp = await client.get(AIR_QUALITY_API, params={
|
| 122 |
-
"latitude": lat,
|
| 123 |
-
"longitude": lon,
|
| 124 |
-
"hourly": "nitrogen_dioxide",
|
| 125 |
-
"start_date": time_range.start.isoformat(),
|
| 126 |
-
"end_date": time_range.end.isoformat(),
|
| 127 |
-
"timezone": "auto",
|
| 128 |
-
})
|
| 129 |
-
current_resp.raise_for_status()
|
| 130 |
-
current_hourly = current_resp.json()["hourly"]
|
| 131 |
-
current_vals = [v for v in current_hourly["nitrogen_dioxide"] if v is not None]
|
| 132 |
-
|
| 133 |
-
# Aggregate hourly to monthly means for current period
|
| 134 |
-
current_monthly_means: dict[int, float] = {}
|
| 135 |
-
monthly_vals: dict[int, list[float]] = defaultdict(list)
|
| 136 |
-
for t, v in zip(current_hourly["time"], current_hourly["nitrogen_dioxide"]):
|
| 137 |
-
if v is not None:
|
| 138 |
-
monthly_vals[int(t[5:7])].append(v)
|
| 139 |
-
for m, vals in monthly_vals.items():
|
| 140 |
-
current_monthly_means[m] = float(np.mean(vals))
|
| 141 |
-
self._current_monthly_means = current_monthly_means
|
| 142 |
-
|
| 143 |
-
# Baseline: query each year
|
| 144 |
-
baseline_yearly_means: list[float] = []
|
| 145 |
-
baseline_per_year_monthly: dict[int, list[float]] = defaultdict(list)
|
| 146 |
-
for yr in range(baseline_start, current_year):
|
| 147 |
-
resp = await client.get(AIR_QUALITY_API, params={
|
| 148 |
-
"latitude": lat,
|
| 149 |
-
"longitude": lon,
|
| 150 |
-
"hourly": "nitrogen_dioxide",
|
| 151 |
-
"start_date": date(yr, 1, 1).isoformat(),
|
| 152 |
-
"end_date": date(yr, 12, 31).isoformat(),
|
| 153 |
-
"timezone": "auto",
|
| 154 |
-
})
|
| 155 |
-
resp.raise_for_status()
|
| 156 |
-
yr_hourly = resp.json()["hourly"]
|
| 157 |
-
yr_monthly: dict[int, list[float]] = defaultdict(list)
|
| 158 |
-
for t, v in zip(yr_hourly["time"], yr_hourly["nitrogen_dioxide"]):
|
| 159 |
-
if v is not None:
|
| 160 |
-
yr_monthly[int(t[5:7])].append(v)
|
| 161 |
-
yr_means = []
|
| 162 |
-
for m, vals in yr_monthly.items():
|
| 163 |
-
monthly_mean = float(np.mean(vals))
|
| 164 |
-
yr_means.append(monthly_mean)
|
| 165 |
-
baseline_per_year_monthly[m].append(monthly_mean)
|
| 166 |
-
if yr_means:
|
| 167 |
-
baseline_yearly_means.append(float(np.mean(yr_means)))
|
| 168 |
-
|
| 169 |
-
self._baseline_per_year_monthly = dict(baseline_per_year_monthly)
|
| 170 |
-
|
| 171 |
-
if not current_vals or not baseline_yearly_means:
|
| 172 |
-
self._is_placeholder = True
|
| 173 |
-
return self._synthetic_no2()
|
| 174 |
-
|
| 175 |
-
self._baseline_yearly_means = baseline_yearly_means
|
| 176 |
-
return (
|
| 177 |
-
float(np.mean(current_vals)),
|
| 178 |
-
float(np.mean(baseline_yearly_means)),
|
| 179 |
-
float(np.std(baseline_yearly_means)) or 1e-3,
|
| 180 |
-
)
|
| 181 |
-
|
| 182 |
-
@staticmethod
|
| 183 |
-
def _synthetic_no2() -> tuple[float, float, float]:
|
| 184 |
-
"""Plausible NO2 values for offline/test environments (µg/m³)."""
|
| 185 |
-
baseline_mean = 15.0
|
| 186 |
-
baseline_std = 4.0
|
| 187 |
-
current_no2 = baseline_mean * 1.1
|
| 188 |
-
return current_no2, baseline_mean, baseline_std
|
| 189 |
-
|
| 190 |
-
@staticmethod
|
| 191 |
-
def _classify(abs_z: float) -> StatusLevel:
|
| 192 |
-
if abs_z < 1.0:
|
| 193 |
-
return StatusLevel.GREEN
|
| 194 |
-
if abs_z < 2.0:
|
| 195 |
-
return StatusLevel.AMBER
|
| 196 |
-
return StatusLevel.RED
|
| 197 |
-
|
| 198 |
-
@staticmethod
|
| 199 |
-
def _compute_trend(z_score: float) -> TrendDirection:
|
| 200 |
-
if z_score > 1.0:
|
| 201 |
-
return TrendDirection.DETERIORATING
|
| 202 |
-
if z_score < -1.0:
|
| 203 |
-
return TrendDirection.IMPROVING
|
| 204 |
-
return TrendDirection.STABLE
|
| 205 |
-
|
| 206 |
-
@staticmethod
|
| 207 |
-
def _build_monthly_chart_data(
|
| 208 |
-
current_monthly: dict[int, float],
|
| 209 |
-
baseline_per_year_monthly: dict[int, list[float]],
|
| 210 |
-
time_range: TimeRange,
|
| 211 |
-
season_months: list[int],
|
| 212 |
-
) -> dict[str, Any]:
|
| 213 |
-
year = time_range.end.year
|
| 214 |
-
dates, values, b_mean, b_min, b_max = [], [], [], [], []
|
| 215 |
-
for m in season_months:
|
| 216 |
-
dates.append(f"{year}-{m:02d}")
|
| 217 |
-
values.append(round(current_monthly.get(m, 0.0), 1))
|
| 218 |
-
yr_means = baseline_per_year_monthly.get(m, [])
|
| 219 |
-
if yr_means:
|
| 220 |
-
b_mean.append(round(float(np.mean(yr_means)), 1))
|
| 221 |
-
b_min.append(round(float(min(yr_means)), 1))
|
| 222 |
-
b_max.append(round(float(max(yr_means)), 1))
|
| 223 |
-
else:
|
| 224 |
-
b_mean.append(0.0)
|
| 225 |
-
b_min.append(0.0)
|
| 226 |
-
b_max.append(0.0)
|
| 227 |
-
result: dict[str, Any] = {
|
| 228 |
-
"dates": dates,
|
| 229 |
-
"values": values,
|
| 230 |
-
"label": "NO2 concentration (µg/m³)",
|
| 231 |
-
}
|
| 232 |
-
if any(v > 0 for v in b_mean):
|
| 233 |
-
result["baseline_mean"] = b_mean
|
| 234 |
-
result["baseline_min"] = b_min
|
| 235 |
-
result["baseline_max"] = b_max
|
| 236 |
-
return result
|
| 237 |
-
|
| 238 |
-
@staticmethod
|
| 239 |
-
def _build_chart_data(
|
| 240 |
-
current: float,
|
| 241 |
-
baseline_mean: float,
|
| 242 |
-
baseline_std: float,
|
| 243 |
-
time_range: TimeRange,
|
| 244 |
-
baseline_yearly_means: list[float] | None = None,
|
| 245 |
-
) -> dict[str, Any]:
|
| 246 |
-
result: dict[str, Any] = {
|
| 247 |
-
"dates": ["baseline", str(time_range.end.year)],
|
| 248 |
-
"values": [round(baseline_mean, 1), round(current, 1)],
|
| 249 |
-
"baseline_std": round(baseline_std, 1),
|
| 250 |
-
"label": "NO2 concentration (µg/m³)",
|
| 251 |
-
}
|
| 252 |
-
if baseline_yearly_means and len(baseline_yearly_means) >= 2:
|
| 253 |
-
result["baseline_range_mean"] = round(float(np.mean(baseline_yearly_means)), 1)
|
| 254 |
-
result["baseline_range_min"] = round(float(min(baseline_yearly_means)), 1)
|
| 255 |
-
result["baseline_range_max"] = round(float(max(baseline_yearly_means)), 1)
|
| 256 |
-
return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,304 +0,0 @@
|
|
| 1 |
-
"""Rainfall Indicator — CHIRPS v2.0 precipitation via direct download.
|
| 2 |
-
|
| 3 |
-
Downloads monthly CHIRPS GeoTIFFs, computes SPI-like deviation from
|
| 4 |
-
a 5-year climatological baseline, and classifies drought severity.
|
| 5 |
-
"""
|
| 6 |
-
from __future__ import annotations
|
| 7 |
-
|
| 8 |
-
import logging
|
| 9 |
-
import os
|
| 10 |
-
import tempfile
|
| 11 |
-
from datetime import date
|
| 12 |
-
from typing import Any
|
| 13 |
-
|
| 14 |
-
import numpy as np
|
| 15 |
-
import rasterio
|
| 16 |
-
import httpx
|
| 17 |
-
|
| 18 |
-
from app.indicators.base import BaseIndicator, SpatialData
|
| 19 |
-
from app.models import (
|
| 20 |
-
AOI,
|
| 21 |
-
TimeRange,
|
| 22 |
-
IndicatorResult,
|
| 23 |
-
StatusLevel,
|
| 24 |
-
TrendDirection,
|
| 25 |
-
ConfidenceLevel,
|
| 26 |
-
)
|
| 27 |
-
|
| 28 |
-
logger = logging.getLogger(__name__)
|
| 29 |
-
|
| 30 |
-
BASELINE_YEARS = 5
|
| 31 |
-
|
| 32 |
-
# CHIRPS monthly data URL pattern (public, no auth needed)
|
| 33 |
-
# Format: https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_monthly/tifs/chirps-v2.0.YYYY.MM.tif.gz
|
| 34 |
-
CHIRPS_BASE = "https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_monthly/tifs"
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
class RainfallIndicator(BaseIndicator):
|
| 38 |
-
id = "rainfall"
|
| 39 |
-
name = "Rainfall Adequacy"
|
| 40 |
-
category = "D5"
|
| 41 |
-
question = "Is this area getting enough rain?"
|
| 42 |
-
estimated_minutes = 10
|
| 43 |
-
|
| 44 |
-
_true_color_path: str | None = None
|
| 45 |
-
|
| 46 |
-
async def process(
|
| 47 |
-
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 48 |
-
) -> IndicatorResult:
|
| 49 |
-
try:
|
| 50 |
-
return await self._process_chirps(aoi, time_range, season_months)
|
| 51 |
-
except Exception as exc:
|
| 52 |
-
logger.warning("Rainfall CHIRPS processing failed, using placeholder: %s", exc)
|
| 53 |
-
return self._fallback(aoi, time_range)
|
| 54 |
-
|
| 55 |
-
async def _process_chirps(
|
| 56 |
-
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
| 57 |
-
) -> IndicatorResult:
|
| 58 |
-
results_dir = tempfile.mkdtemp(prefix="aperture_rainfall_")
|
| 59 |
-
|
| 60 |
-
current_path = os.path.join(results_dir, "precip_current.tif")
|
| 61 |
-
baseline_path = os.path.join(results_dir, "precip_baseline.tif")
|
| 62 |
-
|
| 63 |
-
await self._download_chirps(
|
| 64 |
-
aoi.bbox, time_range.start, time_range.end, current_path,
|
| 65 |
-
)
|
| 66 |
-
baseline_start = date(
|
| 67 |
-
time_range.start.year - BASELINE_YEARS,
|
| 68 |
-
time_range.start.month,
|
| 69 |
-
time_range.start.day,
|
| 70 |
-
)
|
| 71 |
-
await self._download_chirps(
|
| 72 |
-
aoi.bbox, baseline_start, time_range.start, baseline_path,
|
| 73 |
-
)
|
| 74 |
-
|
| 75 |
-
current_stats = self._compute_stats(current_path)
|
| 76 |
-
baseline_stats = self._compute_stats(baseline_path)
|
| 77 |
-
|
| 78 |
-
current_total = current_stats["total_mm"]
|
| 79 |
-
baseline_total = baseline_stats["total_mm"]
|
| 80 |
-
deviation_pct = (
|
| 81 |
-
((current_total - baseline_total) / baseline_total * 100.0)
|
| 82 |
-
if baseline_total > 0 else 0.0
|
| 83 |
-
)
|
| 84 |
-
|
| 85 |
-
status = self._classify(deviation_pct)
|
| 86 |
-
trend = self._compute_trend(deviation_pct)
|
| 87 |
-
confidence = (
|
| 88 |
-
ConfidenceLevel.HIGH if current_stats["valid_months"] >= 6
|
| 89 |
-
else ConfidenceLevel.MODERATE if current_stats["valid_months"] >= 3
|
| 90 |
-
else ConfidenceLevel.LOW
|
| 91 |
-
)
|
| 92 |
-
|
| 93 |
-
chart_data = self._build_chart_data(
|
| 94 |
-
current_stats["monthly_means_mm"],
|
| 95 |
-
baseline_stats["monthly_means_mm"],
|
| 96 |
-
time_range,
|
| 97 |
-
)
|
| 98 |
-
|
| 99 |
-
if abs(deviation_pct) <= 15:
|
| 100 |
-
headline = f"Rainfall near normal ({current_total:.0f}mm, {deviation_pct:+.0f}%)"
|
| 101 |
-
elif deviation_pct < 0:
|
| 102 |
-
headline = f"Rainfall deficit ({deviation_pct:.0f}% below baseline)"
|
| 103 |
-
else:
|
| 104 |
-
headline = f"Above-normal rainfall ({deviation_pct:+.0f}% above baseline)"
|
| 105 |
-
|
| 106 |
-
self._spatial_data = SpatialData(
|
| 107 |
-
map_type="raster",
|
| 108 |
-
label="Precipitation (mm)",
|
| 109 |
-
colormap="YlGnBu",
|
| 110 |
-
vmin=0,
|
| 111 |
-
vmax=max(current_stats["monthly_means_mm"]) * 1.5 if current_stats["monthly_means_mm"] else 100,
|
| 112 |
-
)
|
| 113 |
-
self._indicator_raster_path = current_path
|
| 114 |
-
self._true_color_path = None # No true-color for rainfall (different resolution)
|
| 115 |
-
self._render_band = current_stats.get("wettest_band", 1)
|
| 116 |
-
|
| 117 |
-
return IndicatorResult(
|
| 118 |
-
indicator_id=self.id,
|
| 119 |
-
headline=headline,
|
| 120 |
-
status=status,
|
| 121 |
-
trend=trend,
|
| 122 |
-
confidence=confidence,
|
| 123 |
-
map_layer_path=current_path,
|
| 124 |
-
chart_data=chart_data,
|
| 125 |
-
data_source="satellite",
|
| 126 |
-
summary=(
|
| 127 |
-
f"Total precipitation is {current_total:.0f}mm compared to "
|
| 128 |
-
f"{baseline_total:.0f}mm baseline ({deviation_pct:+.1f}%). "
|
| 129 |
-
f"CHIRPS v2.0 at ~5km resolution."
|
| 130 |
-
),
|
| 131 |
-
methodology=(
|
| 132 |
-
f"CHIRPS v2.0 monthly precipitation estimates (0.05\u00b0 resolution). "
|
| 133 |
-
f"Clipped to AOI bounding box. "
|
| 134 |
-
f"Baseline: {BASELINE_YEARS}-year monthly climatology. "
|
| 135 |
-
f"Deviation from baseline classified as drought severity."
|
| 136 |
-
),
|
| 137 |
-
limitations=[
|
| 138 |
-
"CHIRPS resolution is ~5km \u2014 local rainfall variability not captured.",
|
| 139 |
-
"Satellite-gauge blend may underestimate in data-sparse regions.",
|
| 140 |
-
"Orographic effects poorly represented at this resolution.",
|
| 141 |
-
"No distinction between effective and non-effective rainfall.",
|
| 142 |
-
],
|
| 143 |
-
)
|
| 144 |
-
|
| 145 |
-
async def _download_chirps(
|
| 146 |
-
self,
|
| 147 |
-
bbox: list[float],
|
| 148 |
-
start: date,
|
| 149 |
-
end: date,
|
| 150 |
-
output_path: str,
|
| 151 |
-
) -> None:
|
| 152 |
-
"""Download CHIRPS monthly data and create a multi-band GeoTIFF clipped to AOI.
|
| 153 |
-
|
| 154 |
-
Each band is one month of precipitation data (mm).
|
| 155 |
-
"""
|
| 156 |
-
from rasterio.windows import from_bounds as window_from_bounds
|
| 157 |
-
|
| 158 |
-
monthly_data = []
|
| 159 |
-
|
| 160 |
-
async def _fetch_month(year: int, month: int) -> np.ndarray | None:
|
| 161 |
-
url = f"{CHIRPS_BASE}/chirps-v2.0.{year}.{month:02d}.tif.gz"
|
| 162 |
-
try:
|
| 163 |
-
async with httpx.AsyncClient(timeout=60) as client:
|
| 164 |
-
resp = await client.get(url)
|
| 165 |
-
if resp.status_code != 200:
|
| 166 |
-
logger.warning("CHIRPS download failed for %d-%02d: %d", year, month, resp.status_code)
|
| 167 |
-
return None
|
| 168 |
-
|
| 169 |
-
# Decompress and read the subset
|
| 170 |
-
import gzip
|
| 171 |
-
import io
|
| 172 |
-
decompressed = gzip.decompress(resp.content)
|
| 173 |
-
with rasterio.open(io.BytesIO(decompressed)) as src:
|
| 174 |
-
window = window_from_bounds(*bbox, transform=src.transform)
|
| 175 |
-
data = src.read(1, window=window).astype(np.float32)
|
| 176 |
-
return data
|
| 177 |
-
except Exception as exc:
|
| 178 |
-
logger.warning("CHIRPS fetch error for %d-%02d: %s", year, month, exc)
|
| 179 |
-
return None
|
| 180 |
-
|
| 181 |
-
# Collect monthly data
|
| 182 |
-
current = date(start.year, start.month, 1)
|
| 183 |
-
while current < end:
|
| 184 |
-
data = await _fetch_month(current.year, current.month)
|
| 185 |
-
if data is not None:
|
| 186 |
-
monthly_data.append(data)
|
| 187 |
-
if current.month == 12:
|
| 188 |
-
current = date(current.year + 1, 1, 1)
|
| 189 |
-
else:
|
| 190 |
-
current = date(current.year, current.month + 1, 1)
|
| 191 |
-
|
| 192 |
-
if not monthly_data:
|
| 193 |
-
raise ValueError("No CHIRPS data available for the requested period")
|
| 194 |
-
|
| 195 |
-
# Write as multi-band GeoTIFF
|
| 196 |
-
h, w = monthly_data[0].shape
|
| 197 |
-
from rasterio.transform import from_bounds as transform_from_bounds
|
| 198 |
-
t = transform_from_bounds(*bbox, w, h)
|
| 199 |
-
|
| 200 |
-
with rasterio.open(
|
| 201 |
-
output_path, "w", driver="GTiff",
|
| 202 |
-
height=h, width=w, count=len(monthly_data),
|
| 203 |
-
dtype="float32", crs="EPSG:4326",
|
| 204 |
-
transform=t, nodata=-9999.0,
|
| 205 |
-
) as dst:
|
| 206 |
-
for i, data in enumerate(monthly_data):
|
| 207 |
-
dst.write(data, i + 1)
|
| 208 |
-
|
| 209 |
-
@staticmethod
|
| 210 |
-
def _compute_stats(tif_path: str) -> dict[str, Any]:
|
| 211 |
-
"""Extract monthly precipitation statistics from a multi-band GeoTIFF."""
|
| 212 |
-
with rasterio.open(tif_path) as src:
|
| 213 |
-
n_bands = src.count
|
| 214 |
-
monthly_means = []
|
| 215 |
-
peak_val = -1.0
|
| 216 |
-
peak_band = 1
|
| 217 |
-
for band in range(1, n_bands + 1):
|
| 218 |
-
data = src.read(band).astype(np.float32)
|
| 219 |
-
nodata = src.nodata
|
| 220 |
-
if nodata is not None:
|
| 221 |
-
valid = data[data != nodata]
|
| 222 |
-
else:
|
| 223 |
-
valid = data.ravel()
|
| 224 |
-
if len(valid) > 0:
|
| 225 |
-
mean = float(np.nanmean(valid))
|
| 226 |
-
monthly_means.append(mean)
|
| 227 |
-
if mean > peak_val:
|
| 228 |
-
peak_val = mean
|
| 229 |
-
peak_band = band
|
| 230 |
-
else:
|
| 231 |
-
monthly_means.append(0.0)
|
| 232 |
-
|
| 233 |
-
valid_months = sum(1 for m in monthly_means if m > 0)
|
| 234 |
-
total = float(np.sum(monthly_means))
|
| 235 |
-
|
| 236 |
-
return {
|
| 237 |
-
"monthly_means_mm": monthly_means,
|
| 238 |
-
"total_mm": total,
|
| 239 |
-
"valid_months": valid_months,
|
| 240 |
-
"wettest_band": peak_band,
|
| 241 |
-
}
|
| 242 |
-
|
| 243 |
-
@staticmethod
|
| 244 |
-
def _classify(deviation_pct: float) -> StatusLevel:
|
| 245 |
-
if deviation_pct >= -15:
|
| 246 |
-
return StatusLevel.GREEN
|
| 247 |
-
if deviation_pct >= -30:
|
| 248 |
-
return StatusLevel.AMBER
|
| 249 |
-
return StatusLevel.RED
|
| 250 |
-
|
| 251 |
-
@staticmethod
|
| 252 |
-
def _compute_trend(deviation_pct: float) -> TrendDirection:
|
| 253 |
-
if abs(deviation_pct) <= 15:
|
| 254 |
-
return TrendDirection.STABLE
|
| 255 |
-
if deviation_pct < 0:
|
| 256 |
-
return TrendDirection.DETERIORATING
|
| 257 |
-
return TrendDirection.IMPROVING
|
| 258 |
-
|
| 259 |
-
@staticmethod
|
| 260 |
-
def _build_chart_data(
|
| 261 |
-
current_monthly: list[float],
|
| 262 |
-
baseline_monthly: list[float],
|
| 263 |
-
time_range: TimeRange,
|
| 264 |
-
) -> dict[str, Any]:
|
| 265 |
-
year = time_range.end.year
|
| 266 |
-
n = min(len(current_monthly), len(baseline_monthly))
|
| 267 |
-
dates = [f"{year}-{m + 1:02d}" for m in range(n)]
|
| 268 |
-
values = [round(v, 1) for v in current_monthly[:n]]
|
| 269 |
-
b_mean = [round(v, 1) for v in baseline_monthly[:n]]
|
| 270 |
-
b_min = [round(max(v - 15, 0), 1) for v in baseline_monthly[:n]]
|
| 271 |
-
b_max = [round(v + 15, 1) for v in baseline_monthly[:n]]
|
| 272 |
-
|
| 273 |
-
return {
|
| 274 |
-
"dates": dates,
|
| 275 |
-
"values": values,
|
| 276 |
-
"baseline_mean": b_mean,
|
| 277 |
-
"baseline_min": b_min,
|
| 278 |
-
"baseline_max": b_max,
|
| 279 |
-
"label": "Precipitation (mm)",
|
| 280 |
-
}
|
| 281 |
-
|
| 282 |
-
def _fallback(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 283 |
-
rng = np.random.default_rng(5)
|
| 284 |
-
baseline = float(rng.uniform(300, 600))
|
| 285 |
-
current = baseline * float(rng.uniform(0.7, 1.1))
|
| 286 |
-
deviation = ((current - baseline) / baseline) * 100
|
| 287 |
-
|
| 288 |
-
return IndicatorResult(
|
| 289 |
-
indicator_id=self.id,
|
| 290 |
-
headline=f"Rainfall data degraded ({current:.0f}mm)",
|
| 291 |
-
status=self._classify(deviation),
|
| 292 |
-
trend=self._compute_trend(deviation),
|
| 293 |
-
confidence=ConfidenceLevel.LOW,
|
| 294 |
-
map_layer_path="",
|
| 295 |
-
chart_data={
|
| 296 |
-
"dates": [str(time_range.start.year), str(time_range.end.year)],
|
| 297 |
-
"values": [round(baseline, 0), round(current, 0)],
|
| 298 |
-
"label": "Precipitation (mm)",
|
| 299 |
-
},
|
| 300 |
-
data_source="placeholder",
|
| 301 |
-
summary="CHIRPS download unavailable. Showing placeholder values.",
|
| 302 |
-
methodology="Placeholder \u2014 no satellite data processed.",
|
| 303 |
-
limitations=["Data is synthetic. CHIRPS data was unreachable."],
|
| 304 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -97,8 +97,7 @@ class SarIndicator(BaseIndicator):
|
|
| 97 |
paths = current_job.download_results(current_dir)
|
| 98 |
current_path = self._find_tif(paths, current_dir)
|
| 99 |
except Exception as exc:
|
| 100 |
-
|
| 101 |
-
return self._fallback(aoi, time_range)
|
| 102 |
|
| 103 |
# Download baseline — optional (degrades gracefully)
|
| 104 |
baseline_path = None
|
|
@@ -249,11 +248,7 @@ class SarIndicator(BaseIndicator):
|
|
| 249 |
async def process(
|
| 250 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 251 |
) -> IndicatorResult:
|
| 252 |
-
|
| 253 |
-
return await self._process_openeo(aoi, time_range, season_months)
|
| 254 |
-
except Exception as exc:
|
| 255 |
-
logger.warning("SAR openEO processing failed: %s", exc)
|
| 256 |
-
return self._fallback(aoi, time_range)
|
| 257 |
|
| 258 |
async def _process_openeo(
|
| 259 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
@@ -540,26 +535,11 @@ class SarIndicator(BaseIndicator):
|
|
| 540 |
with rasterio.open(output_path, "w", **profile) as dst:
|
| 541 |
dst.write(change, 1)
|
| 542 |
|
| 543 |
-
def _insufficient_data(self, aoi: AOI, time_range: TimeRange)
|
| 544 |
-
"""
|
| 545 |
-
|
| 546 |
-
|
| 547 |
-
|
| 548 |
-
|
| 549 |
-
trend=TrendDirection.STABLE,
|
| 550 |
-
confidence=ConfidenceLevel.LOW,
|
| 551 |
-
map_layer_path="",
|
| 552 |
-
chart_data={"dates": [], "values": [], "label": "VV Backscatter (dB)"},
|
| 553 |
-
data_source="placeholder",
|
| 554 |
-
summary=(
|
| 555 |
-
"No Sentinel-1 GRD scenes were available for the requested "
|
| 556 |
-
"area and time period. SAR coverage over parts of East Africa "
|
| 557 |
-
"is inconsistent."
|
| 558 |
-
),
|
| 559 |
-
methodology="Sentinel-1 GRD — no data available for processing.",
|
| 560 |
-
limitations=["No SAR data available. This indicator was skipped."],
|
| 561 |
)
|
| 562 |
|
| 563 |
-
def _fallback(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 564 |
-
return self._insufficient_data(aoi, time_range)
|
| 565 |
-
|
|
|
|
| 97 |
paths = current_job.download_results(current_dir)
|
| 98 |
current_path = self._find_tif(paths, current_dir)
|
| 99 |
except Exception as exc:
|
| 100 |
+
raise RuntimeError(f"SAR current period data unavailable: {exc}") from exc
|
|
|
|
| 101 |
|
| 102 |
# Download baseline — optional (degrades gracefully)
|
| 103 |
baseline_path = None
|
|
|
|
| 248 |
async def process(
|
| 249 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 250 |
) -> IndicatorResult:
|
| 251 |
+
return await self._process_openeo(aoi, time_range, season_months)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 252 |
|
| 253 |
async def _process_openeo(
|
| 254 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
|
|
| 535 |
with rasterio.open(output_path, "w", **profile) as dst:
|
| 536 |
dst.write(change, 1)
|
| 537 |
|
| 538 |
+
def _insufficient_data(self, aoi: AOI, time_range: TimeRange):
|
| 539 |
+
"""Raise when no Sentinel-1 data is available."""
|
| 540 |
+
raise RuntimeError(
|
| 541 |
+
"No Sentinel-1 GRD scenes available for the requested "
|
| 542 |
+
"area and time period. SAR coverage over parts of East Africa "
|
| 543 |
+
"is inconsistent."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 544 |
)
|
| 545 |
|
|
|
|
|
|
|
|
|
|
@@ -1,324 +0,0 @@
|
|
| 1 |
-
from __future__ import annotations
|
| 2 |
-
|
| 3 |
-
from collections import defaultdict
|
| 4 |
-
from datetime import date
|
| 5 |
-
from typing import Any
|
| 6 |
-
|
| 7 |
-
import numpy as np
|
| 8 |
-
|
| 9 |
-
from app.indicators.base import BaseIndicator
|
| 10 |
-
from app.models import (
|
| 11 |
-
AOI,
|
| 12 |
-
TimeRange,
|
| 13 |
-
IndicatorResult,
|
| 14 |
-
StatusLevel,
|
| 15 |
-
TrendDirection,
|
| 16 |
-
ConfidenceLevel,
|
| 17 |
-
)
|
| 18 |
-
|
| 19 |
-
BASELINE_YEARS = 5  # years of history pooled into the comparison baseline
MAX_ITEMS = 100  # cap on STAC items fetched per query

logger = logging.getLogger(__name__)


class VegetationIndicator(BaseIndicator):
    """D2 indicator: is vegetation cover declining relative to a multi-year baseline?

    Works entirely from Sentinel-2 scene-level STAC metadata
    (``s2:vegetation_percentage``) queried from Earth Search — no pixel data
    is downloaded. Falls back to deterministic synthetic values (marked
    ``data_source="placeholder"``) when STAC dependencies or queries fail.
    """

    id = "vegetation"
    name = "Vegetation & Forest Cover"
    category = "D2"
    question = "Is vegetation cover declining?"
    estimated_minutes = 5

    async def process(self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None) -> IndicatorResult:
        """Compare current vegetation cover against the baseline and build the result.

        Side effect: stores ``self._spatial_data`` for downstream map rendering.
        """
        baseline_mean, current_mean, n_months = await self._fetch_comparison(aoi, time_range)

        change_pp = current_mean - baseline_mean  # positive = greening
        abs_change = abs(change_pp)

        status = self._classify(change_pp)
        trend = self._compute_trend(change_pp)
        # Confidence scales with how many month-matched observations we had.
        confidence = (
            ConfidenceLevel.HIGH if n_months >= 6
            else ConfidenceLevel.MODERATE if n_months >= 3
            else ConfidenceLevel.LOW
        )
        # Monthly chart needs the per-month state cached by _stac_comparison;
        # absent that (placeholder path), fall back to the two-point chart.
        if season_months and hasattr(self, '_current_monthly_medians') and hasattr(self, '_baseline_per_year_monthly'):
            chart_data = self._build_monthly_chart_data(
                self._current_monthly_medians,
                self._baseline_per_year_monthly,
                time_range,
                season_months,
            )
        else:
            chart_data = self._build_chart_data(
                baseline_mean, current_mean, time_range,
                getattr(self, '_baseline_yearly_means', None),
            )

        if abs_change <= 5:
            headline = f"Vegetation cover stable ({current_mean:.0f}% cover, ±{abs_change:.0f}pp vs baseline)"
        elif change_pp > 0:
            headline = f"Vegetation cover increased (+{change_pp:.0f}pp vs baseline)"
        else:
            headline = f"Vegetation cover declined ({change_pp:.0f}pp vs baseline)"

        # Store spatial data for map rendering
        from app.indicators.base import SpatialData
        tile_geojson = await self._fetch_tile_footprints(aoi, time_range)
        if tile_geojson["features"]:
            self._spatial_data = SpatialData(
                geojson=tile_geojson,
                map_type="choropleth",
                label="Vegetation cover (%)",
                colormap="YlGn",
            )
        else:
            self._spatial_data = None

        return IndicatorResult(
            indicator_id=self.id,
            headline=headline,
            status=status,
            trend=trend,
            confidence=confidence,
            map_layer_path="",
            chart_data=chart_data,
            data_source="placeholder" if getattr(self, '_is_placeholder', False) else "satellite",
            summary=(
                f"Mean vegetation cover is {current_mean:.1f}% compared to a {BASELINE_YEARS}-year "
                f"baseline of {baseline_mean:.1f}% ({change_pp:+.1f} percentage points). "
                f"Month-matched comparison using {n_months} overlapping months. "
                f"Status: {status.value}. Trend: {trend.value}."
            ),
            methodology=(
                "Sentinel-2 scene-level vegetation percentage (s2:vegetation_percentage) "
                "is extracted from STAC metadata for cloud-free scenes. Monthly medians "
                "are computed and only months with data in both the current and baseline "
                "periods are compared, avoiding seasonal sampling bias. "
                f"Baseline: {BASELINE_YEARS} years. No pixel data is downloaded."
            ),
            limitations=[
                "Uses scene-level vegetation percentage, not pixel-level NDVI.",
                "Seasonal variation may cause apparent loss if analysis windows differ.",
                "Cloud cover reduces observation count and may bias estimates.",
                "Vegetation percentage includes all green vegetation, not just forest.",
            ],
        )

    async def _fetch_tile_footprints(self, aoi: AOI, time_range: TimeRange) -> dict:
        """Fetch S2 tile footprints with median vegetation % per tile.

        Returns an (possibly empty) GeoJSON FeatureCollection; any failure is
        deliberately swallowed because the footprint map is cosmetic.
        """
        try:
            import asyncio
            import pystac_client
        except ImportError:
            return {"type": "FeatureCollection", "features": []}

        try:
            catalog = pystac_client.Client.open(
                "https://earth-search.aws.element84.com/v1"
            )
            current_year = time_range.end.year

            def _query():
                # Runs in a worker thread — pystac_client is synchronous.
                start = date(current_year, 1, 1)
                end = date(current_year, 12, 31)
                items = catalog.search(
                    collections=["sentinel-2-l2a"],
                    bbox=aoi.bbox,
                    datetime=f"{start.isoformat()}/{end.isoformat()}",
                    query={"eo:cloud_cover": {"lt": 30}},
                    max_items=MAX_ITEMS,
                ).item_collection()

                tile_vals: dict[str, list[float]] = defaultdict(list)
                tile_geom: dict[str, dict] = {}
                for item in items:
                    grid = item.properties.get("grid:code", item.id)
                    veg = item.properties.get("s2:vegetation_percentage")
                    if veg is not None:
                        tile_vals[grid].append(float(veg))
                        if grid not in tile_geom:
                            tile_geom[grid] = item.geometry
                return tile_vals, tile_geom

            # get_running_loop(): get_event_loop() is deprecated inside coroutines.
            loop = asyncio.get_running_loop()
            tile_vals, tile_geom = await loop.run_in_executor(None, _query)

            features = []
            for grid, vals in tile_vals.items():
                if grid in tile_geom:
                    features.append({
                        "type": "Feature",
                        "geometry": tile_geom[grid],
                        "properties": {
                            "value": float(np.median(vals)),
                            "grid_code": grid,
                        },
                    })
            return {"type": "FeatureCollection", "features": features}
        except Exception:
            return {"type": "FeatureCollection", "features": []}

    async def _fetch_comparison(
        self, aoi: AOI, time_range: TimeRange
    ) -> tuple[float, float, int]:
        """Returns (baseline_mean, current_mean, n_overlapping_months)."""
        self._is_placeholder = False
        try:
            import pystac_client  # noqa: F401
        except ImportError as exc:
            logger.warning("Vegetation missing dependencies, using placeholder: %s", exc)
            self._is_placeholder = True
            return self._synthetic()

        try:
            return await self._stac_comparison(aoi, time_range)
        except Exception as exc:
            logger.warning("Vegetation STAC query failed, using placeholder: %s", exc)
            self._is_placeholder = True
            return self._synthetic()

    async def _stac_comparison(
        self, aoi: AOI, time_range: TimeRange
    ) -> tuple[float, float, int]:
        """Query STAC for current vs. baseline monthly vegetation medians.

        Caches per-month state on self for chart building in process().
        """
        import asyncio
        import pystac_client

        catalog = pystac_client.Client.open(
            "https://earth-search.aws.element84.com/v1"
        )
        current_year = time_range.end.year
        baseline_start_year = current_year - BASELINE_YEARS

        def _query_monthly(year: int) -> dict[int, list[float]]:
            items = catalog.search(
                collections=["sentinel-2-l2a"],
                bbox=aoi.bbox,
                datetime=f"{date(year,1,1).isoformat()}/{date(year,12,31).isoformat()}",
                query={"eo:cloud_cover": {"lt": 30}},
                max_items=MAX_ITEMS,
            ).item_collection()
            by_month: dict[int, list[float]] = defaultdict(list)
            for item in items:
                veg = item.properties.get("s2:vegetation_percentage")
                if veg is not None and item.datetime:
                    by_month[item.datetime.month].append(float(veg))
            return dict(by_month)

        loop = asyncio.get_running_loop()
        current_monthly = await loop.run_in_executor(None, _query_monthly, current_year)

        baseline_pool: dict[int, list[float]] = defaultdict(list)
        baseline_yearly_means: list[float] = []
        baseline_per_year_monthly: dict[int, list[float]] = defaultdict(list)
        for yr in range(baseline_start_year, current_year):
            yr_monthly = await loop.run_in_executor(None, _query_monthly, yr)
            yr_medians = []
            for month, vals in yr_monthly.items():
                baseline_pool[month].extend(vals)
                if vals:
                    med = float(np.median(vals))
                    yr_medians.append(med)
                    baseline_per_year_monthly[month].append(med)
            if yr_medians:
                baseline_yearly_means.append(float(np.mean(yr_medians)))

        # Only months observed in BOTH periods are compared (seasonal fairness).
        baseline_medians = []
        current_medians = []
        for month in range(1, 13):
            b_vals = baseline_pool.get(month, [])
            c_vals = current_monthly.get(month, [])
            if b_vals and c_vals:
                baseline_medians.append(float(np.median(b_vals)))
                current_medians.append(float(np.median(c_vals)))

        n_months = len(baseline_medians)
        if n_months == 0:
            self._is_placeholder = True
            return self._synthetic()

        self._baseline_yearly_means = baseline_yearly_means
        self._baseline_per_year_monthly = dict(baseline_per_year_monthly)
        self._current_monthly_medians = {}
        for month in range(1, 13):
            c_vals = current_monthly.get(month, [])
            if c_vals:
                self._current_monthly_medians[month] = float(np.median(c_vals))
        return (
            float(np.mean(baseline_medians)),
            float(np.mean(current_medians)),
            n_months,
        )

    @staticmethod
    def _synthetic() -> tuple[float, float, int]:
        """Fixed-seed synthetic (baseline, current, n_months) for the placeholder path."""
        rng = np.random.default_rng(7)
        baseline = float(rng.uniform(25, 45))
        current = baseline * float(rng.uniform(0.90, 1.02))
        return baseline, current, 6

    @staticmethod
    def _classify(change_pp: float) -> StatusLevel:
        """Map change in percentage points to a status; decline beyond -15pp is RED."""
        if change_pp >= -5:
            return StatusLevel.GREEN
        if change_pp >= -15:
            return StatusLevel.AMBER
        return StatusLevel.RED

    @staticmethod
    def _compute_trend(change_pp: float) -> TrendDirection:
        """Change within ±5pp counts as stable; only greening is 'improving'."""
        if abs(change_pp) <= 5:
            return TrendDirection.STABLE
        if change_pp > 0:
            return TrendDirection.IMPROVING
        return TrendDirection.DETERIORATING

    @staticmethod
    def _build_monthly_chart_data(
        current_monthly: dict[int, float],
        baseline_per_year_monthly: dict[int, list[float]],
        time_range: TimeRange,
        season_months: list[int],
    ) -> dict[str, Any]:
        """Per-season-month chart with baseline mean/min/max envelope (if any)."""
        year = time_range.end.year
        dates, values, b_mean, b_min, b_max = [], [], [], [], []
        for m in season_months:
            dates.append(f"{year}-{m:02d}")
            values.append(round(current_monthly.get(m, 0.0), 1))
            yr_medians = baseline_per_year_monthly.get(m, [])
            if yr_medians:
                b_mean.append(round(float(np.mean(yr_medians)), 1))
                b_min.append(round(float(min(yr_medians)), 1))
                b_max.append(round(float(max(yr_medians)), 1))
            else:
                b_mean.append(0.0)
                b_min.append(0.0)
                b_max.append(0.0)
        result: dict[str, Any] = {
            "dates": dates,
            "values": values,
            "label": "Vegetation cover (%)",
        }
        if any(v > 0 for v in b_mean):
            result["baseline_mean"] = b_mean
            result["baseline_min"] = b_min
            result["baseline_max"] = b_max
        return result

    @staticmethod
    def _build_chart_data(
        baseline: float, current: float, time_range: TimeRange,
        baseline_yearly_means: list[float] | None = None,
    ) -> dict[str, Any]:
        """Two-point baseline-vs-current chart, plus baseline range when known."""
        data: dict[str, Any] = {
            "dates": [str(time_range.start.year - 1), str(time_range.end.year)],
            "values": [round(baseline, 1), round(current, 1)],
            "label": "Vegetation cover (%)",
        }
        if baseline_yearly_means and len(baseline_yearly_means) >= 2:
            data["baseline_range_mean"] = round(float(np.mean(baseline_yearly_means)), 1)
            data["baseline_range_min"] = round(float(min(baseline_yearly_means)), 1)
            data["baseline_range_max"] = round(float(max(baseline_yearly_means)), 1)
        return data
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -96,8 +96,7 @@ class WaterIndicator(BaseIndicator):
|
|
| 96 |
paths = current_job.download_results(current_dir)
|
| 97 |
current_path = self._find_tif(paths, current_dir)
|
| 98 |
except Exception as exc:
|
| 99 |
-
|
| 100 |
-
return self._fallback(aoi, time_range)
|
| 101 |
|
| 102 |
# Download baseline — optional (degrades gracefully)
|
| 103 |
baseline_path = None
|
|
@@ -205,11 +204,7 @@ class WaterIndicator(BaseIndicator):
|
|
| 205 |
async def process(
|
| 206 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 207 |
) -> IndicatorResult:
|
| 208 |
-
|
| 209 |
-
return await self._process_openeo(aoi, time_range, season_months)
|
| 210 |
-
except Exception as exc:
|
| 211 |
-
logger.warning("Water openEO processing failed, using placeholder: %s", exc)
|
| 212 |
-
return self._fallback(aoi, time_range)
|
| 213 |
|
| 214 |
async def _process_openeo(
|
| 215 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
@@ -397,26 +392,3 @@ class WaterIndicator(BaseIndicator):
|
|
| 397 |
"label": "Water extent (%)",
|
| 398 |
}
|
| 399 |
|
| 400 |
-
def _fallback(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 401 |
-
rng = np.random.default_rng(9)
|
| 402 |
-
baseline = float(rng.uniform(5, 20))
|
| 403 |
-
current = baseline * float(rng.uniform(0.85, 1.15))
|
| 404 |
-
change = current - baseline
|
| 405 |
-
|
| 406 |
-
return IndicatorResult(
|
| 407 |
-
indicator_id=self.id,
|
| 408 |
-
headline=f"Water data degraded ({current:.1f}% extent)",
|
| 409 |
-
status=StatusLevel.GREEN if abs(change) < 5 else StatusLevel.AMBER,
|
| 410 |
-
trend=TrendDirection.STABLE,
|
| 411 |
-
confidence=ConfidenceLevel.LOW,
|
| 412 |
-
map_layer_path="",
|
| 413 |
-
chart_data={
|
| 414 |
-
"dates": [str(time_range.start.year), str(time_range.end.year)],
|
| 415 |
-
"values": [round(baseline, 1), round(current, 1)],
|
| 416 |
-
"label": "Water extent (%)",
|
| 417 |
-
},
|
| 418 |
-
data_source="placeholder",
|
| 419 |
-
summary="openEO processing unavailable. Showing placeholder values.",
|
| 420 |
-
methodology="Placeholder \u2014 no satellite data processed.",
|
| 421 |
-
limitations=["Data is synthetic. openEO backend was unreachable."],
|
| 422 |
-
)
|
|
|
|
| 96 |
paths = current_job.download_results(current_dir)
|
| 97 |
current_path = self._find_tif(paths, current_dir)
|
| 98 |
except Exception as exc:
|
| 99 |
+
raise RuntimeError(f"Water current period data unavailable: {exc}") from exc
|
|
|
|
| 100 |
|
| 101 |
# Download baseline — optional (degrades gracefully)
|
| 102 |
baseline_path = None
|
|
|
|
| 204 |
async def process(
|
| 205 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None = None
|
| 206 |
) -> IndicatorResult:
|
| 207 |
+
return await self._process_openeo(aoi, time_range, season_months)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
|
| 209 |
async def _process_openeo(
|
| 210 |
self, aoi: AOI, time_range: TimeRange, season_months: list[int] | None
|
|
|
|
| 392 |
"label": "Water extent (%)",
|
| 393 |
}
|
| 394 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -29,7 +29,7 @@ def create_app(db_path: str = "aperture.db", run_worker: bool = False) -> FastAP
|
|
| 29 |
if OPENEO_CLIENT_ID and OPENEO_CLIENT_SECRET:
|
| 30 |
print(f"[Aperture] CDSE credentials configured (client_id={OPENEO_CLIENT_ID[:8]}...)")
|
| 31 |
else:
|
| 32 |
-
print("[Aperture] WARNING: CDSE credentials NOT configured — EO indicators will use placeholder data")
|
| 33 |
print(f"[Aperture] SPACE_ID={os.environ.get('SPACE_ID', '<not set>')}")
|
| 34 |
await db.init()
|
| 35 |
worker_task = None
|
|
|
|
| 29 |
if OPENEO_CLIENT_ID and OPENEO_CLIENT_SECRET:
|
| 30 |
print(f"[Aperture] CDSE credentials configured (client_id={OPENEO_CLIENT_ID[:8]}...)")
|
| 31 |
else:
|
| 32 |
+
print("[Aperture] WARNING: CDSE credentials NOT configured — EO indicators will fail")
|
| 33 |
print(f"[Aperture] SPACE_ID={os.environ.get('SPACE_ID', '<not set>')}")
|
| 34 |
await db.init()
|
| 35 |
worker_task = None
|
|
@@ -126,7 +126,7 @@ class IndicatorResult(BaseModel):
|
|
| 126 |
summary: str
|
| 127 |
methodology: str
|
| 128 |
limitations: list[str]
|
| 129 |
-
data_source: str = "satellite"
|
| 130 |
|
| 131 |
|
| 132 |
class Job(BaseModel):
|
|
|
|
| 126 |
summary: str
|
| 127 |
methodology: str
|
| 128 |
limitations: list[str]
|
| 129 |
+
data_source: str = "satellite"
|
| 130 |
|
| 131 |
|
| 132 |
class Job(BaseModel):
|
|
@@ -193,33 +193,6 @@ def build_mndwi_graph(
|
|
| 193 |
return monthly
|
| 194 |
|
| 195 |
|
| 196 |
-
def build_lst_graph(
    *,
    conn: openeo.Connection,
    bbox: dict[str, float],
    temporal_extent: list[str],
    resolution_m: int = 1000,
) -> openeo.DataCube:
    """Build an openEO process graph for Sentinel-3 SLSTR land surface temperature.

    The LST band is loaded from the Sentinel-3 SLSTR Level-2 collection and
    reduced to monthly means. SLSTR's native resolution is roughly 1 km, so
    spatial resampling is applied only when a coarser grid is requested.
    """
    lst_cube = conn.load_collection(
        collection_id="SENTINEL3_SLSTR_L2_LST",
        spatial_extent=bbox,
        temporal_extent=temporal_extent,
        bands=["LST"],
    )
    monthly = lst_cube.aggregate_temporal_period("month", reducer="mean")
    if resolution_m <= 1000:
        return monthly
    # Metres to approximate degrees: 1 degree of longitude ~ 111,320 m at the equator.
    return monthly.resample_spatial(resolution=resolution_m / 111320, projection="EPSG:4326")
|
| 221 |
-
|
| 222 |
-
|
| 223 |
def build_sar_graph(
|
| 224 |
*,
|
| 225 |
conn: openeo.Connection,
|
|
|
|
| 193 |
return monthly
|
| 194 |
|
| 195 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 196 |
def build_sar_graph(
|
| 197 |
*,
|
| 198 |
conn: openeo.Connection,
|
|
@@ -20,26 +20,10 @@ _INTERPRETATIONS: dict[tuple[str, StatusLevel], str] = {
|
|
| 20 |
("buildup", StatusLevel.RED): "Rapid settlement expansion at this rate suggests significant population displacement or unplanned urban growth.",
|
| 21 |
("buildup", StatusLevel.AMBER): "Moderate settlement growth detected, consistent with gradual urbanization.",
|
| 22 |
("buildup", StatusLevel.GREEN): "Settlement extent is stable relative to the baseline period.",
|
| 23 |
-
("fires", StatusLevel.RED): "Active fire density at critical levels \u2014 indicates widespread burning requiring immediate attention.",
|
| 24 |
-
("fires", StatusLevel.AMBER): "Elevated fire activity detected, likely agricultural burning or localized wildfire.",
|
| 25 |
-
("fires", StatusLevel.GREEN): "Fire activity is within normal seasonal patterns.",
|
| 26 |
-
("rainfall", StatusLevel.RED): "Severe precipitation deficit against the long-term average is consistent with drought conditions.",
|
| 27 |
-
("rainfall", StatusLevel.AMBER): "Below-average precipitation may lead to crop stress if the deficit persists.",
|
| 28 |
-
("rainfall", StatusLevel.GREEN): "Precipitation is within normal ranges for the season.",
|
| 29 |
-
("lst", StatusLevel.RED): "Surface temperatures are critically elevated, indicating severe thermal stress.",
|
| 30 |
-
("lst", StatusLevel.AMBER): "Moderately elevated surface temperatures may affect crop health and water availability.",
|
| 31 |
-
("lst", StatusLevel.GREEN): "Surface temperatures are within the normal seasonal range.",
|
| 32 |
-
("nightlights", StatusLevel.RED): "Significant nighttime light changes indicate major disruption to infrastructure or population activity.",
|
| 33 |
-
("nightlights", StatusLevel.AMBER): "Moderate nighttime light changes may reflect economic shifts or partial service disruption.",
|
| 34 |
-
("nightlights", StatusLevel.GREEN): "Nighttime light patterns are stable, suggesting no major disruption.",
|
| 35 |
}
|
| 36 |
|
| 37 |
# --- Cross-indicator pattern rules ---
|
| 38 |
_CROSS_PATTERNS: list[tuple[dict[str, set[StatusLevel]], str]] = [
|
| 39 |
-
(
|
| 40 |
-
{"ndvi": {StatusLevel.RED, StatusLevel.AMBER}, "rainfall": {StatusLevel.RED, StatusLevel.AMBER}},
|
| 41 |
-
"Vegetation decline is consistent with reduced precipitation, suggesting drought-driven stress.",
|
| 42 |
-
),
|
| 43 |
(
|
| 44 |
{"ndvi": {StatusLevel.RED, StatusLevel.AMBER}, "buildup": {StatusLevel.RED, StatusLevel.AMBER}},
|
| 45 |
"Vegetation loss coincides with settlement expansion, indicating possible land-use conversion.",
|
|
@@ -52,10 +36,6 @@ _CROSS_PATTERNS: list[tuple[dict[str, set[StatusLevel]], str]] = [
|
|
| 52 |
{"water": {StatusLevel.RED, StatusLevel.AMBER}, "sar": {StatusLevel.RED, StatusLevel.AMBER}},
|
| 53 |
"Increased water extent and SAR signal changes suggest flooding or waterlogging.",
|
| 54 |
),
|
| 55 |
-
(
|
| 56 |
-
{"fires": {StatusLevel.RED, StatusLevel.AMBER}, "ndvi": {StatusLevel.RED, StatusLevel.AMBER}},
|
| 57 |
-
"Active fire detections combined with vegetation decline indicate burning-driven land cover change.",
|
| 58 |
-
),
|
| 59 |
]
|
| 60 |
|
| 61 |
_LEAD_TEMPLATES = {
|
|
@@ -101,12 +81,4 @@ def generate_narrative(results: Sequence[IndicatorResult]) -> str:
|
|
| 101 |
parts.append(sentence)
|
| 102 |
break
|
| 103 |
|
| 104 |
-
# 4. Placeholder caveat
|
| 105 |
-
placeholder_count = sum(1 for r in results if r.data_source == "placeholder")
|
| 106 |
-
if placeholder_count:
|
| 107 |
-
parts.append(
|
| 108 |
-
f"{placeholder_count} indicator(s) used estimated data "
|
| 109 |
-
"\u2014 cross-indicator interpretation is limited."
|
| 110 |
-
)
|
| 111 |
-
|
| 112 |
return " ".join(parts)
|
|
|
|
| 20 |
("buildup", StatusLevel.RED): "Rapid settlement expansion at this rate suggests significant population displacement or unplanned urban growth.",
|
| 21 |
("buildup", StatusLevel.AMBER): "Moderate settlement growth detected, consistent with gradual urbanization.",
|
| 22 |
("buildup", StatusLevel.GREEN): "Settlement extent is stable relative to the baseline period.",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
}
|
| 24 |
|
| 25 |
# --- Cross-indicator pattern rules ---
|
| 26 |
_CROSS_PATTERNS: list[tuple[dict[str, set[StatusLevel]], str]] = [
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
(
|
| 28 |
{"ndvi": {StatusLevel.RED, StatusLevel.AMBER}, "buildup": {StatusLevel.RED, StatusLevel.AMBER}},
|
| 29 |
"Vegetation loss coincides with settlement expansion, indicating possible land-use conversion.",
|
|
|
|
| 36 |
{"water": {StatusLevel.RED, StatusLevel.AMBER}, "sar": {StatusLevel.RED, StatusLevel.AMBER}},
|
| 37 |
"Increased water extent and SAR signal changes suggest flooding or waterlogging.",
|
| 38 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
]
|
| 40 |
|
| 41 |
_LEAD_TEMPLATES = {
|
|
|
|
| 81 |
parts.append(sentence)
|
| 82 |
break
|
| 83 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
return " ".join(parts)
|
|
@@ -15,15 +15,10 @@ _STATUS_SCORES = {
|
|
| 15 |
|
| 16 |
# Display names for headline generation
|
| 17 |
_INDICATOR_NAMES = {
|
| 18 |
-
"fires": "fires",
|
| 19 |
-
"sar": "SAR ground change",
|
| 20 |
-
"buildup": "settlement expansion",
|
| 21 |
"ndvi": "vegetation decline",
|
|
|
|
| 22 |
"water": "water extent change",
|
| 23 |
-
"
|
| 24 |
-
"lst": "thermal stress",
|
| 25 |
-
"no2": "air quality",
|
| 26 |
-
"nightlights": "nighttime lights",
|
| 27 |
}
|
| 28 |
|
| 29 |
|
|
|
|
| 15 |
|
| 16 |
# Display names for headline generation
|
| 17 |
_INDICATOR_NAMES = {
|
|
|
|
|
|
|
|
|
|
| 18 |
"ndvi": "vegetation decline",
|
| 19 |
+
"sar": "SAR ground change",
|
| 20 |
"water": "water extent change",
|
| 21 |
+
"buildup": "settlement expansion",
|
|
|
|
|
|
|
|
|
|
| 22 |
}
|
| 23 |
|
| 24 |
|
|
@@ -26,8 +26,6 @@ from app.models import AOI, TimeRange, IndicatorResult, StatusLevel
|
|
| 26 |
|
| 27 |
# Display names for indicator IDs that don't title-case correctly
|
| 28 |
_DISPLAY_NAMES: dict[str, str] = {
|
| 29 |
-
"no2": "NO2",
|
| 30 |
-
"lst": "LST",
|
| 31 |
"sar": "SAR Backscatter",
|
| 32 |
"buildup": "Settlement Extent",
|
| 33 |
}
|
|
@@ -189,15 +187,6 @@ def _indicator_block(
|
|
| 189 |
elements.append(row)
|
| 190 |
elements.append(Spacer(1, 3 * mm))
|
| 191 |
|
| 192 |
-
# Placeholder data warning
|
| 193 |
-
if result.data_source == "placeholder":
|
| 194 |
-
elements.append(Paragraph(
|
| 195 |
-
'<font color="#CA5D0F"><b>⚠ Placeholder data</b> — real satellite data was '
|
| 196 |
-
'unavailable for this indicator. Results are illustrative only.</font>',
|
| 197 |
-
styles["body_muted"],
|
| 198 |
-
))
|
| 199 |
-
elements.append(Spacer(1, 2 * mm))
|
| 200 |
-
|
| 201 |
# Map and chart side by side (if both exist), or single image
|
| 202 |
map_exists = map_path and os.path.exists(map_path)
|
| 203 |
chart_exists = chart_path and os.path.exists(chart_path)
|
|
|
|
| 26 |
|
| 27 |
# Display names for indicator IDs that don't title-case correctly
|
| 28 |
_DISPLAY_NAMES: dict[str, str] = {
|
|
|
|
|
|
|
| 29 |
"sar": "SAR Backscatter",
|
| 30 |
"buildup": "Settlement Extent",
|
| 31 |
}
|
|
|
|
| 187 |
elements.append(row)
|
| 188 |
elements.append(Spacer(1, 3 * mm))
|
| 189 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 190 |
# Map and chart side by side (if both exist), or single image
|
| 191 |
map_exists = map_path and os.path.exists(map_path)
|
| 192 |
chart_exists = chart_path and os.path.exists(chart_path)
|
|
@@ -8,31 +8,6 @@ def classify_indicator(indicator_id: str, metrics: dict) -> StatusLevel:
|
|
| 8 |
return classifier(metrics)
|
| 9 |
|
| 10 |
|
| 11 |
-
def _fires(m):
|
| 12 |
-
count = m.get("count", 0)
|
| 13 |
-
return StatusLevel.GREEN if count == 0 else StatusLevel.AMBER if count <= 5 else StatusLevel.RED
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
def _cropland(m):
|
| 17 |
-
pct = m.get("pct_of_baseline", 100)
|
| 18 |
-
return StatusLevel.GREEN if pct > 90 else StatusLevel.AMBER if pct >= 70 else StatusLevel.RED
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
def _vegetation(m):
|
| 22 |
-
loss = m.get("loss_pct", 0)
|
| 23 |
-
return StatusLevel.GREEN if loss < 5 else StatusLevel.AMBER if loss <= 15 else StatusLevel.RED
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
def _rainfall(m):
|
| 27 |
-
dev = m.get("pct_deviation", 0)
|
| 28 |
-
return StatusLevel.GREEN if dev > -10 else StatusLevel.AMBER if dev >= -25 else StatusLevel.RED
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
def _nightlights(m):
|
| 32 |
-
pct = m.get("pct_of_baseline", 100)
|
| 33 |
-
return StatusLevel.GREEN if pct > 90 else StatusLevel.AMBER if pct >= 70 else StatusLevel.RED
|
| 34 |
-
|
| 35 |
-
|
| 36 |
def _water(m):
|
| 37 |
change = m.get("change_pct", 0)
|
| 38 |
return StatusLevel.GREEN if abs(change) < 10 else StatusLevel.AMBER if abs(change) <= 25 else StatusLevel.RED
|
|
@@ -43,25 +18,9 @@ def _sd_based(m):
|
|
| 43 |
return StatusLevel.GREEN if sd < 1 else StatusLevel.AMBER if sd <= 2 else StatusLevel.RED
|
| 44 |
|
| 45 |
|
| 46 |
-
def _food_security(m):
|
| 47 |
-
statuses = m.get("component_statuses", [])
|
| 48 |
-
return (
|
| 49 |
-
StatusLevel.RED
|
| 50 |
-
if any(s == "red" for s in statuses)
|
| 51 |
-
else StatusLevel.AMBER
|
| 52 |
-
if any(s == "amber" for s in statuses)
|
| 53 |
-
else StatusLevel.GREEN
|
| 54 |
-
)
|
| 55 |
-
|
| 56 |
-
|
| 57 |
THRESHOLDS = {
|
| 58 |
-
"fires": _fires,
|
| 59 |
-
"cropland": _cropland,
|
| 60 |
-
"vegetation": _vegetation,
|
| 61 |
-
"rainfall": _rainfall,
|
| 62 |
-
"nightlights": _nightlights,
|
| 63 |
"water": _water,
|
| 64 |
-
"
|
| 65 |
-
"
|
| 66 |
-
"
|
| 67 |
}
|
|
|
|
| 8 |
return classifier(metrics)
|
| 9 |
|
| 10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
def _water(m):
|
| 12 |
change = m.get("change_pct", 0)
|
| 13 |
return StatusLevel.GREEN if abs(change) < 10 else StatusLevel.AMBER if abs(change) <= 25 else StatusLevel.RED
|
|
|
|
| 18 |
return StatusLevel.GREEN if sd < 1 else StatusLevel.AMBER if sd <= 2 else StatusLevel.RED
|
| 19 |
|
| 20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
THRESHOLDS = {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
"water": _water,
|
| 23 |
+
"ndvi": _sd_based,
|
| 24 |
+
"sar": _sd_based,
|
| 25 |
+
"buildup": _sd_based,
|
| 26 |
}
|
|
@@ -79,89 +79,73 @@ async def process_job(job_id: str, db: Database, registry: IndicatorRegistry) ->
|
|
| 79 |
|
| 80 |
# -- Process batch indicators sequentially --
|
| 81 |
for indicator_id, indicator in batch_indicators.items():
|
| 82 |
-
result = None
|
| 83 |
-
|
| 84 |
# Submit
|
| 85 |
await db.update_job_progress(job_id, indicator_id, "submitting")
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
await db.update_job_progress(job_id, indicator_id, "processing on CDSE")
|
| 95 |
-
except Exception as exc:
|
| 96 |
-
logger.warning("Batch submit failed for %s, will use fallback: %s", indicator_id, exc)
|
| 97 |
-
jobs = None
|
| 98 |
|
| 99 |
# Poll — exit early once first job finishes + grace period for others
|
| 100 |
GRACE_PERIOD = 600 # 10 min grace after first job finishes
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
# Wrap non-finished jobs so download_results() fails fast
|
| 140 |
-
# instead of blocking for 30 min on a still-running job
|
| 141 |
-
harvest_jobs = [
|
| 142 |
-
j if s == "finished" else _SkippedJob(getattr(j, 'job_id', '?'))
|
| 143 |
-
for j, s in zip(jobs, statuses)
|
| 144 |
-
]
|
| 145 |
-
await db.update_job_progress(job_id, indicator_id, "downloading")
|
| 146 |
-
try:
|
| 147 |
-
result = await indicator.harvest(
|
| 148 |
-
job.request.aoi,
|
| 149 |
-
job.request.time_range,
|
| 150 |
-
season_months=job.request.season_months(),
|
| 151 |
-
batch_jobs=harvest_jobs,
|
| 152 |
-
)
|
| 153 |
-
except Exception as exc:
|
| 154 |
-
logger.warning("Harvest failed for %s, using fallback: %s", indicator_id, exc)
|
| 155 |
-
|
| 156 |
-
# Fallback if submit failed, poll timed out, jobs errored, or harvest failed
|
| 157 |
-
if result is None:
|
| 158 |
-
await db.update_job_progress(job_id, indicator_id, "processing")
|
| 159 |
-
result = await indicator.process(
|
| 160 |
-
job.request.aoi,
|
| 161 |
-
job.request.time_range,
|
| 162 |
-
season_months=job.request.season_months(),
|
| 163 |
)
|
| 164 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 165 |
spatial = indicator.get_spatial_data()
|
| 166 |
if spatial is not None:
|
| 167 |
spatial_cache[indicator_id] = spatial
|
|
|
|
| 79 |
|
| 80 |
# -- Process batch indicators sequentially --
|
| 81 |
for indicator_id, indicator in batch_indicators.items():
|
|
|
|
|
|
|
| 82 |
# Submit
|
| 83 |
await db.update_job_progress(job_id, indicator_id, "submitting")
|
| 84 |
+
jobs = await indicator.submit_batch(
|
| 85 |
+
job.request.aoi,
|
| 86 |
+
job.request.time_range,
|
| 87 |
+
season_months=job.request.season_months(),
|
| 88 |
+
)
|
| 89 |
+
job_ids = [getattr(j, 'job_id', '?') for j in jobs]
|
| 90 |
+
print(f"[Aperture] Submitted {indicator_id} batch jobs: {job_ids}")
|
| 91 |
+
await db.update_job_progress(job_id, indicator_id, "processing on CDSE")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
# Poll — exit early once first job finishes + grace period for others
|
| 94 |
GRACE_PERIOD = 600 # 10 min grace after first job finishes
|
| 95 |
+
poll_start = time.monotonic()
|
| 96 |
+
first_finished_at = None
|
| 97 |
+
while True:
|
| 98 |
+
elapsed = time.monotonic() - poll_start
|
| 99 |
+
statuses = [j.status() for j in jobs]
|
| 100 |
+
job_ids = [getattr(j, 'job_id', '?') for j in jobs]
|
| 101 |
+
print(f"[Aperture] Poll {indicator_id} ({elapsed:.0f}s): {list(zip(job_ids, statuses))}")
|
| 102 |
+
|
| 103 |
+
if all(s == "finished" for s in statuses):
|
| 104 |
+
logger.info("Batch jobs finished for %s", indicator_id)
|
| 105 |
+
break
|
| 106 |
+
elif any(s in ("error", "canceled") for s in statuses):
|
| 107 |
+
logger.warning("Batch job failed for %s: %s", indicator_id, statuses)
|
| 108 |
+
break
|
| 109 |
+
|
| 110 |
+
# Track when first job finishes
|
| 111 |
+
if first_finished_at is None and any(s == "finished" for s in statuses):
|
| 112 |
+
first_finished_at = time.monotonic()
|
| 113 |
+
print(f"[Aperture] {indicator_id}: first job finished, {GRACE_PERIOD}s grace for remaining")
|
| 114 |
+
|
| 115 |
+
# Grace period: once any job finished, give others 10 min then harvest partial
|
| 116 |
+
if first_finished_at and (time.monotonic() - first_finished_at) >= GRACE_PERIOD:
|
| 117 |
+
logger.info("Grace period expired for %s, harvesting partial results", indicator_id)
|
| 118 |
+
print(f"[Aperture] {indicator_id}: grace period expired, proceeding with partial results")
|
| 119 |
+
break
|
| 120 |
+
|
| 121 |
+
if elapsed >= BATCH_TIMEOUT:
|
| 122 |
+
logger.warning("Batch poll timeout after %.0fs for %s", elapsed, indicator_id)
|
| 123 |
+
break
|
| 124 |
+
|
| 125 |
+
await asyncio.sleep(BATCH_POLL_INTERVAL)
|
| 126 |
+
|
| 127 |
+
# Harvest if any job finished (harvest methods handle per-job failures)
|
| 128 |
+
any_finished = any(s == "finished" for s in statuses)
|
| 129 |
+
if not any_finished:
|
| 130 |
+
failed_statuses = list(zip(job_ids, statuses))
|
| 131 |
+
raise RuntimeError(
|
| 132 |
+
f"All batch jobs failed for {indicator_id}: {failed_statuses}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
)
|
| 134 |
|
| 135 |
+
# Wrap non-finished jobs so download_results() fails fast
|
| 136 |
+
# instead of blocking for 30 min on a still-running job
|
| 137 |
+
harvest_jobs = [
|
| 138 |
+
j if s == "finished" else _SkippedJob(getattr(j, 'job_id', '?'))
|
| 139 |
+
for j, s in zip(jobs, statuses)
|
| 140 |
+
]
|
| 141 |
+
await db.update_job_progress(job_id, indicator_id, "downloading")
|
| 142 |
+
result = await indicator.harvest(
|
| 143 |
+
job.request.aoi,
|
| 144 |
+
job.request.time_range,
|
| 145 |
+
season_months=job.request.season_months(),
|
| 146 |
+
batch_jobs=harvest_jobs,
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
spatial = indicator.get_spatial_data()
|
| 150 |
if spatial is not None:
|
| 151 |
spatial_cache[indicator_id] = spatial
|
|
File without changes
|
|
@@ -1,95 +0,0 @@
|
|
| 1 |
-
"""Shared test fixtures for indicator tests."""
|
| 2 |
-
import os
|
| 3 |
-
import pytest
|
| 4 |
-
import tempfile
|
| 5 |
-
import numpy as np
|
| 6 |
-
import rasterio
|
| 7 |
-
from rasterio.transform import from_bounds
|
| 8 |
-
from unittest.mock import MagicMock
|
| 9 |
-
from datetime import date
|
| 10 |
-
|
| 11 |
-
from app.models import AOI, TimeRange, JobRequest
|
| 12 |
-
|
| 13 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
def mock_rgb_tif(path: str):
|
| 17 |
-
"""Create a small synthetic true-color GeoTIFF."""
|
| 18 |
-
rng = np.random.default_rng(43)
|
| 19 |
-
data = rng.integers(500, 1500, (3, 10, 10), dtype=np.uint16)
|
| 20 |
-
with rasterio.open(
|
| 21 |
-
path, "w", driver="GTiff", height=10, width=10, count=3,
|
| 22 |
-
dtype="uint16", crs="EPSG:4326",
|
| 23 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=0,
|
| 24 |
-
) as dst:
|
| 25 |
-
for i in range(3):
|
| 26 |
-
dst.write(data[i], i + 1)
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
def make_mock_batch_job(src_path, fail=False):
|
| 30 |
-
"""Create a mock openEO batch job that copies a source tif on download."""
|
| 31 |
-
job = MagicMock()
|
| 32 |
-
job.job_id = "j-test"
|
| 33 |
-
|
| 34 |
-
def fake_download_results(target):
|
| 35 |
-
if fail:
|
| 36 |
-
raise Exception("Batch job failed on CDSE")
|
| 37 |
-
import shutil
|
| 38 |
-
os.makedirs(target, exist_ok=True)
|
| 39 |
-
dest = os.path.join(target, "result.tif")
|
| 40 |
-
shutil.copy(src_path, dest)
|
| 41 |
-
from pathlib import Path
|
| 42 |
-
return {Path(dest): {"type": "image/tiff"}}
|
| 43 |
-
|
| 44 |
-
job.download_results.side_effect = fake_download_results
|
| 45 |
-
job.status.return_value = "finished"
|
| 46 |
-
return job
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
@pytest.fixture
|
| 50 |
-
def sample_aoi():
|
| 51 |
-
return AOI(name="Khartoum North", bbox=[32.45, 15.65, 32.65, 15.80])
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
@pytest.fixture
|
| 55 |
-
def sample_time_range():
|
| 56 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
@pytest.fixture
|
| 60 |
-
def sample_job_request(sample_aoi, sample_time_range):
|
| 61 |
-
return JobRequest(
|
| 62 |
-
aoi=sample_aoi,
|
| 63 |
-
time_range=sample_time_range,
|
| 64 |
-
indicator_ids=["fires", "cropland"],
|
| 65 |
-
email="test@example.com",
|
| 66 |
-
)
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
@pytest.fixture
|
| 70 |
-
def temp_db_path():
|
| 71 |
-
with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f:
|
| 72 |
-
path = f.name
|
| 73 |
-
yield path
|
| 74 |
-
os.unlink(path)
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
@pytest.fixture
|
| 81 |
-
def ndvi_monthly_tif():
|
| 82 |
-
"""Path to a 12-band synthetic NDVI monthly GeoTIFF."""
|
| 83 |
-
path = os.path.join(FIXTURES_DIR, "ndvi_monthly.tif")
|
| 84 |
-
if not os.path.exists(path):
|
| 85 |
-
pytest.skip("Test fixtures not generated. Run: python tests/fixtures/create_fixtures.py")
|
| 86 |
-
return path
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
@pytest.fixture
|
| 90 |
-
def true_color_tif():
|
| 91 |
-
"""Path to a 3-band synthetic true-color GeoTIFF."""
|
| 92 |
-
path = os.path.join(FIXTURES_DIR, "true_color.tif")
|
| 93 |
-
if not os.path.exists(path):
|
| 94 |
-
pytest.skip("Test fixtures not generated. Run: python tests/fixtures/create_fixtures.py")
|
| 95 |
-
return path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,70 +0,0 @@
|
|
| 1 |
-
"""Generate small synthetic GeoTIFFs for unit tests.
|
| 2 |
-
|
| 3 |
-
Run this script once to create the test fixtures:
|
| 4 |
-
python tests/fixtures/create_fixtures.py
|
| 5 |
-
"""
|
| 6 |
-
from __future__ import annotations
|
| 7 |
-
|
| 8 |
-
import os
|
| 9 |
-
import numpy as np
|
| 10 |
-
import rasterio
|
| 11 |
-
from rasterio.transform import from_bounds
|
| 12 |
-
|
| 13 |
-
FIXTURES_DIR = os.path.dirname(__file__)
|
| 14 |
-
|
| 15 |
-
# AOI: small region near Khartoum (~20x15 pixels at 100m = 2km x 1.5km)
|
| 16 |
-
WEST, SOUTH, EAST, NORTH = 32.45, 15.65, 32.65, 15.8
|
| 17 |
-
WIDTH, HEIGHT = 22, 17 # ~100m pixels
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
def _transform():
|
| 21 |
-
return from_bounds(WEST, SOUTH, EAST, NORTH, WIDTH, HEIGHT)
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
def create_ndvi_monthly():
|
| 25 |
-
"""12-band GeoTIFF: monthly median NDVI (Jan-Dec), float32, range -0.2 to 0.9."""
|
| 26 |
-
rng = np.random.default_rng(42)
|
| 27 |
-
path = os.path.join(FIXTURES_DIR, "ndvi_monthly.tif")
|
| 28 |
-
data = np.zeros((12, HEIGHT, WIDTH), dtype=np.float32)
|
| 29 |
-
for month in range(12):
|
| 30 |
-
# Seasonal NDVI pattern: peak in rainy season (Jul-Sep)
|
| 31 |
-
base = 0.25 + 0.35 * np.sin(np.pi * (month - 3) / 6)
|
| 32 |
-
data[month] = base + rng.normal(0, 0.05, (HEIGHT, WIDTH))
|
| 33 |
-
data = np.clip(data, -0.2, 0.9)
|
| 34 |
-
|
| 35 |
-
with rasterio.open(
|
| 36 |
-
path, "w", driver="GTiff",
|
| 37 |
-
height=HEIGHT, width=WIDTH, count=12,
|
| 38 |
-
dtype="float32", crs="EPSG:4326",
|
| 39 |
-
transform=_transform(), nodata=-9999.0,
|
| 40 |
-
) as dst:
|
| 41 |
-
for i in range(12):
|
| 42 |
-
dst.write(data[i], i + 1)
|
| 43 |
-
print(f"Created {path} ({os.path.getsize(path)} bytes)")
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
def create_true_color():
|
| 47 |
-
"""3-band GeoTIFF: RGB (B04, B03, B02) reflectance, uint16, range 0-10000."""
|
| 48 |
-
rng = np.random.default_rng(43)
|
| 49 |
-
path = os.path.join(FIXTURES_DIR, "true_color.tif")
|
| 50 |
-
data = np.zeros((3, HEIGHT, WIDTH), dtype=np.uint16)
|
| 51 |
-
# Semi-arid landscape: brownish-green
|
| 52 |
-
data[0] = rng.integers(800, 1500, (HEIGHT, WIDTH)) # Red
|
| 53 |
-
data[1] = rng.integers(700, 1300, (HEIGHT, WIDTH)) # Green
|
| 54 |
-
data[2] = rng.integers(500, 1000, (HEIGHT, WIDTH)) # Blue
|
| 55 |
-
|
| 56 |
-
with rasterio.open(
|
| 57 |
-
path, "w", driver="GTiff",
|
| 58 |
-
height=HEIGHT, width=WIDTH, count=3,
|
| 59 |
-
dtype="uint16", crs="EPSG:4326",
|
| 60 |
-
transform=_transform(), nodata=0,
|
| 61 |
-
) as dst:
|
| 62 |
-
for i in range(3):
|
| 63 |
-
dst.write(data[i], i + 1)
|
| 64 |
-
print(f"Created {path} ({os.path.getsize(path)} bytes)")
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
if __name__ == "__main__":
|
| 68 |
-
create_ndvi_monthly()
|
| 69 |
-
create_true_color()
|
| 70 |
-
print("Done.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Binary file (18.4 kB)
|
|
|
|
Binary file (2.65 kB)
|
|
|
|
@@ -1,103 +0,0 @@
|
|
| 1 |
-
"""Tests for auth middleware — get_current_user dependency."""
|
| 2 |
-
import time
|
| 3 |
-
import hashlib
|
| 4 |
-
import pytest
|
| 5 |
-
from httpx import AsyncClient, ASGITransport
|
| 6 |
-
from app.main import create_app
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
def _make_token(email: str) -> str:
|
| 10 |
-
"""Mirror the backend token generation for test fixtures."""
|
| 11 |
-
secret = "aperture-mvp-secret-change-in-production"
|
| 12 |
-
hour = int(time.time() // 3600)
|
| 13 |
-
payload = f"{email}:{hour}:{secret}"
|
| 14 |
-
return hashlib.sha256(payload.encode()).hexdigest()[:32]
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
@pytest.fixture
|
| 18 |
-
async def client(tmp_path):
|
| 19 |
-
app = create_app(db_path=str(tmp_path / "test.db"), run_worker=False)
|
| 20 |
-
transport = ASGITransport(app=app)
|
| 21 |
-
async with AsyncClient(transport=transport, base_url="http://test") as c:
|
| 22 |
-
yield c
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
@pytest.mark.asyncio
|
| 26 |
-
async def test_auth_header_valid(client):
|
| 27 |
-
email = "user@example.com"
|
| 28 |
-
token = _make_token(email)
|
| 29 |
-
resp = await client.get(
|
| 30 |
-
"/api/jobs",
|
| 31 |
-
headers={"Authorization": f"Bearer {email}:{token}"},
|
| 32 |
-
)
|
| 33 |
-
assert resp.status_code == 200
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
@pytest.mark.asyncio
|
| 37 |
-
async def test_auth_header_missing(client):
|
| 38 |
-
resp = await client.get("/api/jobs")
|
| 39 |
-
assert resp.status_code == 401
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
@pytest.mark.asyncio
|
| 43 |
-
async def test_auth_header_bad_token(client):
|
| 44 |
-
resp = await client.get(
|
| 45 |
-
"/api/jobs",
|
| 46 |
-
headers={"Authorization": "Bearer user@example.com:badtoken"},
|
| 47 |
-
)
|
| 48 |
-
assert resp.status_code == 401
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
@pytest.mark.asyncio
|
| 52 |
-
async def test_auth_header_malformed(client):
|
| 53 |
-
resp = await client.get(
|
| 54 |
-
"/api/jobs",
|
| 55 |
-
headers={"Authorization": "Bearer garbage"},
|
| 56 |
-
)
|
| 57 |
-
assert resp.status_code == 401
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
@pytest.mark.asyncio
|
| 61 |
-
async def test_full_auth_and_job_list_flow(client):
|
| 62 |
-
email = "flow@example.com"
|
| 63 |
-
|
| 64 |
-
# 1. Request magic link
|
| 65 |
-
resp = await client.post("/api/auth/request", json={"email": email})
|
| 66 |
-
assert resp.status_code == 200
|
| 67 |
-
demo_token = resp.json()["demo_token"]
|
| 68 |
-
|
| 69 |
-
# 2. Verify token
|
| 70 |
-
resp = await client.post(
|
| 71 |
-
"/api/auth/verify", json={"email": email, "token": demo_token}
|
| 72 |
-
)
|
| 73 |
-
assert resp.status_code == 200
|
| 74 |
-
assert resp.json()["verified"] is True
|
| 75 |
-
|
| 76 |
-
# 3. Submit a job with auth
|
| 77 |
-
headers = {"Authorization": f"Bearer {email}:{demo_token}"}
|
| 78 |
-
job_payload = {
|
| 79 |
-
"aoi": {"name": "Test Area", "bbox": [32.5, 15.5, 32.6, 15.6]},
|
| 80 |
-
"time_range": {"start": "2025-03-01", "end": "2026-03-01"},
|
| 81 |
-
"indicator_ids": ["fires"],
|
| 82 |
-
"email": email,
|
| 83 |
-
}
|
| 84 |
-
resp = await client.post("/api/jobs", json=job_payload, headers=headers)
|
| 85 |
-
assert resp.status_code == 201
|
| 86 |
-
job_id = resp.json()["id"]
|
| 87 |
-
|
| 88 |
-
# 4. List jobs — should see the one we created
|
| 89 |
-
resp = await client.get("/api/jobs", headers=headers)
|
| 90 |
-
assert resp.status_code == 200
|
| 91 |
-
jobs = resp.json()
|
| 92 |
-
assert len(jobs) == 1
|
| 93 |
-
assert jobs[0]["id"] == job_id
|
| 94 |
-
assert jobs[0]["aoi_name"] == "Test Area"
|
| 95 |
-
assert jobs[0]["indicator_count"] == 1
|
| 96 |
-
|
| 97 |
-
# 5. Other user sees nothing
|
| 98 |
-
other_email = "other@example.com"
|
| 99 |
-
other_token = _make_token(other_email)
|
| 100 |
-
other_headers = {"Authorization": f"Bearer {other_email}:{other_token}"}
|
| 101 |
-
resp = await client.get("/api/jobs", headers=other_headers)
|
| 102 |
-
assert resp.status_code == 200
|
| 103 |
-
assert resp.json() == []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,38 +0,0 @@
|
|
| 1 |
-
import pytest
|
| 2 |
-
from httpx import AsyncClient, ASGITransport
|
| 3 |
-
|
| 4 |
-
from app.main import create_app
|
| 5 |
-
from app.indicators import registry
|
| 6 |
-
from app.indicators.base import BaseIndicator
|
| 7 |
-
from app.models import AOI, TimeRange, IndicatorResult, StatusLevel, TrendDirection, ConfidenceLevel
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
class StubIndicator(BaseIndicator):
|
| 11 |
-
id = "stub"
|
| 12 |
-
name = "Stub"
|
| 13 |
-
category = "test"
|
| 14 |
-
question = "Is this a stub?"
|
| 15 |
-
estimated_minutes = 1
|
| 16 |
-
|
| 17 |
-
async def process(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 18 |
-
raise NotImplementedError
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
@pytest.fixture
|
| 22 |
-
async def client(temp_db_path):
|
| 23 |
-
registry._indicators.clear()
|
| 24 |
-
registry.register(StubIndicator())
|
| 25 |
-
app = create_app(db_path=temp_db_path)
|
| 26 |
-
transport = ASGITransport(app=app)
|
| 27 |
-
async with AsyncClient(transport=transport, base_url="http://test") as c:
|
| 28 |
-
yield c
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
@pytest.mark.asyncio
|
| 32 |
-
async def test_list_indicators(client):
|
| 33 |
-
resp = await client.get("/api/indicators")
|
| 34 |
-
assert resp.status_code == 200
|
| 35 |
-
data = resp.json()
|
| 36 |
-
assert len(data) >= 1
|
| 37 |
-
assert data[0]["id"] == "stub"
|
| 38 |
-
assert data[0]["question"] == "Is this a stub?"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,99 +0,0 @@
|
|
| 1 |
-
import time
|
| 2 |
-
import hashlib
|
| 3 |
-
|
| 4 |
-
import pytest
|
| 5 |
-
from httpx import AsyncClient, ASGITransport
|
| 6 |
-
|
| 7 |
-
from app.main import create_app
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
def _make_token(email: str) -> str:
|
| 11 |
-
secret = "aperture-mvp-secret-change-in-production"
|
| 12 |
-
hour = int(time.time() // 3600)
|
| 13 |
-
payload = f"{email}:{hour}:{secret}"
|
| 14 |
-
return hashlib.sha256(payload.encode()).hexdigest()[:32]
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
def _auth_headers(email: str = "test@example.com") -> dict:
|
| 18 |
-
token = _make_token(email)
|
| 19 |
-
return {"Authorization": f"Bearer {email}:{token}"}
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
@pytest.fixture
|
| 23 |
-
async def client(temp_db_path):
|
| 24 |
-
app = create_app(db_path=temp_db_path)
|
| 25 |
-
transport = ASGITransport(app=app)
|
| 26 |
-
async with AsyncClient(transport=transport, base_url="http://test") as c:
|
| 27 |
-
yield c
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
# --- Auth-required tests (new) ---
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
@pytest.mark.asyncio
|
| 34 |
-
async def test_submit_job_requires_auth(client, sample_job_request):
|
| 35 |
-
resp = await client.post("/api/jobs", json=sample_job_request.model_dump(mode="json"))
|
| 36 |
-
assert resp.status_code == 401
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
@pytest.mark.asyncio
|
| 40 |
-
async def test_get_job_requires_auth(client, sample_job_request):
|
| 41 |
-
# Create a job first (with auth)
|
| 42 |
-
resp = await client.post(
|
| 43 |
-
"/api/jobs",
|
| 44 |
-
json=sample_job_request.model_dump(mode="json"),
|
| 45 |
-
headers=_auth_headers(),
|
| 46 |
-
)
|
| 47 |
-
job_id = resp.json()["id"]
|
| 48 |
-
# Try to get without auth
|
| 49 |
-
resp = await client.get(f"/api/jobs/{job_id}")
|
| 50 |
-
assert resp.status_code == 401
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
@pytest.mark.asyncio
|
| 54 |
-
async def test_get_job_wrong_user(client, sample_job_request):
|
| 55 |
-
resp = await client.post(
|
| 56 |
-
"/api/jobs",
|
| 57 |
-
json=sample_job_request.model_dump(mode="json"),
|
| 58 |
-
headers=_auth_headers("test@example.com"),
|
| 59 |
-
)
|
| 60 |
-
job_id = resp.json()["id"]
|
| 61 |
-
resp = await client.get(
|
| 62 |
-
f"/api/jobs/{job_id}",
|
| 63 |
-
headers=_auth_headers("other@example.com"),
|
| 64 |
-
)
|
| 65 |
-
assert resp.status_code == 403
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
# --- Existing tests (updated with auth headers) ---
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
@pytest.mark.asyncio
|
| 72 |
-
async def test_submit_job(client, sample_job_request):
|
| 73 |
-
resp = await client.post(
|
| 74 |
-
"/api/jobs",
|
| 75 |
-
json=sample_job_request.model_dump(mode="json"),
|
| 76 |
-
headers=_auth_headers(),
|
| 77 |
-
)
|
| 78 |
-
assert resp.status_code == 201
|
| 79 |
-
data = resp.json()
|
| 80 |
-
assert "id" in data
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
@pytest.mark.asyncio
|
| 84 |
-
async def test_get_job(client, sample_job_request):
|
| 85 |
-
resp = await client.post(
|
| 86 |
-
"/api/jobs",
|
| 87 |
-
json=sample_job_request.model_dump(mode="json"),
|
| 88 |
-
headers=_auth_headers(),
|
| 89 |
-
)
|
| 90 |
-
job_id = resp.json()["id"]
|
| 91 |
-
resp = await client.get(f"/api/jobs/{job_id}", headers=_auth_headers())
|
| 92 |
-
assert resp.status_code == 200
|
| 93 |
-
assert resp.json()["id"] == job_id
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
@pytest.mark.asyncio
|
| 97 |
-
async def test_get_unknown_job_returns_404(client):
|
| 98 |
-
resp = await client.get("/api/jobs/nonexistent", headers=_auth_headers())
|
| 99 |
-
assert resp.status_code == 404
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,102 +0,0 @@
|
|
| 1 |
-
import pytest
|
| 2 |
-
import tempfile
|
| 3 |
-
import os
|
| 4 |
-
from app.outputs.charts import render_timeseries_chart
|
| 5 |
-
from app.models import StatusLevel, TrendDirection
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
def test_render_timeseries_chart_creates_png():
|
| 9 |
-
chart_data = {
|
| 10 |
-
"dates": ["2025-01", "2025-02", "2025-03", "2025-04", "2025-05", "2025-06"],
|
| 11 |
-
"values": [2, 3, 1, 5, 4, 7],
|
| 12 |
-
}
|
| 13 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 14 |
-
out_path = os.path.join(tmpdir, "chart.png")
|
| 15 |
-
render_timeseries_chart(
|
| 16 |
-
chart_data=chart_data,
|
| 17 |
-
indicator_name="Active Fires",
|
| 18 |
-
status=StatusLevel.RED,
|
| 19 |
-
trend=TrendDirection.DETERIORATING,
|
| 20 |
-
output_path=out_path,
|
| 21 |
-
y_label="Fire events",
|
| 22 |
-
)
|
| 23 |
-
assert os.path.exists(out_path)
|
| 24 |
-
assert os.path.getsize(out_path) > 1000
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
def test_render_timeseries_chart_handles_empty_data():
|
| 28 |
-
chart_data = {"dates": [], "values": []}
|
| 29 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 30 |
-
out_path = os.path.join(tmpdir, "empty_chart.png")
|
| 31 |
-
render_timeseries_chart(
|
| 32 |
-
chart_data=chart_data,
|
| 33 |
-
indicator_name="Active Fires",
|
| 34 |
-
status=StatusLevel.GREEN,
|
| 35 |
-
trend=TrendDirection.STABLE,
|
| 36 |
-
output_path=out_path,
|
| 37 |
-
y_label="Fire events",
|
| 38 |
-
)
|
| 39 |
-
assert os.path.exists(out_path)
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
def test_render_timeseries_chart_with_monthly_baseline():
|
| 43 |
-
chart_data = {
|
| 44 |
-
"dates": ["2025-01", "2025-02", "2025-03", "2025-04", "2025-05", "2025-06"],
|
| 45 |
-
"values": [2, 3, 1, 5, 4, 7],
|
| 46 |
-
"baseline_mean": [3.0, 3.5, 2.5, 4.0, 4.5, 5.0],
|
| 47 |
-
"baseline_min": [1.0, 1.5, 0.5, 2.0, 2.5, 3.0],
|
| 48 |
-
"baseline_max": [5.0, 5.5, 4.5, 6.0, 6.5, 7.0],
|
| 49 |
-
}
|
| 50 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 51 |
-
out_path = os.path.join(tmpdir, "monthly_baseline_chart.png")
|
| 52 |
-
render_timeseries_chart(
|
| 53 |
-
chart_data=chart_data,
|
| 54 |
-
indicator_name="NDVI",
|
| 55 |
-
status=StatusLevel.GREEN,
|
| 56 |
-
trend=TrendDirection.IMPROVING,
|
| 57 |
-
output_path=out_path,
|
| 58 |
-
y_label="NDVI value",
|
| 59 |
-
)
|
| 60 |
-
assert os.path.exists(out_path)
|
| 61 |
-
assert os.path.getsize(out_path) > 1000
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
def test_render_timeseries_chart_with_summary_baseline():
|
| 65 |
-
chart_data = {
|
| 66 |
-
"dates": ["2025-01", "2025-02", "2025-03", "2025-04", "2025-05", "2025-06"],
|
| 67 |
-
"values": [2, 3, 1, 5, 4, 7],
|
| 68 |
-
"baseline_range_mean": 4.0,
|
| 69 |
-
"baseline_range_min": 2.0,
|
| 70 |
-
"baseline_range_max": 6.0,
|
| 71 |
-
}
|
| 72 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 73 |
-
out_path = os.path.join(tmpdir, "summary_baseline_chart.png")
|
| 74 |
-
render_timeseries_chart(
|
| 75 |
-
chart_data=chart_data,
|
| 76 |
-
indicator_name="NDVI",
|
| 77 |
-
status=StatusLevel.AMBER,
|
| 78 |
-
trend=TrendDirection.STABLE,
|
| 79 |
-
output_path=out_path,
|
| 80 |
-
y_label="NDVI value",
|
| 81 |
-
)
|
| 82 |
-
assert os.path.exists(out_path)
|
| 83 |
-
assert os.path.getsize(out_path) > 1000
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
def test_render_timeseries_chart_no_baseline_still_works():
|
| 87 |
-
chart_data = {
|
| 88 |
-
"dates": ["2025-01", "2025-02", "2025-03", "2025-04", "2025-05", "2025-06"],
|
| 89 |
-
"values": [2, 3, 1, 5, 4, 7],
|
| 90 |
-
}
|
| 91 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 92 |
-
out_path = os.path.join(tmpdir, "no_baseline_chart.png")
|
| 93 |
-
render_timeseries_chart(
|
| 94 |
-
chart_data=chart_data,
|
| 95 |
-
indicator_name="NDVI",
|
| 96 |
-
status=StatusLevel.RED,
|
| 97 |
-
trend=TrendDirection.DETERIORATING,
|
| 98 |
-
output_path=out_path,
|
| 99 |
-
y_label="NDVI value",
|
| 100 |
-
)
|
| 101 |
-
assert os.path.exists(out_path)
|
| 102 |
-
assert os.path.getsize(out_path) > 1000
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,34 +0,0 @@
|
|
| 1 |
-
"""Tests for app.config — centralized configuration."""
|
| 2 |
-
import os
|
| 3 |
-
import pytest
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
def test_default_resolution():
|
| 7 |
-
"""Default resolution is 100m (free-tier HF Space)."""
|
| 8 |
-
from app.config import RESOLUTION_M
|
| 9 |
-
assert RESOLUTION_M == 100
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
def test_default_max_aoi():
|
| 13 |
-
"""Default max AOI is 500 km²."""
|
| 14 |
-
from app.config import MAX_AOI_KM2
|
| 15 |
-
assert MAX_AOI_KM2 == 500
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
def test_default_openeo_backend():
|
| 19 |
-
"""Default openEO backend is CDSE."""
|
| 20 |
-
from app.config import OPENEO_BACKEND
|
| 21 |
-
assert OPENEO_BACKEND == "openeo.dataspace.copernicus.eu"
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
def test_resolution_env_override(monkeypatch):
|
| 25 |
-
"""APERTURE_RESOLUTION_M env var overrides the default."""
|
| 26 |
-
monkeypatch.setenv("APERTURE_RESOLUTION_M", "20")
|
| 27 |
-
# Force reimport to pick up env var
|
| 28 |
-
import importlib
|
| 29 |
-
import app.config
|
| 30 |
-
importlib.reload(app.config)
|
| 31 |
-
assert app.config.RESOLUTION_M == 20
|
| 32 |
-
# Reset
|
| 33 |
-
monkeypatch.delenv("APERTURE_RESOLUTION_M", raising=False)
|
| 34 |
-
importlib.reload(app.config)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,107 +0,0 @@
|
|
| 1 |
-
import aiosqlite
|
| 2 |
-
import pytest
|
| 3 |
-
from app.database import Database
|
| 4 |
-
from app.models import JobStatus
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
@pytest.mark.asyncio
|
| 8 |
-
async def test_create_and_get_job(temp_db_path, sample_job_request):
|
| 9 |
-
db = Database(temp_db_path)
|
| 10 |
-
await db.init()
|
| 11 |
-
|
| 12 |
-
job_id = await db.create_job(sample_job_request)
|
| 13 |
-
assert isinstance(job_id, str)
|
| 14 |
-
assert len(job_id) > 0
|
| 15 |
-
|
| 16 |
-
job = await db.get_job(job_id)
|
| 17 |
-
assert job.id == job_id
|
| 18 |
-
assert job.status == JobStatus.QUEUED
|
| 19 |
-
assert job.request.aoi.name == "Khartoum North"
|
| 20 |
-
assert job.request.indicator_ids == ["fires", "cropland"]
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
@pytest.mark.asyncio
|
| 24 |
-
async def test_update_job_status(temp_db_path, sample_job_request):
|
| 25 |
-
db = Database(temp_db_path)
|
| 26 |
-
await db.init()
|
| 27 |
-
|
| 28 |
-
job_id = await db.create_job(sample_job_request)
|
| 29 |
-
await db.update_job_status(job_id, JobStatus.PROCESSING)
|
| 30 |
-
|
| 31 |
-
job = await db.get_job(job_id)
|
| 32 |
-
assert job.status == JobStatus.PROCESSING
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
@pytest.mark.asyncio
|
| 36 |
-
async def test_update_job_progress(temp_db_path, sample_job_request):
|
| 37 |
-
db = Database(temp_db_path)
|
| 38 |
-
await db.init()
|
| 39 |
-
|
| 40 |
-
job_id = await db.create_job(sample_job_request)
|
| 41 |
-
await db.update_job_progress(job_id, "fires", "complete")
|
| 42 |
-
|
| 43 |
-
job = await db.get_job(job_id)
|
| 44 |
-
assert job.progress["fires"] == "complete"
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
@pytest.mark.asyncio
|
| 48 |
-
async def test_get_next_queued_job(temp_db_path, sample_job_request):
|
| 49 |
-
db = Database(temp_db_path)
|
| 50 |
-
await db.init()
|
| 51 |
-
|
| 52 |
-
job_id = await db.create_job(sample_job_request)
|
| 53 |
-
next_job = await db.get_next_queued_job()
|
| 54 |
-
assert next_job is not None
|
| 55 |
-
assert next_job.id == job_id
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
@pytest.mark.asyncio
|
| 59 |
-
async def test_get_next_queued_returns_none_when_empty(temp_db_path):
|
| 60 |
-
db = Database(temp_db_path)
|
| 61 |
-
await db.init()
|
| 62 |
-
|
| 63 |
-
next_job = await db.get_next_queued_job()
|
| 64 |
-
assert next_job is None
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
@pytest.mark.asyncio
|
| 68 |
-
async def test_get_unknown_job_returns_none(temp_db_path):
|
| 69 |
-
db = Database(temp_db_path)
|
| 70 |
-
await db.init()
|
| 71 |
-
|
| 72 |
-
job = await db.get_job("nonexistent-id")
|
| 73 |
-
assert job is None
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
@pytest.mark.asyncio
|
| 77 |
-
async def test_create_job_stores_email(temp_db_path, sample_job_request):
|
| 78 |
-
db = Database(temp_db_path)
|
| 79 |
-
await db.init()
|
| 80 |
-
job_id = await db.create_job(sample_job_request)
|
| 81 |
-
# Read raw row to confirm email column
|
| 82 |
-
async with aiosqlite.connect(temp_db_path) as conn:
|
| 83 |
-
cur = await conn.execute("SELECT email FROM jobs WHERE id = ?", (job_id,))
|
| 84 |
-
row = await cur.fetchone()
|
| 85 |
-
assert row[0] == "test@example.com"
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
@pytest.mark.asyncio
|
| 89 |
-
async def test_get_jobs_by_email(temp_db_path, sample_job_request):
|
| 90 |
-
db = Database(temp_db_path)
|
| 91 |
-
await db.init()
|
| 92 |
-
id1 = await db.create_job(sample_job_request)
|
| 93 |
-
id2 = await db.create_job(sample_job_request)
|
| 94 |
-
jobs = await db.get_jobs_by_email("test@example.com")
|
| 95 |
-
assert len(jobs) == 2
|
| 96 |
-
# Most recent first
|
| 97 |
-
assert jobs[0].id == id2
|
| 98 |
-
assert jobs[1].id == id1
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
@pytest.mark.asyncio
|
| 102 |
-
async def test_get_jobs_by_email_filters(temp_db_path, sample_job_request):
|
| 103 |
-
db = Database(temp_db_path)
|
| 104 |
-
await db.init()
|
| 105 |
-
await db.create_job(sample_job_request)
|
| 106 |
-
jobs = await db.get_jobs_by_email("other@example.com")
|
| 107 |
-
assert len(jobs) == 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,173 +0,0 @@
|
|
| 1 |
-
"""Tests for BaseIndicator batch interface."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import pytest
|
| 5 |
-
from datetime import date
|
| 6 |
-
from unittest.mock import MagicMock
|
| 7 |
-
from app.indicators.base import BaseIndicator, IndicatorRegistry
|
| 8 |
-
from app.models import AOI, TimeRange, IndicatorResult, StatusLevel, TrendDirection, ConfidenceLevel
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
class FakeIndicator(BaseIndicator):
|
| 12 |
-
id = "fake"
|
| 13 |
-
name = "Fake Indicator"
|
| 14 |
-
category = "test"
|
| 15 |
-
question = "Is this a test?"
|
| 16 |
-
estimated_minutes = 1
|
| 17 |
-
|
| 18 |
-
async def process(self, aoi: AOI, time_range: TimeRange) -> IndicatorResult:
|
| 19 |
-
return IndicatorResult(
|
| 20 |
-
indicator_id=self.id,
|
| 21 |
-
headline="Test headline",
|
| 22 |
-
status=StatusLevel.GREEN,
|
| 23 |
-
trend=TrendDirection.STABLE,
|
| 24 |
-
confidence=ConfidenceLevel.HIGH,
|
| 25 |
-
map_layer_path="/tmp/fake.tif",
|
| 26 |
-
chart_data={"dates": [], "values": []},
|
| 27 |
-
summary="Test summary.",
|
| 28 |
-
methodology="Test methodology.",
|
| 29 |
-
limitations=[],
|
| 30 |
-
)
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
def test_base_indicator_meta():
|
| 34 |
-
ind = FakeIndicator()
|
| 35 |
-
meta = ind.meta()
|
| 36 |
-
assert meta.id == "fake"
|
| 37 |
-
assert meta.name == "Fake Indicator"
|
| 38 |
-
assert meta.estimated_minutes == 1
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
@pytest.mark.asyncio
|
| 42 |
-
async def test_base_indicator_process():
|
| 43 |
-
ind = FakeIndicator()
|
| 44 |
-
aoi = AOI(name="Test", bbox=[32.45, 15.65, 32.65, 15.80])
|
| 45 |
-
tr = TimeRange()
|
| 46 |
-
result = await ind.process(aoi, tr)
|
| 47 |
-
assert result.indicator_id == "fake"
|
| 48 |
-
assert result.status == StatusLevel.GREEN
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
def test_registry_register_and_get():
|
| 52 |
-
registry = IndicatorRegistry()
|
| 53 |
-
ind = FakeIndicator()
|
| 54 |
-
registry.register(ind)
|
| 55 |
-
assert registry.get("fake") is ind
|
| 56 |
-
assert "fake" in registry.list_ids()
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
def test_registry_get_unknown_raises():
|
| 60 |
-
registry = IndicatorRegistry()
|
| 61 |
-
with pytest.raises(KeyError, match="nonexistent"):
|
| 62 |
-
registry.get("nonexistent")
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
def test_registry_catalogue():
|
| 66 |
-
registry = IndicatorRegistry()
|
| 67 |
-
registry.register(FakeIndicator())
|
| 68 |
-
catalogue = registry.catalogue()
|
| 69 |
-
assert len(catalogue) == 1
|
| 70 |
-
assert catalogue[0].id == "fake"
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
# --- Batch interface tests ---
|
| 74 |
-
|
| 75 |
-
class PlainIndicator(BaseIndicator):
|
| 76 |
-
"""Non-batch indicator for testing."""
|
| 77 |
-
id = "plain"
|
| 78 |
-
name = "Plain"
|
| 79 |
-
category = "T1"
|
| 80 |
-
question = "Test?"
|
| 81 |
-
estimated_minutes = 1
|
| 82 |
-
|
| 83 |
-
async def process(self, aoi, time_range, season_months=None):
|
| 84 |
-
return IndicatorResult(
|
| 85 |
-
indicator_id="plain", headline="ok",
|
| 86 |
-
status=StatusLevel.GREEN, trend=TrendDirection.STABLE,
|
| 87 |
-
confidence=ConfidenceLevel.HIGH, map_layer_path="",
|
| 88 |
-
chart_data={}, summary="", methodology="", limitations=[],
|
| 89 |
-
)
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
class BatchIndicator(BaseIndicator):
|
| 93 |
-
"""Batch indicator for testing."""
|
| 94 |
-
id = "batch"
|
| 95 |
-
name = "Batch"
|
| 96 |
-
category = "T2"
|
| 97 |
-
question = "Batch test?"
|
| 98 |
-
estimated_minutes = 5
|
| 99 |
-
uses_batch = True
|
| 100 |
-
|
| 101 |
-
async def process(self, aoi, time_range, season_months=None):
|
| 102 |
-
return IndicatorResult(
|
| 103 |
-
indicator_id="batch", headline="fallback",
|
| 104 |
-
status=StatusLevel.GREEN, trend=TrendDirection.STABLE,
|
| 105 |
-
confidence=ConfidenceLevel.LOW, map_layer_path="",
|
| 106 |
-
chart_data={}, data_source="placeholder",
|
| 107 |
-
summary="", methodology="", limitations=[],
|
| 108 |
-
)
|
| 109 |
-
|
| 110 |
-
async def submit_batch(self, aoi, time_range, season_months=None):
|
| 111 |
-
return [MagicMock()]
|
| 112 |
-
|
| 113 |
-
async def harvest(self, aoi, time_range, season_months=None, batch_jobs=None):
|
| 114 |
-
return IndicatorResult(
|
| 115 |
-
indicator_id="batch", headline="harvested",
|
| 116 |
-
status=StatusLevel.GREEN, trend=TrendDirection.STABLE,
|
| 117 |
-
confidence=ConfidenceLevel.HIGH, map_layer_path="",
|
| 118 |
-
chart_data={}, data_source="satellite",
|
| 119 |
-
summary="", methodology="", limitations=[],
|
| 120 |
-
)
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
def test_plain_indicator_uses_batch_is_false():
|
| 124 |
-
ind = PlainIndicator()
|
| 125 |
-
assert ind.uses_batch is False
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
def test_batch_indicator_uses_batch_is_true():
|
| 129 |
-
ind = BatchIndicator()
|
| 130 |
-
assert ind.uses_batch is True
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
@pytest.mark.asyncio
|
| 134 |
-
async def test_plain_indicator_submit_batch_raises():
|
| 135 |
-
ind = PlainIndicator()
|
| 136 |
-
with pytest.raises(NotImplementedError):
|
| 137 |
-
await ind.submit_batch(
|
| 138 |
-
AOI(name="T", bbox=[32.0, 15.0, 32.1, 15.1]),
|
| 139 |
-
TimeRange(start=date(2025, 1, 1), end=date(2025, 6, 30)),
|
| 140 |
-
)
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
@pytest.mark.asyncio
|
| 144 |
-
async def test_plain_indicator_harvest_raises():
|
| 145 |
-
ind = PlainIndicator()
|
| 146 |
-
with pytest.raises(NotImplementedError):
|
| 147 |
-
await ind.harvest(
|
| 148 |
-
AOI(name="T", bbox=[32.0, 15.0, 32.1, 15.1]),
|
| 149 |
-
TimeRange(start=date(2025, 1, 1), end=date(2025, 6, 30)),
|
| 150 |
-
batch_jobs=[],
|
| 151 |
-
)
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
@pytest.mark.asyncio
|
| 155 |
-
async def test_batch_indicator_submit_returns_jobs():
|
| 156 |
-
ind = BatchIndicator()
|
| 157 |
-
jobs = await ind.submit_batch(
|
| 158 |
-
AOI(name="T", bbox=[32.0, 15.0, 32.1, 15.1]),
|
| 159 |
-
TimeRange(start=date(2025, 1, 1), end=date(2025, 6, 30)),
|
| 160 |
-
)
|
| 161 |
-
assert len(jobs) == 1
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
@pytest.mark.asyncio
|
| 165 |
-
async def test_batch_indicator_harvest_returns_result():
|
| 166 |
-
ind = BatchIndicator()
|
| 167 |
-
result = await ind.harvest(
|
| 168 |
-
AOI(name="T", bbox=[32.0, 15.0, 32.1, 15.1]),
|
| 169 |
-
TimeRange(start=date(2025, 1, 1), end=date(2025, 6, 30)),
|
| 170 |
-
batch_jobs=[MagicMock()],
|
| 171 |
-
)
|
| 172 |
-
assert result.data_source == "satellite"
|
| 173 |
-
assert result.headline == "harvested"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,226 +0,0 @@
|
|
| 1 |
-
"""Tests for app.indicators.buildup — built-up extent via NDBI from openEO."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import os
|
| 5 |
-
import tempfile
|
| 6 |
-
from unittest.mock import MagicMock, patch
|
| 7 |
-
from datetime import date
|
| 8 |
-
|
| 9 |
-
import numpy as np
|
| 10 |
-
import rasterio
|
| 11 |
-
from rasterio.transform import from_bounds
|
| 12 |
-
import pytest
|
| 13 |
-
|
| 14 |
-
from app.models import AOI, TimeRange, StatusLevel, TrendDirection, ConfidenceLevel
|
| 15 |
-
from tests.conftest import mock_rgb_tif, make_mock_batch_job
|
| 16 |
-
|
| 17 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
@pytest.fixture
|
| 21 |
-
def test_aoi():
|
| 22 |
-
return AOI(name="Test", bbox=BBOX)
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
@pytest.fixture
|
| 26 |
-
def test_time_range():
|
| 27 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
def _mock_ndbi_tif(path: str, n_months: int = 12, buildup_fraction: float = 0.15):
|
| 31 |
-
"""Create synthetic NDBI GeoTIFF. Values > 0 with low NDVI = built-up."""
|
| 32 |
-
rng = np.random.default_rng(55)
|
| 33 |
-
data = np.zeros((n_months, 10, 10), dtype=np.float32)
|
| 34 |
-
for m in range(n_months):
|
| 35 |
-
vals = rng.normal(-0.15, 0.2, (10, 10))
|
| 36 |
-
buildup_mask = rng.random((10, 10)) < buildup_fraction
|
| 37 |
-
vals[buildup_mask] = rng.uniform(0.05, 0.4, buildup_mask.sum())
|
| 38 |
-
data[m] = vals
|
| 39 |
-
with rasterio.open(
|
| 40 |
-
path, "w", driver="GTiff", height=10, width=10, count=n_months,
|
| 41 |
-
dtype="float32", crs="EPSG:4326",
|
| 42 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=-9999.0,
|
| 43 |
-
) as dst:
|
| 44 |
-
for i in range(n_months):
|
| 45 |
-
dst.write(data[i], i + 1)
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
@pytest.mark.asyncio
|
| 49 |
-
async def test_buildup_process_returns_result(test_aoi, test_time_range):
|
| 50 |
-
"""BuiltupIndicator.process() returns a valid IndicatorResult."""
|
| 51 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 52 |
-
|
| 53 |
-
indicator = BuiltupIndicator()
|
| 54 |
-
|
| 55 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 56 |
-
ndbi_path = os.path.join(tmpdir, "ndbi.tif")
|
| 57 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 58 |
-
_mock_ndbi_tif(ndbi_path)
|
| 59 |
-
mock_rgb_tif(rgb_path)
|
| 60 |
-
|
| 61 |
-
mock_cube = MagicMock()
|
| 62 |
-
|
| 63 |
-
def fake_download(path, **kwargs):
|
| 64 |
-
import shutil
|
| 65 |
-
if "ndbi" in path or "buildup" in path:
|
| 66 |
-
shutil.copy(ndbi_path, path)
|
| 67 |
-
else:
|
| 68 |
-
shutil.copy(rgb_path, path)
|
| 69 |
-
|
| 70 |
-
mock_cube.download = MagicMock(side_effect=fake_download)
|
| 71 |
-
|
| 72 |
-
with patch("app.indicators.buildup.get_connection"), \
|
| 73 |
-
patch("app.indicators.buildup.build_buildup_graph", return_value=mock_cube), \
|
| 74 |
-
patch("app.indicators.buildup.build_true_color_graph", return_value=mock_cube):
|
| 75 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 76 |
-
|
| 77 |
-
assert result.indicator_id == "buildup"
|
| 78 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 79 |
-
assert result.data_source == "satellite"
|
| 80 |
-
assert "NDBI" in result.methodology or "built-up" in result.methodology.lower()
|
| 81 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
@pytest.mark.asyncio
|
| 85 |
-
async def test_buildup_falls_back_on_failure(test_aoi, test_time_range):
|
| 86 |
-
"""BuiltupIndicator falls back gracefully when openEO fails."""
|
| 87 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 88 |
-
|
| 89 |
-
indicator = BuiltupIndicator()
|
| 90 |
-
|
| 91 |
-
with patch("app.indicators.buildup.get_connection", side_effect=Exception("CDSE down")):
|
| 92 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 93 |
-
|
| 94 |
-
assert result.indicator_id == "buildup"
|
| 95 |
-
assert result.data_source == "placeholder"
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
def test_buildup_compute_stats():
|
| 99 |
-
"""_compute_stats() extracts built-up fraction from NDBI raster."""
|
| 100 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 101 |
-
|
| 102 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 103 |
-
path = os.path.join(tmpdir, "ndbi.tif")
|
| 104 |
-
_mock_ndbi_tif(path, n_months=12, buildup_fraction=0.2)
|
| 105 |
-
stats = BuiltupIndicator._compute_stats(path)
|
| 106 |
-
|
| 107 |
-
assert "monthly_buildup_fractions" in stats
|
| 108 |
-
assert len(stats["monthly_buildup_fractions"]) == 12
|
| 109 |
-
assert "overall_buildup_fraction" in stats
|
| 110 |
-
assert 0 < stats["overall_buildup_fraction"] < 1
|
| 111 |
-
assert "valid_months" in stats
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
def test_buildup_classify():
|
| 115 |
-
"""_classify() maps change percentage to correct status."""
|
| 116 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 117 |
-
|
| 118 |
-
assert BuiltupIndicator._classify(change_pct=5.0) == StatusLevel.GREEN
|
| 119 |
-
assert BuiltupIndicator._classify(change_pct=15.0) == StatusLevel.AMBER
|
| 120 |
-
assert BuiltupIndicator._classify(change_pct=35.0) == StatusLevel.RED
|
| 121 |
-
assert BuiltupIndicator._classify(change_pct=-25.0) == StatusLevel.AMBER
|
| 122 |
-
assert BuiltupIndicator._classify(change_pct=-35.0) == StatusLevel.RED
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
@pytest.mark.asyncio
|
| 126 |
-
async def test_buildup_submit_batch_creates_three_jobs(test_aoi, test_time_range):
|
| 127 |
-
"""submit_batch() creates current, baseline, and true-color batch jobs."""
|
| 128 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 129 |
-
|
| 130 |
-
indicator = BuiltupIndicator()
|
| 131 |
-
|
| 132 |
-
mock_conn = MagicMock()
|
| 133 |
-
mock_job = MagicMock()
|
| 134 |
-
mock_job.job_id = "j-test"
|
| 135 |
-
mock_conn.create_job.return_value = mock_job
|
| 136 |
-
|
| 137 |
-
with patch("app.indicators.buildup.get_connection", return_value=mock_conn), \
|
| 138 |
-
patch("app.indicators.buildup.build_buildup_graph") as mock_bu_graph, \
|
| 139 |
-
patch("app.indicators.buildup.build_true_color_graph") as mock_tc_graph:
|
| 140 |
-
|
| 141 |
-
mock_bu_graph.return_value = MagicMock()
|
| 142 |
-
mock_tc_graph.return_value = MagicMock()
|
| 143 |
-
|
| 144 |
-
jobs = await indicator.submit_batch(test_aoi, test_time_range)
|
| 145 |
-
|
| 146 |
-
assert len(jobs) == 3
|
| 147 |
-
assert mock_bu_graph.call_count == 2
|
| 148 |
-
assert mock_tc_graph.call_count == 1
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
@pytest.mark.asyncio
|
| 152 |
-
async def test_buildup_harvest_computes_result_from_batch_jobs(test_aoi, test_time_range):
|
| 153 |
-
"""harvest() downloads batch results and returns IndicatorResult."""
|
| 154 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 155 |
-
|
| 156 |
-
indicator = BuiltupIndicator()
|
| 157 |
-
|
| 158 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 159 |
-
ndbi_path = os.path.join(tmpdir, "ndbi.tif")
|
| 160 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 161 |
-
_mock_ndbi_tif(ndbi_path)
|
| 162 |
-
mock_rgb_tif(rgb_path)
|
| 163 |
-
|
| 164 |
-
current_job = make_mock_batch_job(ndbi_path)
|
| 165 |
-
baseline_job = make_mock_batch_job(ndbi_path)
|
| 166 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 167 |
-
|
| 168 |
-
result = await indicator.harvest(
|
| 169 |
-
test_aoi, test_time_range,
|
| 170 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 171 |
-
)
|
| 172 |
-
|
| 173 |
-
assert result.indicator_id == "buildup"
|
| 174 |
-
assert result.data_source == "satellite"
|
| 175 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 176 |
-
assert result.confidence in (ConfidenceLevel.HIGH, ConfidenceLevel.MODERATE, ConfidenceLevel.LOW)
|
| 177 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 178 |
-
assert "baseline_mean" in result.chart_data
|
| 179 |
-
assert len(result.chart_data["baseline_mean"]) > 0
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
@pytest.mark.asyncio
|
| 183 |
-
async def test_buildup_harvest_falls_back_when_current_fails(test_aoi, test_time_range):
|
| 184 |
-
"""harvest() returns placeholder when current NDBI job failed."""
|
| 185 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 186 |
-
|
| 187 |
-
indicator = BuiltupIndicator()
|
| 188 |
-
|
| 189 |
-
current_job = MagicMock()
|
| 190 |
-
current_job.download_results.side_effect = Exception("failed")
|
| 191 |
-
baseline_job = MagicMock()
|
| 192 |
-
true_color_job = MagicMock()
|
| 193 |
-
|
| 194 |
-
result = await indicator.harvest(
|
| 195 |
-
test_aoi, test_time_range,
|
| 196 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 197 |
-
)
|
| 198 |
-
|
| 199 |
-
assert result.data_source == "placeholder"
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
@pytest.mark.asyncio
|
| 203 |
-
async def test_buildup_harvest_degrades_when_baseline_fails(test_aoi, test_time_range):
|
| 204 |
-
"""harvest() returns degraded result when baseline NDBI job failed."""
|
| 205 |
-
from app.indicators.buildup import BuiltupIndicator
|
| 206 |
-
|
| 207 |
-
indicator = BuiltupIndicator()
|
| 208 |
-
|
| 209 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 210 |
-
ndbi_path = os.path.join(tmpdir, "ndbi.tif")
|
| 211 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 212 |
-
_mock_ndbi_tif(ndbi_path)
|
| 213 |
-
mock_rgb_tif(rgb_path)
|
| 214 |
-
|
| 215 |
-
current_job = make_mock_batch_job(ndbi_path)
|
| 216 |
-
baseline_job = make_mock_batch_job(ndbi_path, fail=True)
|
| 217 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 218 |
-
|
| 219 |
-
result = await indicator.harvest(
|
| 220 |
-
test_aoi, test_time_range,
|
| 221 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 222 |
-
)
|
| 223 |
-
|
| 224 |
-
assert result.indicator_id == "buildup"
|
| 225 |
-
assert result.data_source == "satellite"
|
| 226 |
-
assert result.confidence == ConfidenceLevel.LOW
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,227 +0,0 @@
|
|
| 1 |
-
"""Tests for the D1 Cropland Productivity indicator."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import pytest
|
| 5 |
-
from datetime import date
|
| 6 |
-
from unittest.mock import AsyncMock, patch
|
| 7 |
-
|
| 8 |
-
from app.indicators.cropland import CroplandIndicator
|
| 9 |
-
from app.models import AOI, TimeRange, StatusLevel, TrendDirection, ConfidenceLevel
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
@pytest.fixture
|
| 13 |
-
def cropland_indicator():
|
| 14 |
-
return CroplandIndicator()
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
@pytest.fixture
|
| 18 |
-
def sample_aoi():
|
| 19 |
-
return AOI(name="Khartoum Test", bbox=[32.45, 15.65, 32.65, 15.80])
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
@pytest.fixture
|
| 23 |
-
def sample_time_range():
|
| 24 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
# ---------------------------------------------------------------------------
|
| 28 |
-
# Meta
|
| 29 |
-
# ---------------------------------------------------------------------------
|
| 30 |
-
|
| 31 |
-
def test_cropland_meta(cropland_indicator):
|
| 32 |
-
meta = cropland_indicator.meta()
|
| 33 |
-
assert meta.id == "cropland"
|
| 34 |
-
assert meta.category == "D1"
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
# ---------------------------------------------------------------------------
|
| 38 |
-
# Red status: severe decline (> 15pp drop)
|
| 39 |
-
# ---------------------------------------------------------------------------
|
| 40 |
-
|
| 41 |
-
@pytest.mark.asyncio
|
| 42 |
-
async def test_cropland_red_when_severe_decline(
|
| 43 |
-
cropland_indicator, sample_aoi, sample_time_range
|
| 44 |
-
):
|
| 45 |
-
with patch.object(
|
| 46 |
-
cropland_indicator,
|
| 47 |
-
"_fetch_comparison",
|
| 48 |
-
new=AsyncMock(return_value=(45.0, 25.0, 6)), # -20pp
|
| 49 |
-
):
|
| 50 |
-
result = await cropland_indicator.process(sample_aoi, sample_time_range)
|
| 51 |
-
|
| 52 |
-
assert result.indicator_id == "cropland"
|
| 53 |
-
assert result.status == StatusLevel.RED
|
| 54 |
-
assert result.trend == TrendDirection.DETERIORATING
|
| 55 |
-
assert "abandonment" in result.headline.lower()
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
# ---------------------------------------------------------------------------
|
| 59 |
-
# Amber status: moderate decline (5-15pp drop)
|
| 60 |
-
# ---------------------------------------------------------------------------
|
| 61 |
-
|
| 62 |
-
@pytest.mark.asyncio
|
| 63 |
-
async def test_cropland_amber_when_moderate_decline(
|
| 64 |
-
cropland_indicator, sample_aoi, sample_time_range
|
| 65 |
-
):
|
| 66 |
-
with patch.object(
|
| 67 |
-
cropland_indicator,
|
| 68 |
-
"_fetch_comparison",
|
| 69 |
-
new=AsyncMock(return_value=(45.0, 35.0, 6)), # -10pp
|
| 70 |
-
):
|
| 71 |
-
result = await cropland_indicator.process(sample_aoi, sample_time_range)
|
| 72 |
-
|
| 73 |
-
assert result.status == StatusLevel.AMBER
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
# ---------------------------------------------------------------------------
|
| 77 |
-
# Green status: stable (within ±5pp)
|
| 78 |
-
# ---------------------------------------------------------------------------
|
| 79 |
-
|
| 80 |
-
@pytest.mark.asyncio
|
| 81 |
-
async def test_cropland_green_when_stable(
|
| 82 |
-
cropland_indicator, sample_aoi, sample_time_range
|
| 83 |
-
):
|
| 84 |
-
with patch.object(
|
| 85 |
-
cropland_indicator,
|
| 86 |
-
"_fetch_comparison",
|
| 87 |
-
new=AsyncMock(return_value=(45.0, 43.0, 6)), # -2pp
|
| 88 |
-
):
|
| 89 |
-
result = await cropland_indicator.process(sample_aoi, sample_time_range)
|
| 90 |
-
|
| 91 |
-
assert result.status == StatusLevel.GREEN
|
| 92 |
-
assert result.trend == TrendDirection.STABLE
|
| 93 |
-
assert "stable" in result.headline.lower()
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
# ---------------------------------------------------------------------------
|
| 97 |
-
# Confidence based on n_months
|
| 98 |
-
# ---------------------------------------------------------------------------
|
| 99 |
-
|
| 100 |
-
@pytest.mark.asyncio
|
| 101 |
-
async def test_cropland_low_confidence_with_few_months(
|
| 102 |
-
cropland_indicator, sample_aoi, sample_time_range
|
| 103 |
-
):
|
| 104 |
-
with patch.object(
|
| 105 |
-
cropland_indicator,
|
| 106 |
-
"_fetch_comparison",
|
| 107 |
-
new=AsyncMock(return_value=(45.0, 43.0, 2)), # only 2 months
|
| 108 |
-
):
|
| 109 |
-
result = await cropland_indicator.process(sample_aoi, sample_time_range)
|
| 110 |
-
|
| 111 |
-
assert result.confidence == ConfidenceLevel.LOW
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
@pytest.mark.asyncio
|
| 115 |
-
async def test_cropland_high_confidence_with_many_months(
|
| 116 |
-
cropland_indicator, sample_aoi, sample_time_range
|
| 117 |
-
):
|
| 118 |
-
with patch.object(
|
| 119 |
-
cropland_indicator,
|
| 120 |
-
"_fetch_comparison",
|
| 121 |
-
new=AsyncMock(return_value=(45.0, 43.0, 6)),
|
| 122 |
-
):
|
| 123 |
-
result = await cropland_indicator.process(sample_aoi, sample_time_range)
|
| 124 |
-
|
| 125 |
-
assert result.confidence == ConfidenceLevel.HIGH
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
# ---------------------------------------------------------------------------
|
| 129 |
-
# Result has required fields
|
| 130 |
-
# ---------------------------------------------------------------------------
|
| 131 |
-
|
| 132 |
-
@pytest.mark.asyncio
|
| 133 |
-
async def test_cropland_result_has_all_fields(
|
| 134 |
-
cropland_indicator, sample_aoi, sample_time_range
|
| 135 |
-
):
|
| 136 |
-
with patch.object(
|
| 137 |
-
cropland_indicator,
|
| 138 |
-
"_fetch_comparison",
|
| 139 |
-
new=AsyncMock(return_value=(45.0, 40.0, 5)),
|
| 140 |
-
):
|
| 141 |
-
result = await cropland_indicator.process(sample_aoi, sample_time_range)
|
| 142 |
-
|
| 143 |
-
assert result.indicator_id == "cropland"
|
| 144 |
-
assert isinstance(result.headline, str) and result.headline
|
| 145 |
-
assert isinstance(result.summary, str) and result.summary
|
| 146 |
-
assert isinstance(result.methodology, str) and result.methodology
|
| 147 |
-
assert isinstance(result.limitations, list) and result.limitations
|
| 148 |
-
assert "dates" in result.chart_data
|
| 149 |
-
assert "values" in result.chart_data
|
| 150 |
-
assert "growing season" in result.methodology.lower()
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
# ---------------------------------------------------------------------------
|
| 154 |
-
# Classify boundaries
|
| 155 |
-
# ---------------------------------------------------------------------------
|
| 156 |
-
|
| 157 |
-
def test_classify_boundary():
|
| 158 |
-
ind = CroplandIndicator()
|
| 159 |
-
assert ind._classify(0) == StatusLevel.GREEN # no change
|
| 160 |
-
assert ind._classify(-5) == StatusLevel.GREEN # boundary
|
| 161 |
-
assert ind._classify(-5.1) == StatusLevel.AMBER
|
| 162 |
-
assert ind._classify(-15) == StatusLevel.AMBER # boundary
|
| 163 |
-
assert ind._classify(-15.1) == StatusLevel.RED
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
# ---------------------------------------------------------------------------
|
| 167 |
-
# Baseline range in chart data
|
| 168 |
-
# ---------------------------------------------------------------------------
|
| 169 |
-
|
| 170 |
-
def test_build_chart_data_includes_baseline_range():
|
| 171 |
-
from app.indicators.cropland import CroplandIndicator
|
| 172 |
-
from datetime import date
|
| 173 |
-
from app.models import TimeRange
|
| 174 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 175 |
-
result = CroplandIndicator._build_chart_data(
|
| 176 |
-
baseline=40.0, current=42.0, time_range=tr,
|
| 177 |
-
baseline_yearly_means=[38.0, 40.0, 42.0],
|
| 178 |
-
)
|
| 179 |
-
assert "baseline_range_mean" in result
|
| 180 |
-
assert "baseline_range_min" in result
|
| 181 |
-
assert "baseline_range_max" in result
|
| 182 |
-
assert result["baseline_range_min"] == 38.0
|
| 183 |
-
assert result["baseline_range_max"] == 42.0
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
def test_build_chart_data_no_baseline_range_when_absent():
|
| 187 |
-
from app.indicators.cropland import CroplandIndicator
|
| 188 |
-
from datetime import date
|
| 189 |
-
from app.models import TimeRange
|
| 190 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 191 |
-
result = CroplandIndicator._build_chart_data(
|
| 192 |
-
baseline=40.0, current=42.0, time_range=tr,
|
| 193 |
-
)
|
| 194 |
-
assert "baseline_range_mean" not in result
|
| 195 |
-
assert "baseline_range_min" not in result
|
| 196 |
-
assert "baseline_range_max" not in result
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
def test_build_chart_data_no_baseline_range_when_single_year():
|
| 200 |
-
from app.indicators.cropland import CroplandIndicator
|
| 201 |
-
from datetime import date
|
| 202 |
-
from app.models import TimeRange
|
| 203 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 204 |
-
result = CroplandIndicator._build_chart_data(
|
| 205 |
-
baseline=40.0, current=42.0, time_range=tr,
|
| 206 |
-
baseline_yearly_means=[40.0],
|
| 207 |
-
)
|
| 208 |
-
assert "baseline_range_mean" not in result
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
def test_build_monthly_chart_data():
|
| 212 |
-
from app.indicators.cropland import CroplandIndicator
|
| 213 |
-
from datetime import date
|
| 214 |
-
from app.models import TimeRange
|
| 215 |
-
|
| 216 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 217 |
-
current_monthly = {4: 35.0, 5: 40.0, 6: 42.0}
|
| 218 |
-
baseline_stats = {4: [33.0, 35.0, 37.0], 5: [38.0, 40.0, 42.0], 6: [40.0, 42.0, 44.0]}
|
| 219 |
-
result = CroplandIndicator._build_monthly_chart_data(
|
| 220 |
-
current_monthly=current_monthly,
|
| 221 |
-
baseline_per_year_monthly=baseline_stats,
|
| 222 |
-
time_range=tr,
|
| 223 |
-
season_months=[4, 5, 6],
|
| 224 |
-
)
|
| 225 |
-
assert len(result["dates"]) == 3
|
| 226 |
-
assert result["dates"][0] == "2025-04"
|
| 227 |
-
assert "baseline_mean" in result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,91 +0,0 @@
|
|
| 1 |
-
import pytest
|
| 2 |
-
import json
|
| 3 |
-
from datetime import date
|
| 4 |
-
from unittest.mock import AsyncMock, patch
|
| 5 |
-
|
| 6 |
-
from app.indicators.fires import FiresIndicator
|
| 7 |
-
from app.models import AOI, TimeRange, StatusLevel
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
SAMPLE_FIRMS_CSV = """latitude,longitude,brightness,scan,track,acq_date,acq_time,satellite,confidence,version,bright_t31,frp,daynight
|
| 11 |
-
15.70,32.50,320.5,0.4,0.4,2025-06-15,0130,N,nominal,2.0NRT,290.1,5.2,N
|
| 12 |
-
15.72,32.55,310.2,0.4,0.4,2025-08-20,1300,N,nominal,2.0NRT,288.3,3.1,D
|
| 13 |
-
15.68,32.48,335.0,0.5,0.5,2025-11-01,0200,N,nominal,2.0NRT,295.0,8.7,N
|
| 14 |
-
"""
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
@pytest.fixture
|
| 18 |
-
def fires_indicator():
|
| 19 |
-
return FiresIndicator()
|
| 20 |
-
|
| 21 |
-
@pytest.fixture
|
| 22 |
-
def sample_aoi():
|
| 23 |
-
return AOI(name="Khartoum Test", bbox=[32.45, 15.65, 32.65, 15.80])
|
| 24 |
-
|
| 25 |
-
@pytest.fixture
|
| 26 |
-
def sample_time_range():
|
| 27 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
@pytest.mark.asyncio
|
| 31 |
-
async def test_fires_indicator_meta(fires_indicator):
|
| 32 |
-
meta = fires_indicator.meta()
|
| 33 |
-
assert meta.id == "fires"
|
| 34 |
-
assert meta.category == "R3"
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
@pytest.mark.asyncio
|
| 38 |
-
async def test_fires_indicator_process(fires_indicator, sample_aoi, sample_time_range):
|
| 39 |
-
mock_response = AsyncMock()
|
| 40 |
-
mock_response.status_code = 200
|
| 41 |
-
mock_response.text = SAMPLE_FIRMS_CSV
|
| 42 |
-
|
| 43 |
-
with patch("app.indicators.fires.httpx.AsyncClient") as mock_client_cls:
|
| 44 |
-
mock_client = AsyncMock()
|
| 45 |
-
mock_client.get.return_value = mock_response
|
| 46 |
-
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
|
| 47 |
-
mock_client.__aexit__ = AsyncMock(return_value=False)
|
| 48 |
-
mock_client_cls.return_value = mock_client
|
| 49 |
-
|
| 50 |
-
result = await fires_indicator.process(sample_aoi, sample_time_range)
|
| 51 |
-
|
| 52 |
-
assert result.indicator_id == "fires"
|
| 53 |
-
assert result.status == StatusLevel.AMBER # 3 fires = 1-5 = amber
|
| 54 |
-
assert "3" in result.headline
|
| 55 |
-
assert result.confidence.value in ("high", "moderate", "low")
|
| 56 |
-
assert len(result.chart_data["dates"]) > 0
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
@pytest.mark.asyncio
|
| 60 |
-
async def test_fires_indicator_green_when_no_fires(fires_indicator, sample_aoi, sample_time_range):
|
| 61 |
-
mock_response = AsyncMock()
|
| 62 |
-
mock_response.status_code = 200
|
| 63 |
-
mock_response.text = "latitude,longitude,brightness,scan,track,acq_date,acq_time,satellite,confidence,version,bright_t31,frp,daynight\n"
|
| 64 |
-
|
| 65 |
-
with patch("app.indicators.fires.httpx.AsyncClient") as mock_client_cls:
|
| 66 |
-
mock_client = AsyncMock()
|
| 67 |
-
mock_client.get.return_value = mock_response
|
| 68 |
-
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
|
| 69 |
-
mock_client.__aexit__ = AsyncMock(return_value=False)
|
| 70 |
-
mock_client_cls.return_value = mock_client
|
| 71 |
-
|
| 72 |
-
result = await fires_indicator.process(sample_aoi, sample_time_range)
|
| 73 |
-
|
| 74 |
-
assert result.status == StatusLevel.GREEN
|
| 75 |
-
assert "0" in result.headline or "no" in result.headline.lower()
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
def test_build_chart_data_filters_by_season():
|
| 79 |
-
rows = [{"acq_date": f"2025-{m:02d}-15"} for m in range(1, 13) for _ in range(3)]
|
| 80 |
-
season_months = [6, 7, 8, 9]
|
| 81 |
-
result = FiresIndicator._build_chart_data(rows, season_months)
|
| 82 |
-
assert len(result["dates"]) == 4
|
| 83 |
-
assert result["dates"][0] == "2025-06"
|
| 84 |
-
assert result["dates"][-1] == "2025-09"
|
| 85 |
-
assert all(v == 3 for v in result["values"])
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
def test_build_chart_data_no_season_returns_all():
|
| 89 |
-
rows = [{"acq_date": f"2025-{m:02d}-15"} for m in range(1, 13)]
|
| 90 |
-
result = FiresIndicator._build_chart_data(rows, None)
|
| 91 |
-
assert len(result["dates"]) == 12
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,117 +0,0 @@
|
|
| 1 |
-
"""Tests for app.indicators.lst — Sentinel-3 SLSTR LST via openEO."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import os
|
| 5 |
-
import tempfile
|
| 6 |
-
from unittest.mock import MagicMock, patch
|
| 7 |
-
from datetime import date
|
| 8 |
-
|
| 9 |
-
import numpy as np
|
| 10 |
-
import rasterio
|
| 11 |
-
from rasterio.transform import from_bounds
|
| 12 |
-
import pytest
|
| 13 |
-
|
| 14 |
-
from app.models import AOI, TimeRange, StatusLevel, ConfidenceLevel
|
| 15 |
-
|
| 16 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
@pytest.fixture
|
| 20 |
-
def test_aoi():
|
| 21 |
-
return AOI(name="Test", bbox=BBOX)
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
@pytest.fixture
|
| 25 |
-
def test_time_range():
|
| 26 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
def _mock_lst_tif(path: str, n_months: int = 12, mean_k: float = 310.0):
|
| 30 |
-
"""Create synthetic LST GeoTIFF in Kelvin."""
|
| 31 |
-
rng = np.random.default_rng(45)
|
| 32 |
-
data = np.zeros((n_months, 10, 10), dtype=np.float32)
|
| 33 |
-
for m in range(n_months):
|
| 34 |
-
seasonal = 5.0 * np.sin(np.pi * (m - 1) / 6)
|
| 35 |
-
data[m] = mean_k + seasonal + rng.normal(0, 2, (10, 10))
|
| 36 |
-
with rasterio.open(
|
| 37 |
-
path, "w", driver="GTiff", height=10, width=10, count=n_months,
|
| 38 |
-
dtype="float32", crs="EPSG:4326",
|
| 39 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=-9999.0,
|
| 40 |
-
) as dst:
|
| 41 |
-
for i in range(n_months):
|
| 42 |
-
dst.write(data[i], i + 1)
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
def _mock_rgb_tif(path: str):
|
| 46 |
-
rng = np.random.default_rng(43)
|
| 47 |
-
data = rng.integers(500, 1500, (3, 10, 10), dtype=np.uint16)
|
| 48 |
-
with rasterio.open(
|
| 49 |
-
path, "w", driver="GTiff", height=10, width=10, count=3,
|
| 50 |
-
dtype="uint16", crs="EPSG:4326",
|
| 51 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=0,
|
| 52 |
-
) as dst:
|
| 53 |
-
for i in range(3):
|
| 54 |
-
dst.write(data[i], i + 1)
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
@pytest.mark.asyncio
|
| 58 |
-
async def test_lst_process_returns_result(test_aoi, test_time_range):
|
| 59 |
-
from app.indicators.lst import LSTIndicator
|
| 60 |
-
|
| 61 |
-
indicator = LSTIndicator()
|
| 62 |
-
|
| 63 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 64 |
-
lst_path = os.path.join(tmpdir, "lst.tif")
|
| 65 |
-
baseline_path = os.path.join(tmpdir, "lst_baseline.tif")
|
| 66 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 67 |
-
_mock_lst_tif(lst_path, mean_k=310.0)
|
| 68 |
-
_mock_lst_tif(baseline_path, mean_k=308.0)
|
| 69 |
-
_mock_rgb_tif(rgb_path)
|
| 70 |
-
|
| 71 |
-
mock_cube = MagicMock()
|
| 72 |
-
|
| 73 |
-
def fake_download(path, **kwargs):
|
| 74 |
-
import shutil
|
| 75 |
-
if "lst" in path and "baseline" not in path:
|
| 76 |
-
shutil.copy(lst_path, path)
|
| 77 |
-
elif "lst" in path:
|
| 78 |
-
shutil.copy(baseline_path, path)
|
| 79 |
-
else:
|
| 80 |
-
shutil.copy(rgb_path, path)
|
| 81 |
-
|
| 82 |
-
mock_cube.download = MagicMock(side_effect=fake_download)
|
| 83 |
-
|
| 84 |
-
with patch("app.indicators.lst.get_connection"), \
|
| 85 |
-
patch("app.indicators.lst.build_lst_graph", return_value=mock_cube), \
|
| 86 |
-
patch("app.indicators.lst.build_true_color_graph", return_value=mock_cube):
|
| 87 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 88 |
-
|
| 89 |
-
assert result.indicator_id == "lst"
|
| 90 |
-
assert result.data_source == "satellite"
|
| 91 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
@pytest.mark.asyncio
|
| 95 |
-
async def test_lst_falls_back_on_failure(test_aoi, test_time_range):
|
| 96 |
-
from app.indicators.lst import LSTIndicator
|
| 97 |
-
indicator = LSTIndicator()
|
| 98 |
-
|
| 99 |
-
with patch("app.indicators.lst.get_connection", side_effect=Exception("CDSE down")):
|
| 100 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 101 |
-
|
| 102 |
-
assert result.indicator_id == "lst"
|
| 103 |
-
assert result.data_source == "placeholder"
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
def test_lst_compute_stats():
|
| 107 |
-
from app.indicators.lst import LSTIndicator
|
| 108 |
-
|
| 109 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 110 |
-
path = os.path.join(tmpdir, "lst.tif")
|
| 111 |
-
_mock_lst_tif(path, mean_k=310.0)
|
| 112 |
-
stats = LSTIndicator._compute_stats(path)
|
| 113 |
-
|
| 114 |
-
assert "monthly_means_celsius" in stats
|
| 115 |
-
assert len(stats["monthly_means_celsius"]) == 12
|
| 116 |
-
assert "overall_mean_celsius" in stats
|
| 117 |
-
assert 30 < stats["overall_mean_celsius"] < 45 # ~310K = ~37C
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,285 +0,0 @@
|
|
| 1 |
-
"""Tests for app.indicators.ndvi — pixel-level NDVI via openEO."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import os
|
| 5 |
-
import tempfile
|
| 6 |
-
from unittest.mock import MagicMock, patch
|
| 7 |
-
from datetime import date
|
| 8 |
-
|
| 9 |
-
import numpy as np
|
| 10 |
-
import pytest
|
| 11 |
-
|
| 12 |
-
from app.models import AOI, TimeRange, StatusLevel, TrendDirection, ConfidenceLevel
|
| 13 |
-
from tests.conftest import mock_rgb_tif, make_mock_batch_job
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
@pytest.fixture
|
| 17 |
-
def test_aoi():
|
| 18 |
-
return AOI(name="Test Khartoum", bbox=[32.45, 15.65, 32.65, 15.8])
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
@pytest.fixture
|
| 22 |
-
def test_time_range():
|
| 23 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
def _mock_ndvi_tif(path: str, n_months: int = 12):
|
| 27 |
-
"""Create a small synthetic NDVI GeoTIFF at the given path."""
|
| 28 |
-
import rasterio
|
| 29 |
-
from rasterio.transform import from_bounds
|
| 30 |
-
rng = np.random.default_rng(42)
|
| 31 |
-
data = np.zeros((n_months, 10, 10), dtype=np.float32)
|
| 32 |
-
for m in range(n_months):
|
| 33 |
-
data[m] = 0.3 + 0.2 * np.sin(np.pi * (m - 3) / 6) + rng.normal(0, 0.02, (10, 10))
|
| 34 |
-
with rasterio.open(
|
| 35 |
-
path, "w", driver="GTiff", height=10, width=10, count=n_months,
|
| 36 |
-
dtype="float32", crs="EPSG:4326",
|
| 37 |
-
transform=from_bounds(32.45, 15.65, 32.65, 15.8, 10, 10),
|
| 38 |
-
nodata=-9999.0,
|
| 39 |
-
) as dst:
|
| 40 |
-
for i in range(n_months):
|
| 41 |
-
dst.write(data[i], i + 1)
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
@pytest.mark.asyncio
|
| 45 |
-
async def test_ndvi_process_returns_indicator_result(test_aoi, test_time_range):
|
| 46 |
-
"""NdviIndicator.process() returns a valid IndicatorResult."""
|
| 47 |
-
from app.indicators.ndvi import NdviIndicator
|
| 48 |
-
|
| 49 |
-
indicator = NdviIndicator()
|
| 50 |
-
|
| 51 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 52 |
-
ndvi_path = os.path.join(tmpdir, "ndvi.tif")
|
| 53 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 54 |
-
_mock_ndvi_tif(ndvi_path)
|
| 55 |
-
mock_rgb_tif(rgb_path)
|
| 56 |
-
|
| 57 |
-
mock_cube = MagicMock()
|
| 58 |
-
|
| 59 |
-
def fake_download(path, **kwargs):
|
| 60 |
-
import shutil
|
| 61 |
-
if "ndvi" in path:
|
| 62 |
-
shutil.copy(ndvi_path, path)
|
| 63 |
-
else:
|
| 64 |
-
shutil.copy(rgb_path, path)
|
| 65 |
-
|
| 66 |
-
mock_cube.download = MagicMock(side_effect=fake_download)
|
| 67 |
-
|
| 68 |
-
with patch("app.indicators.ndvi.get_connection") as mock_get_conn, \
|
| 69 |
-
patch("app.indicators.ndvi.build_ndvi_graph", return_value=mock_cube), \
|
| 70 |
-
patch("app.indicators.ndvi.build_true_color_graph", return_value=mock_cube):
|
| 71 |
-
|
| 72 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 73 |
-
|
| 74 |
-
assert result.indicator_id == "ndvi"
|
| 75 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 76 |
-
assert result.trend in (TrendDirection.IMPROVING, TrendDirection.STABLE, TrendDirection.DETERIORATING)
|
| 77 |
-
assert result.confidence in (ConfidenceLevel.HIGH, ConfidenceLevel.MODERATE, ConfidenceLevel.LOW)
|
| 78 |
-
assert "NDVI" in result.methodology or "ndvi" in result.methodology.lower()
|
| 79 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 80 |
-
assert len(result.chart_data.get("values", [])) > 0
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
@pytest.mark.asyncio
|
| 84 |
-
async def test_ndvi_falls_back_to_placeholder_on_failure(test_aoi, test_time_range):
|
| 85 |
-
"""NdviIndicator falls back gracefully when openEO fails."""
|
| 86 |
-
from app.indicators.ndvi import NdviIndicator
|
| 87 |
-
|
| 88 |
-
indicator = NdviIndicator()
|
| 89 |
-
|
| 90 |
-
with patch("app.indicators.ndvi.get_connection", side_effect=Exception("CDSE down")):
|
| 91 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 92 |
-
|
| 93 |
-
assert result.indicator_id == "ndvi"
|
| 94 |
-
assert result.data_source == "placeholder"
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
def test_ndvi_compute_stats():
|
| 98 |
-
"""_compute_stats() extracts correct statistics from a multi-band raster."""
|
| 99 |
-
from app.indicators.ndvi import NdviIndicator
|
| 100 |
-
|
| 101 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 102 |
-
path = os.path.join(tmpdir, "ndvi.tif")
|
| 103 |
-
_mock_ndvi_tif(path, n_months=12)
|
| 104 |
-
|
| 105 |
-
stats = NdviIndicator._compute_stats(path)
|
| 106 |
-
|
| 107 |
-
assert "monthly_means" in stats
|
| 108 |
-
assert len(stats["monthly_means"]) == 12
|
| 109 |
-
assert "overall_mean" in stats
|
| 110 |
-
assert 0 < stats["overall_mean"] < 1
|
| 111 |
-
assert "valid_months" in stats
|
| 112 |
-
assert stats["valid_months"] == 12
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
@pytest.mark.asyncio
|
| 116 |
-
async def test_ndvi_submit_batch_creates_three_jobs(test_aoi, test_time_range):
|
| 117 |
-
"""submit_batch() creates current, baseline, and true-color batch jobs."""
|
| 118 |
-
from app.indicators.ndvi import NdviIndicator
|
| 119 |
-
|
| 120 |
-
indicator = NdviIndicator()
|
| 121 |
-
|
| 122 |
-
mock_conn = MagicMock()
|
| 123 |
-
mock_job = MagicMock()
|
| 124 |
-
mock_job.job_id = "j-test"
|
| 125 |
-
mock_conn.create_job.return_value = mock_job
|
| 126 |
-
|
| 127 |
-
with patch("app.indicators.ndvi.get_connection", return_value=mock_conn), \
|
| 128 |
-
patch("app.indicators.ndvi.build_ndvi_graph") as mock_ndvi_graph, \
|
| 129 |
-
patch("app.indicators.ndvi.build_true_color_graph") as mock_tc_graph:
|
| 130 |
-
|
| 131 |
-
mock_ndvi_graph.return_value = MagicMock()
|
| 132 |
-
mock_tc_graph.return_value = MagicMock()
|
| 133 |
-
|
| 134 |
-
jobs = await indicator.submit_batch(test_aoi, test_time_range)
|
| 135 |
-
|
| 136 |
-
assert len(jobs) == 3
|
| 137 |
-
assert mock_ndvi_graph.call_count == 2 # current + baseline
|
| 138 |
-
assert mock_tc_graph.call_count == 1 # true-color
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
@pytest.mark.asyncio
|
| 142 |
-
async def test_ndvi_harvest_computes_result_from_batch_jobs(test_aoi, test_time_range):
|
| 143 |
-
"""harvest() downloads batch results and returns IndicatorResult."""
|
| 144 |
-
from app.indicators.ndvi import NdviIndicator
|
| 145 |
-
|
| 146 |
-
indicator = NdviIndicator()
|
| 147 |
-
|
| 148 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 149 |
-
ndvi_path = os.path.join(tmpdir, "ndvi.tif")
|
| 150 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 151 |
-
_mock_ndvi_tif(ndvi_path)
|
| 152 |
-
mock_rgb_tif(rgb_path)
|
| 153 |
-
|
| 154 |
-
current_job = make_mock_batch_job(ndvi_path)
|
| 155 |
-
baseline_job = make_mock_batch_job(ndvi_path)
|
| 156 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 157 |
-
|
| 158 |
-
result = await indicator.harvest(
|
| 159 |
-
test_aoi, test_time_range,
|
| 160 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 161 |
-
)
|
| 162 |
-
|
| 163 |
-
assert result.indicator_id == "ndvi"
|
| 164 |
-
assert result.data_source == "satellite"
|
| 165 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 166 |
-
assert result.confidence in (ConfidenceLevel.HIGH, ConfidenceLevel.MODERATE, ConfidenceLevel.LOW)
|
| 167 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
@pytest.mark.asyncio
|
| 171 |
-
async def test_ndvi_harvest_degrades_when_baseline_fails(test_aoi, test_time_range):
|
| 172 |
-
"""harvest() returns partial result when baseline job failed."""
|
| 173 |
-
from app.indicators.ndvi import NdviIndicator
|
| 174 |
-
|
| 175 |
-
indicator = NdviIndicator()
|
| 176 |
-
|
| 177 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 178 |
-
ndvi_path = os.path.join(tmpdir, "ndvi.tif")
|
| 179 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 180 |
-
_mock_ndvi_tif(ndvi_path)
|
| 181 |
-
mock_rgb_tif(rgb_path)
|
| 182 |
-
|
| 183 |
-
def make_mock_job(src_path, status="finished"):
|
| 184 |
-
job = MagicMock()
|
| 185 |
-
job.job_id = "j-test"
|
| 186 |
-
job.status.return_value = status
|
| 187 |
-
|
| 188 |
-
def fake_download_results(target):
|
| 189 |
-
if status == "error":
|
| 190 |
-
raise Exception("Batch job failed on CDSE")
|
| 191 |
-
os.makedirs(target, exist_ok=True)
|
| 192 |
-
dest = os.path.join(target, "result.tif")
|
| 193 |
-
import shutil
|
| 194 |
-
shutil.copy(src_path, dest)
|
| 195 |
-
from pathlib import Path
|
| 196 |
-
return {Path(dest): {"type": "image/tiff"}}
|
| 197 |
-
job.download_results.side_effect = fake_download_results
|
| 198 |
-
return job
|
| 199 |
-
|
| 200 |
-
current_job = make_mock_job(ndvi_path)
|
| 201 |
-
baseline_job = make_mock_job(ndvi_path, status="error")
|
| 202 |
-
true_color_job = make_mock_job(rgb_path)
|
| 203 |
-
|
| 204 |
-
result = await indicator.harvest(
|
| 205 |
-
test_aoi, test_time_range,
|
| 206 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 207 |
-
)
|
| 208 |
-
|
| 209 |
-
assert result.indicator_id == "ndvi"
|
| 210 |
-
assert result.data_source == "satellite"
|
| 211 |
-
assert result.confidence == ConfidenceLevel.LOW
|
| 212 |
-
assert result.trend == TrendDirection.STABLE
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
@pytest.mark.asyncio
|
| 216 |
-
async def test_ndvi_harvest_falls_back_when_current_fails(test_aoi, test_time_range):
|
| 217 |
-
"""harvest() returns placeholder when current NDVI job failed."""
|
| 218 |
-
from app.indicators.ndvi import NdviIndicator
|
| 219 |
-
|
| 220 |
-
indicator = NdviIndicator()
|
| 221 |
-
|
| 222 |
-
current_job = MagicMock()
|
| 223 |
-
current_job.status.return_value = "error"
|
| 224 |
-
current_job.download_results.side_effect = Exception("failed")
|
| 225 |
-
baseline_job = MagicMock()
|
| 226 |
-
baseline_job.status.return_value = "finished"
|
| 227 |
-
true_color_job = MagicMock()
|
| 228 |
-
true_color_job.status.return_value = "finished"
|
| 229 |
-
|
| 230 |
-
result = await indicator.harvest(
|
| 231 |
-
test_aoi, test_time_range,
|
| 232 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 233 |
-
)
|
| 234 |
-
|
| 235 |
-
assert result.data_source == "placeholder"
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
@pytest.mark.asyncio
|
| 239 |
-
async def test_ndvi_harvest_uses_baseline_when_available(test_aoi, test_time_range):
|
| 240 |
-
"""harvest() computes change vs baseline when baseline download succeeds."""
|
| 241 |
-
from app.indicators.ndvi import NdviIndicator
|
| 242 |
-
|
| 243 |
-
indicator = NdviIndicator()
|
| 244 |
-
|
| 245 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 246 |
-
# Current: higher NDVI
|
| 247 |
-
current_path = os.path.join(tmpdir, "current.tif")
|
| 248 |
-
_mock_ndvi_tif(current_path, n_months=12)
|
| 249 |
-
|
| 250 |
-
# Baseline: lower NDVI (shift down by 0.1)
|
| 251 |
-
baseline_path = os.path.join(tmpdir, "baseline.tif")
|
| 252 |
-
import rasterio
|
| 253 |
-
from rasterio.transform import from_bounds
|
| 254 |
-
rng = np.random.default_rng(99)
|
| 255 |
-
data = np.zeros((12, 10, 10), dtype=np.float32)
|
| 256 |
-
for m in range(12):
|
| 257 |
-
data[m] = 0.2 + 0.1 * np.sin(np.pi * (m - 3) / 6) + rng.normal(0, 0.02, (10, 10))
|
| 258 |
-
with rasterio.open(
|
| 259 |
-
baseline_path, "w", driver="GTiff", height=10, width=10, count=12,
|
| 260 |
-
dtype="float32", crs="EPSG:4326",
|
| 261 |
-
transform=from_bounds(32.45, 15.65, 32.65, 15.8, 10, 10),
|
| 262 |
-
nodata=-9999.0,
|
| 263 |
-
) as dst:
|
| 264 |
-
for i in range(12):
|
| 265 |
-
dst.write(data[i], i + 1)
|
| 266 |
-
|
| 267 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 268 |
-
mock_rgb_tif(rgb_path)
|
| 269 |
-
|
| 270 |
-
current_job = make_mock_batch_job(current_path)
|
| 271 |
-
baseline_job = make_mock_batch_job(baseline_path)
|
| 272 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 273 |
-
|
| 274 |
-
result = await indicator.harvest(
|
| 275 |
-
test_aoi, test_time_range,
|
| 276 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 277 |
-
)
|
| 278 |
-
|
| 279 |
-
assert result.data_source == "satellite"
|
| 280 |
-
# With distinct current vs baseline, confidence should NOT be LOW
|
| 281 |
-
# (it should be HIGH with 12 valid months)
|
| 282 |
-
assert result.confidence == ConfidenceLevel.HIGH
|
| 283 |
-
# Baseline band data should be present in chart
|
| 284 |
-
assert "baseline_mean" in result.chart_data
|
| 285 |
-
assert len(result.chart_data["baseline_mean"]) > 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,91 +0,0 @@
|
|
| 1 |
-
"""Tests for app.indicators.nightlights — VIIRS DNB via EOG direct download."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import os
|
| 5 |
-
import tempfile
|
| 6 |
-
from unittest.mock import MagicMock, patch, AsyncMock
|
| 7 |
-
from datetime import date
|
| 8 |
-
|
| 9 |
-
import numpy as np
|
| 10 |
-
import rasterio
|
| 11 |
-
from rasterio.transform import from_bounds
|
| 12 |
-
import pytest
|
| 13 |
-
|
| 14 |
-
from app.models import AOI, TimeRange, StatusLevel, ConfidenceLevel
|
| 15 |
-
|
| 16 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
@pytest.fixture
|
| 20 |
-
def test_aoi():
|
| 21 |
-
return AOI(name="Test", bbox=BBOX)
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
@pytest.fixture
|
| 25 |
-
def test_time_range():
|
| 26 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
def _mock_radiance_tif(path: str, mean_nw: float = 5.0):
|
| 30 |
-
"""Create synthetic VIIRS DNB radiance GeoTIFF (nW/cm2/sr)."""
|
| 31 |
-
rng = np.random.default_rng(47)
|
| 32 |
-
data = np.maximum(0, mean_nw + rng.normal(0, 2, (10, 10))).astype(np.float32)
|
| 33 |
-
with rasterio.open(
|
| 34 |
-
path, "w", driver="GTiff", height=10, width=10, count=1,
|
| 35 |
-
dtype="float32", crs="EPSG:4326",
|
| 36 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=-9999.0,
|
| 37 |
-
) as dst:
|
| 38 |
-
dst.write(data, 1)
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
@pytest.mark.asyncio
|
| 42 |
-
async def test_nightlights_process_returns_result(test_aoi, test_time_range):
|
| 43 |
-
from app.indicators.nightlights import NightlightsIndicator
|
| 44 |
-
|
| 45 |
-
indicator = NightlightsIndicator()
|
| 46 |
-
|
| 47 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 48 |
-
current_path = os.path.join(tmpdir, "viirs_current.tif")
|
| 49 |
-
baseline_path = os.path.join(tmpdir, "viirs_baseline.tif")
|
| 50 |
-
_mock_radiance_tif(current_path, mean_nw=5.0)
|
| 51 |
-
_mock_radiance_tif(baseline_path, mean_nw=6.0)
|
| 52 |
-
|
| 53 |
-
with patch.object(indicator, '_download_viirs', new_callable=AsyncMock) as mock_dl:
|
| 54 |
-
async def fake_dl(bbox, year, output_path):
|
| 55 |
-
import shutil
|
| 56 |
-
if "current" in output_path:
|
| 57 |
-
shutil.copy(current_path, output_path)
|
| 58 |
-
else:
|
| 59 |
-
shutil.copy(baseline_path, output_path)
|
| 60 |
-
|
| 61 |
-
mock_dl.side_effect = fake_dl
|
| 62 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 63 |
-
|
| 64 |
-
assert result.indicator_id == "nightlights"
|
| 65 |
-
assert result.data_source == "satellite"
|
| 66 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
@pytest.mark.asyncio
|
| 70 |
-
async def test_nightlights_falls_back_on_failure(test_aoi, test_time_range):
|
| 71 |
-
from app.indicators.nightlights import NightlightsIndicator
|
| 72 |
-
indicator = NightlightsIndicator()
|
| 73 |
-
|
| 74 |
-
with patch.object(indicator, '_download_viirs', new_callable=AsyncMock, side_effect=Exception("Download failed")):
|
| 75 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 76 |
-
|
| 77 |
-
assert result.indicator_id == "nightlights"
|
| 78 |
-
assert result.data_source == "placeholder"
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
def test_nightlights_compute_stats():
|
| 82 |
-
from app.indicators.nightlights import NightlightsIndicator
|
| 83 |
-
|
| 84 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 85 |
-
path = os.path.join(tmpdir, "viirs.tif")
|
| 86 |
-
_mock_radiance_tif(path, mean_nw=5.0)
|
| 87 |
-
stats = NightlightsIndicator._compute_stats(path)
|
| 88 |
-
|
| 89 |
-
assert "mean_radiance" in stats
|
| 90 |
-
assert stats["mean_radiance"] > 0
|
| 91 |
-
assert "valid_pixel_fraction" in stats
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,55 +0,0 @@
|
|
| 1 |
-
def test_build_chart_data_includes_baseline_range():
|
| 2 |
-
from app.indicators.no2 import NO2Indicator
|
| 3 |
-
from datetime import date
|
| 4 |
-
from app.models import TimeRange
|
| 5 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 6 |
-
result = NO2Indicator._build_chart_data(
|
| 7 |
-
current=16.5, baseline_mean=15.0, baseline_std=4.0, time_range=tr,
|
| 8 |
-
baseline_yearly_means=[12.0, 15.0, 18.0],
|
| 9 |
-
)
|
| 10 |
-
assert "baseline_range_mean" in result
|
| 11 |
-
assert result["baseline_range_min"] == 12.0
|
| 12 |
-
assert result["baseline_range_max"] == 18.0
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
def test_build_chart_data_no_range_when_single_year():
|
| 16 |
-
from app.indicators.no2 import NO2Indicator
|
| 17 |
-
from datetime import date
|
| 18 |
-
from app.models import TimeRange
|
| 19 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 20 |
-
result = NO2Indicator._build_chart_data(
|
| 21 |
-
current=16.5, baseline_mean=15.0, baseline_std=4.0, time_range=tr,
|
| 22 |
-
baseline_yearly_means=[15.0],
|
| 23 |
-
)
|
| 24 |
-
assert "baseline_range_mean" not in result
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
def test_build_chart_data_no_range_when_none():
|
| 28 |
-
from app.indicators.no2 import NO2Indicator
|
| 29 |
-
from datetime import date
|
| 30 |
-
from app.models import TimeRange
|
| 31 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 32 |
-
result = NO2Indicator._build_chart_data(
|
| 33 |
-
current=16.5, baseline_mean=15.0, baseline_std=4.0, time_range=tr,
|
| 34 |
-
)
|
| 35 |
-
assert "baseline_range_mean" not in result
|
| 36 |
-
assert result["baseline_std"] == 4.0
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
def test_build_monthly_chart_data():
|
| 40 |
-
from app.indicators.no2 import NO2Indicator
|
| 41 |
-
from datetime import date
|
| 42 |
-
from app.models import TimeRange
|
| 43 |
-
|
| 44 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 45 |
-
current_monthly = {1: 12.0, 2: 13.0, 3: 14.0}
|
| 46 |
-
baseline_per_year_monthly = {1: [11.0, 12.0, 13.0], 2: [12.0, 13.0, 14.0], 3: [13.0, 14.0, 15.0]}
|
| 47 |
-
result = NO2Indicator._build_monthly_chart_data(
|
| 48 |
-
current_monthly=current_monthly,
|
| 49 |
-
baseline_per_year_monthly=baseline_per_year_monthly,
|
| 50 |
-
time_range=tr,
|
| 51 |
-
season_months=[1, 2, 3],
|
| 52 |
-
)
|
| 53 |
-
assert len(result["dates"]) == 3
|
| 54 |
-
assert "baseline_mean" in result
|
| 55 |
-
assert result["label"] == "NO2 concentration (µg/m³)"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,97 +0,0 @@
|
|
| 1 |
-
"""Tests for app.indicators.rainfall — CHIRPS precipitation via direct download."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import os
|
| 5 |
-
import tempfile
|
| 6 |
-
from unittest.mock import MagicMock, patch, AsyncMock
|
| 7 |
-
from datetime import date
|
| 8 |
-
|
| 9 |
-
import numpy as np
|
| 10 |
-
import rasterio
|
| 11 |
-
from rasterio.transform import from_bounds
|
| 12 |
-
import pytest
|
| 13 |
-
|
| 14 |
-
from app.models import AOI, TimeRange, StatusLevel, ConfidenceLevel
|
| 15 |
-
|
| 16 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
@pytest.fixture
|
| 20 |
-
def test_aoi():
|
| 21 |
-
return AOI(name="Test", bbox=BBOX)
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
@pytest.fixture
|
| 25 |
-
def test_time_range():
|
| 26 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
def _mock_precip_tif(path: str, n_months: int = 12, mean_mm: float = 50.0):
|
| 30 |
-
"""Create synthetic monthly precipitation GeoTIFF in mm."""
|
| 31 |
-
rng = np.random.default_rng(46)
|
| 32 |
-
data = np.zeros((n_months, 10, 10), dtype=np.float32)
|
| 33 |
-
for m in range(n_months):
|
| 34 |
-
seasonal = mean_mm * (0.5 + 0.8 * np.sin(np.pi * (m - 2) / 6))
|
| 35 |
-
data[m] = np.maximum(0, seasonal + rng.normal(0, 10, (10, 10)))
|
| 36 |
-
with rasterio.open(
|
| 37 |
-
path, "w", driver="GTiff", height=10, width=10, count=n_months,
|
| 38 |
-
dtype="float32", crs="EPSG:4326",
|
| 39 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=-9999.0,
|
| 40 |
-
) as dst:
|
| 41 |
-
for i in range(n_months):
|
| 42 |
-
dst.write(data[i], i + 1)
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
@pytest.mark.asyncio
|
| 46 |
-
async def test_rainfall_process_returns_result(test_aoi, test_time_range):
|
| 47 |
-
from app.indicators.rainfall import RainfallIndicator
|
| 48 |
-
|
| 49 |
-
indicator = RainfallIndicator()
|
| 50 |
-
|
| 51 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 52 |
-
current_path = os.path.join(tmpdir, "precip_current.tif")
|
| 53 |
-
baseline_path = os.path.join(tmpdir, "precip_baseline.tif")
|
| 54 |
-
_mock_precip_tif(current_path, mean_mm=45.0)
|
| 55 |
-
_mock_precip_tif(baseline_path, mean_mm=50.0)
|
| 56 |
-
|
| 57 |
-
with patch.object(indicator, '_download_chirps', new_callable=AsyncMock) as mock_dl:
|
| 58 |
-
async def fake_dl(bbox, start, end, output_path):
|
| 59 |
-
import shutil
|
| 60 |
-
if "current" in output_path:
|
| 61 |
-
shutil.copy(current_path, output_path)
|
| 62 |
-
else:
|
| 63 |
-
shutil.copy(baseline_path, output_path)
|
| 64 |
-
|
| 65 |
-
mock_dl.side_effect = fake_dl
|
| 66 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 67 |
-
|
| 68 |
-
assert result.indicator_id == "rainfall"
|
| 69 |
-
assert result.data_source == "satellite"
|
| 70 |
-
assert "CHIRPS" in result.methodology
|
| 71 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
@pytest.mark.asyncio
|
| 75 |
-
async def test_rainfall_falls_back_on_failure(test_aoi, test_time_range):
|
| 76 |
-
from app.indicators.rainfall import RainfallIndicator
|
| 77 |
-
indicator = RainfallIndicator()
|
| 78 |
-
|
| 79 |
-
with patch.object(indicator, '_download_chirps', new_callable=AsyncMock, side_effect=Exception("Download failed")):
|
| 80 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 81 |
-
|
| 82 |
-
assert result.indicator_id == "rainfall"
|
| 83 |
-
assert result.data_source == "placeholder"
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
def test_rainfall_compute_stats():
|
| 87 |
-
from app.indicators.rainfall import RainfallIndicator
|
| 88 |
-
|
| 89 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 90 |
-
path = os.path.join(tmpdir, "precip.tif")
|
| 91 |
-
_mock_precip_tif(path, mean_mm=50.0)
|
| 92 |
-
stats = RainfallIndicator._compute_stats(path)
|
| 93 |
-
|
| 94 |
-
assert "monthly_means_mm" in stats
|
| 95 |
-
assert len(stats["monthly_means_mm"]) == 12
|
| 96 |
-
assert "total_mm" in stats
|
| 97 |
-
assert stats["total_mm"] > 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,235 +0,0 @@
|
|
| 1 |
-
"""Tests for app.indicators.sar — SAR backscatter via openEO."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import os
|
| 5 |
-
import tempfile
|
| 6 |
-
from unittest.mock import MagicMock, patch
|
| 7 |
-
from datetime import date
|
| 8 |
-
|
| 9 |
-
import numpy as np
|
| 10 |
-
import rasterio
|
| 11 |
-
from rasterio.transform import from_bounds
|
| 12 |
-
import pytest
|
| 13 |
-
|
| 14 |
-
from app.models import AOI, TimeRange, StatusLevel, TrendDirection, ConfidenceLevel
|
| 15 |
-
from tests.conftest import mock_rgb_tif, make_mock_batch_job
|
| 16 |
-
|
| 17 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
@pytest.fixture
|
| 21 |
-
def test_aoi():
|
| 22 |
-
return AOI(name="Test", bbox=BBOX)
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
@pytest.fixture
|
| 26 |
-
def test_time_range():
|
| 27 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
def _mock_sar_tif(path: str, n_months: int = 12):
|
| 31 |
-
"""Create synthetic SAR backscatter GeoTIFF in dB scale.
|
| 32 |
-
|
| 33 |
-
Interleaved bands: VV_m1, VH_m1, VV_m2, VH_m2, ...
|
| 34 |
-
Total bands = n_months * 2.
|
| 35 |
-
"""
|
| 36 |
-
rng = np.random.default_rng(50)
|
| 37 |
-
n_bands = n_months * 2
|
| 38 |
-
data = np.zeros((n_bands, 10, 10), dtype=np.float32)
|
| 39 |
-
for m in range(n_months):
|
| 40 |
-
# VV: typical range -15 to -5 dB
|
| 41 |
-
data[m * 2] = rng.uniform(-12, -6, (10, 10))
|
| 42 |
-
# VH: typically 5-8 dB lower than VV
|
| 43 |
-
data[m * 2 + 1] = data[m * 2] - rng.uniform(5, 8, (10, 10))
|
| 44 |
-
with rasterio.open(
|
| 45 |
-
path, "w", driver="GTiff", height=10, width=10, count=n_bands,
|
| 46 |
-
dtype="float32", crs="EPSG:4326",
|
| 47 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=-9999.0,
|
| 48 |
-
) as dst:
|
| 49 |
-
for i in range(n_bands):
|
| 50 |
-
dst.write(data[i], i + 1)
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
@pytest.mark.asyncio
|
| 54 |
-
async def test_sar_process_returns_result(test_aoi, test_time_range):
|
| 55 |
-
"""SarIndicator.process() returns a valid IndicatorResult."""
|
| 56 |
-
from app.indicators.sar import SarIndicator
|
| 57 |
-
|
| 58 |
-
indicator = SarIndicator()
|
| 59 |
-
|
| 60 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 61 |
-
sar_path = os.path.join(tmpdir, "sar.tif")
|
| 62 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 63 |
-
_mock_sar_tif(sar_path)
|
| 64 |
-
mock_rgb_tif(rgb_path)
|
| 65 |
-
|
| 66 |
-
mock_cube = MagicMock()
|
| 67 |
-
|
| 68 |
-
def fake_download(path, **kwargs):
|
| 69 |
-
import shutil
|
| 70 |
-
if "sar" in path:
|
| 71 |
-
shutil.copy(sar_path, path)
|
| 72 |
-
else:
|
| 73 |
-
shutil.copy(rgb_path, path)
|
| 74 |
-
|
| 75 |
-
mock_cube.download = MagicMock(side_effect=fake_download)
|
| 76 |
-
|
| 77 |
-
with patch("app.indicators.sar.get_connection"), \
|
| 78 |
-
patch("app.indicators.sar.build_sar_graph", return_value=mock_cube), \
|
| 79 |
-
patch("app.indicators.sar.build_true_color_graph", return_value=mock_cube):
|
| 80 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 81 |
-
|
| 82 |
-
assert result.indicator_id == "sar"
|
| 83 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 84 |
-
assert result.trend in (TrendDirection.IMPROVING, TrendDirection.STABLE, TrendDirection.DETERIORATING)
|
| 85 |
-
assert result.data_source == "satellite"
|
| 86 |
-
assert "SAR" in result.methodology or "backscatter" in result.methodology.lower()
|
| 87 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
@pytest.mark.asyncio
|
| 91 |
-
async def test_sar_falls_back_on_failure(test_aoi, test_time_range):
|
| 92 |
-
"""SarIndicator falls back gracefully when openEO fails."""
|
| 93 |
-
from app.indicators.sar import SarIndicator
|
| 94 |
-
|
| 95 |
-
indicator = SarIndicator()
|
| 96 |
-
|
| 97 |
-
with patch("app.indicators.sar.get_connection", side_effect=Exception("CDSE down")):
|
| 98 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 99 |
-
|
| 100 |
-
assert result.indicator_id == "sar"
|
| 101 |
-
assert result.confidence == ConfidenceLevel.LOW
|
| 102 |
-
assert "Insufficient" in result.headline or "placeholder" in result.data_source
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
def test_sar_compute_stats():
|
| 106 |
-
"""_compute_stats() extracts VV monthly means from interleaved SAR raster."""
|
| 107 |
-
from app.indicators.sar import SarIndicator
|
| 108 |
-
|
| 109 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 110 |
-
path = os.path.join(tmpdir, "sar.tif")
|
| 111 |
-
_mock_sar_tif(path, n_months=12)
|
| 112 |
-
stats = SarIndicator._compute_stats(path)
|
| 113 |
-
|
| 114 |
-
assert "monthly_vv_means" in stats
|
| 115 |
-
assert len(stats["monthly_vv_means"]) == 12
|
| 116 |
-
assert "overall_vv_mean" in stats
|
| 117 |
-
# VV should be negative dB values
|
| 118 |
-
assert stats["overall_vv_mean"] < 0
|
| 119 |
-
assert "valid_months" in stats
|
| 120 |
-
assert stats["valid_months"] == 12
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
def test_sar_classify_change():
|
| 124 |
-
"""_classify() maps change area percentage to correct status."""
|
| 125 |
-
from app.indicators.sar import SarIndicator
|
| 126 |
-
|
| 127 |
-
assert SarIndicator._classify(change_pct=3.0, flood_months=0) == StatusLevel.GREEN
|
| 128 |
-
assert SarIndicator._classify(change_pct=10.0, flood_months=0) == StatusLevel.AMBER
|
| 129 |
-
assert SarIndicator._classify(change_pct=10.0, flood_months=2) == StatusLevel.AMBER
|
| 130 |
-
assert SarIndicator._classify(change_pct=20.0, flood_months=0) == StatusLevel.RED
|
| 131 |
-
assert SarIndicator._classify(change_pct=3.0, flood_months=3) == StatusLevel.RED
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
@pytest.mark.asyncio
|
| 135 |
-
async def test_sar_submit_batch_creates_three_jobs(test_aoi, test_time_range):
|
| 136 |
-
"""submit_batch() creates current, baseline, and true-color batch jobs."""
|
| 137 |
-
from app.indicators.sar import SarIndicator
|
| 138 |
-
|
| 139 |
-
indicator = SarIndicator()
|
| 140 |
-
|
| 141 |
-
mock_conn = MagicMock()
|
| 142 |
-
mock_job = MagicMock()
|
| 143 |
-
mock_job.job_id = "j-test"
|
| 144 |
-
mock_conn.create_job.return_value = mock_job
|
| 145 |
-
|
| 146 |
-
with patch("app.indicators.sar.get_connection", return_value=mock_conn), \
|
| 147 |
-
patch("app.indicators.sar.build_sar_graph") as mock_sar_graph, \
|
| 148 |
-
patch("app.indicators.sar.build_true_color_graph") as mock_tc_graph:
|
| 149 |
-
|
| 150 |
-
mock_sar_graph.return_value = MagicMock()
|
| 151 |
-
mock_tc_graph.return_value = MagicMock()
|
| 152 |
-
|
| 153 |
-
jobs = await indicator.submit_batch(test_aoi, test_time_range)
|
| 154 |
-
|
| 155 |
-
assert len(jobs) == 3
|
| 156 |
-
assert mock_sar_graph.call_count == 2 # current + baseline
|
| 157 |
-
assert mock_tc_graph.call_count == 1 # true-color
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
@pytest.mark.asyncio
|
| 161 |
-
async def test_sar_harvest_computes_result_from_batch_jobs(test_aoi, test_time_range):
|
| 162 |
-
"""harvest() downloads batch results and returns IndicatorResult."""
|
| 163 |
-
from app.indicators.sar import SarIndicator
|
| 164 |
-
|
| 165 |
-
indicator = SarIndicator()
|
| 166 |
-
|
| 167 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 168 |
-
sar_path = os.path.join(tmpdir, "sar.tif")
|
| 169 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 170 |
-
_mock_sar_tif(sar_path)
|
| 171 |
-
mock_rgb_tif(rgb_path)
|
| 172 |
-
|
| 173 |
-
current_job = make_mock_batch_job(sar_path)
|
| 174 |
-
baseline_job = make_mock_batch_job(sar_path)
|
| 175 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 176 |
-
|
| 177 |
-
result = await indicator.harvest(
|
| 178 |
-
test_aoi, test_time_range,
|
| 179 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 180 |
-
)
|
| 181 |
-
|
| 182 |
-
assert result.indicator_id == "sar"
|
| 183 |
-
assert result.data_source == "satellite"
|
| 184 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 185 |
-
assert result.confidence in (ConfidenceLevel.HIGH, ConfidenceLevel.MODERATE, ConfidenceLevel.LOW)
|
| 186 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 187 |
-
assert "baseline_mean" in result.chart_data
|
| 188 |
-
assert len(result.chart_data["baseline_mean"]) > 0
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
@pytest.mark.asyncio
|
| 192 |
-
async def test_sar_harvest_falls_back_when_current_fails(test_aoi, test_time_range):
|
| 193 |
-
"""harvest() returns placeholder when current SAR job failed."""
|
| 194 |
-
from app.indicators.sar import SarIndicator
|
| 195 |
-
|
| 196 |
-
indicator = SarIndicator()
|
| 197 |
-
|
| 198 |
-
current_job = MagicMock()
|
| 199 |
-
current_job.download_results.side_effect = Exception("failed")
|
| 200 |
-
baseline_job = MagicMock()
|
| 201 |
-
true_color_job = MagicMock()
|
| 202 |
-
|
| 203 |
-
result = await indicator.harvest(
|
| 204 |
-
test_aoi, test_time_range,
|
| 205 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 206 |
-
)
|
| 207 |
-
|
| 208 |
-
assert result.data_source == "placeholder"
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
@pytest.mark.asyncio
|
| 212 |
-
async def test_sar_harvest_degrades_when_baseline_fails(test_aoi, test_time_range):
|
| 213 |
-
"""harvest() returns degraded result when baseline SAR job failed."""
|
| 214 |
-
from app.indicators.sar import SarIndicator
|
| 215 |
-
|
| 216 |
-
indicator = SarIndicator()
|
| 217 |
-
|
| 218 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 219 |
-
sar_path = os.path.join(tmpdir, "sar.tif")
|
| 220 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 221 |
-
_mock_sar_tif(sar_path)
|
| 222 |
-
mock_rgb_tif(rgb_path)
|
| 223 |
-
|
| 224 |
-
current_job = make_mock_batch_job(sar_path)
|
| 225 |
-
baseline_job = make_mock_batch_job(sar_path, fail=True)
|
| 226 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 227 |
-
|
| 228 |
-
result = await indicator.harvest(
|
| 229 |
-
test_aoi, test_time_range,
|
| 230 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 231 |
-
)
|
| 232 |
-
|
| 233 |
-
assert result.indicator_id == "sar"
|
| 234 |
-
assert result.data_source == "satellite"
|
| 235 |
-
assert result.confidence == ConfidenceLevel.LOW
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,90 +0,0 @@
|
|
| 1 |
-
"""Tests for the D2 Vegetation & Forest Cover indicator."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
from datetime import date
|
| 5 |
-
|
| 6 |
-
import pytest
|
| 7 |
-
|
| 8 |
-
from app.indicators.vegetation import VegetationIndicator
|
| 9 |
-
from app.models import TimeRange
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
# ---------------------------------------------------------------------------
|
| 13 |
-
# Baseline range in chart data
|
| 14 |
-
# ---------------------------------------------------------------------------
|
| 15 |
-
|
| 16 |
-
def test_build_chart_data_includes_baseline_range():
|
| 17 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 18 |
-
result = VegetationIndicator._build_chart_data(
|
| 19 |
-
baseline=35.0, current=38.0, time_range=tr,
|
| 20 |
-
baseline_yearly_means=[32.0, 35.0, 38.0, 34.0, 36.0],
|
| 21 |
-
)
|
| 22 |
-
assert "baseline_range_mean" in result
|
| 23 |
-
assert "baseline_range_min" in result
|
| 24 |
-
assert "baseline_range_max" in result
|
| 25 |
-
assert result["baseline_range_min"] == 32.0
|
| 26 |
-
assert result["baseline_range_max"] == 38.0
|
| 27 |
-
assert result["baseline_range_min"] <= result["baseline_range_mean"] <= result["baseline_range_max"]
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
def test_build_chart_data_no_baseline_range_when_absent():
|
| 31 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 32 |
-
result = VegetationIndicator._build_chart_data(
|
| 33 |
-
baseline=35.0, current=38.0, time_range=tr,
|
| 34 |
-
)
|
| 35 |
-
assert "baseline_range_mean" not in result
|
| 36 |
-
assert "baseline_range_min" not in result
|
| 37 |
-
assert "baseline_range_max" not in result
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
def test_build_chart_data_no_baseline_range_when_single_year():
|
| 41 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 42 |
-
result = VegetationIndicator._build_chart_data(
|
| 43 |
-
baseline=35.0, current=38.0, time_range=tr,
|
| 44 |
-
baseline_yearly_means=[35.0],
|
| 45 |
-
)
|
| 46 |
-
assert "baseline_range_mean" not in result
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
def test_build_chart_data_baseline_range_mean_is_rounded():
|
| 50 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 51 |
-
result = VegetationIndicator._build_chart_data(
|
| 52 |
-
baseline=35.0, current=38.0, time_range=tr,
|
| 53 |
-
baseline_yearly_means=[33.33, 36.67],
|
| 54 |
-
)
|
| 55 |
-
assert isinstance(result["baseline_range_mean"], float)
|
| 56 |
-
# Should be rounded to 1 decimal place
|
| 57 |
-
assert result["baseline_range_mean"] == round(result["baseline_range_mean"], 1)
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
def test_build_chart_data_base_fields_always_present():
|
| 61 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 62 |
-
result = VegetationIndicator._build_chart_data(
|
| 63 |
-
baseline=35.0, current=38.0, time_range=tr,
|
| 64 |
-
)
|
| 65 |
-
assert "dates" in result
|
| 66 |
-
assert "values" in result
|
| 67 |
-
assert "label" in result
|
| 68 |
-
assert result["values"] == [35.0, 38.0]
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
def test_build_monthly_chart_data():
|
| 72 |
-
from app.indicators.vegetation import VegetationIndicator
|
| 73 |
-
from datetime import date
|
| 74 |
-
from app.models import TimeRange
|
| 75 |
-
|
| 76 |
-
tr = TimeRange(start=date(2025, 1, 1), end=date(2025, 12, 31))
|
| 77 |
-
current_monthly = {4: 38.0, 5: 40.0, 6: 42.0}
|
| 78 |
-
baseline_stats = {4: [33.0, 35.0, 37.0], 5: [38.0, 40.0, 42.0], 6: [40.0, 42.0, 44.0]}
|
| 79 |
-
result = VegetationIndicator._build_monthly_chart_data(
|
| 80 |
-
current_monthly=current_monthly,
|
| 81 |
-
baseline_per_year_monthly=baseline_stats,
|
| 82 |
-
time_range=tr,
|
| 83 |
-
season_months=[4, 5, 6],
|
| 84 |
-
)
|
| 85 |
-
assert len(result["dates"]) == 3
|
| 86 |
-
assert result["dates"][0] == "2025-04"
|
| 87 |
-
assert "baseline_mean" in result
|
| 88 |
-
assert len(result["baseline_mean"]) == 3
|
| 89 |
-
for i in range(3):
|
| 90 |
-
assert result["baseline_min"][i] <= result["baseline_mean"][i] <= result["baseline_max"][i]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,215 +0,0 @@
|
|
| 1 |
-
"""Tests for app.indicators.water — pixel-level MNDWI via openEO."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
import os
|
| 5 |
-
import tempfile
|
| 6 |
-
from unittest.mock import MagicMock, patch
|
| 7 |
-
from datetime import date
|
| 8 |
-
|
| 9 |
-
import numpy as np
|
| 10 |
-
import rasterio
|
| 11 |
-
from rasterio.transform import from_bounds
|
| 12 |
-
import pytest
|
| 13 |
-
|
| 14 |
-
from app.models import AOI, TimeRange, StatusLevel, TrendDirection, ConfidenceLevel
|
| 15 |
-
from tests.conftest import mock_rgb_tif, make_mock_batch_job
|
| 16 |
-
|
| 17 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
@pytest.fixture
|
| 21 |
-
def test_aoi():
|
| 22 |
-
return AOI(name="Test", bbox=BBOX)
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
@pytest.fixture
|
| 26 |
-
def test_time_range():
|
| 27 |
-
return TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
def _mock_mndwi_tif(path: str, n_months: int = 12, water_fraction: float = 0.15):
|
| 31 |
-
"""Create synthetic MNDWI GeoTIFF. Values > 0 are water."""
|
| 32 |
-
rng = np.random.default_rng(44)
|
| 33 |
-
data = np.zeros((n_months, 10, 10), dtype=np.float32)
|
| 34 |
-
for m in range(n_months):
|
| 35 |
-
vals = rng.normal(-0.2, 0.3, (10, 10))
|
| 36 |
-
water_mask = rng.random((10, 10)) < water_fraction
|
| 37 |
-
vals[water_mask] = rng.uniform(0.1, 0.6, water_mask.sum())
|
| 38 |
-
data[m] = vals
|
| 39 |
-
with rasterio.open(
|
| 40 |
-
path, "w", driver="GTiff", height=10, width=10, count=n_months,
|
| 41 |
-
dtype="float32", crs="EPSG:4326",
|
| 42 |
-
transform=from_bounds(*BBOX, 10, 10), nodata=-9999.0,
|
| 43 |
-
) as dst:
|
| 44 |
-
for i in range(n_months):
|
| 45 |
-
dst.write(data[i], i + 1)
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
@pytest.mark.asyncio
|
| 49 |
-
async def test_water_process_returns_result(test_aoi, test_time_range):
|
| 50 |
-
"""WaterIndicator.process() returns a valid IndicatorResult."""
|
| 51 |
-
from app.indicators.water import WaterIndicator
|
| 52 |
-
|
| 53 |
-
indicator = WaterIndicator()
|
| 54 |
-
|
| 55 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 56 |
-
mndwi_path = os.path.join(tmpdir, "mndwi.tif")
|
| 57 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 58 |
-
_mock_mndwi_tif(mndwi_path)
|
| 59 |
-
mock_rgb_tif(rgb_path)
|
| 60 |
-
|
| 61 |
-
mock_cube = MagicMock()
|
| 62 |
-
|
| 63 |
-
def fake_download(path, **kwargs):
|
| 64 |
-
import shutil
|
| 65 |
-
if "mndwi" in path or "water" in path:
|
| 66 |
-
shutil.copy(mndwi_path, path)
|
| 67 |
-
else:
|
| 68 |
-
shutil.copy(rgb_path, path)
|
| 69 |
-
|
| 70 |
-
mock_cube.download = MagicMock(side_effect=fake_download)
|
| 71 |
-
|
| 72 |
-
with patch("app.indicators.water.get_connection"), \
|
| 73 |
-
patch("app.indicators.water.build_mndwi_graph", return_value=mock_cube), \
|
| 74 |
-
patch("app.indicators.water.build_true_color_graph", return_value=mock_cube):
|
| 75 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 76 |
-
|
| 77 |
-
assert result.indicator_id == "water"
|
| 78 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 79 |
-
assert "MNDWI" in result.methodology or "water" in result.methodology.lower()
|
| 80 |
-
assert result.data_source == "satellite"
|
| 81 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
@pytest.mark.asyncio
|
| 85 |
-
async def test_water_falls_back_on_failure(test_aoi, test_time_range):
|
| 86 |
-
"""WaterIndicator falls back gracefully when openEO fails."""
|
| 87 |
-
from app.indicators.water import WaterIndicator
|
| 88 |
-
|
| 89 |
-
indicator = WaterIndicator()
|
| 90 |
-
|
| 91 |
-
with patch("app.indicators.water.get_connection", side_effect=Exception("CDSE down")):
|
| 92 |
-
result = await indicator.process(test_aoi, test_time_range)
|
| 93 |
-
|
| 94 |
-
assert result.indicator_id == "water"
|
| 95 |
-
assert result.data_source == "placeholder"
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
def test_water_compute_stats():
|
| 99 |
-
"""_compute_stats() extracts water fraction from MNDWI raster."""
|
| 100 |
-
from app.indicators.water import WaterIndicator
|
| 101 |
-
|
| 102 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 103 |
-
path = os.path.join(tmpdir, "mndwi.tif")
|
| 104 |
-
_mock_mndwi_tif(path, n_months=12, water_fraction=0.2)
|
| 105 |
-
stats = WaterIndicator._compute_stats(path)
|
| 106 |
-
|
| 107 |
-
assert "monthly_water_fractions" in stats
|
| 108 |
-
assert len(stats["monthly_water_fractions"]) == 12
|
| 109 |
-
assert "overall_water_fraction" in stats
|
| 110 |
-
assert 0 < stats["overall_water_fraction"] < 1
|
| 111 |
-
assert "valid_months" in stats
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
@pytest.mark.asyncio
|
| 115 |
-
async def test_water_submit_batch_creates_three_jobs(test_aoi, test_time_range):
|
| 116 |
-
"""submit_batch() creates current, baseline, and true-color batch jobs."""
|
| 117 |
-
from app.indicators.water import WaterIndicator
|
| 118 |
-
|
| 119 |
-
indicator = WaterIndicator()
|
| 120 |
-
|
| 121 |
-
mock_conn = MagicMock()
|
| 122 |
-
mock_job = MagicMock()
|
| 123 |
-
mock_job.job_id = "j-test"
|
| 124 |
-
mock_conn.create_job.return_value = mock_job
|
| 125 |
-
|
| 126 |
-
with patch("app.indicators.water.get_connection", return_value=mock_conn), \
|
| 127 |
-
patch("app.indicators.water.build_mndwi_graph") as mock_water_graph, \
|
| 128 |
-
patch("app.indicators.water.build_true_color_graph") as mock_tc_graph:
|
| 129 |
-
|
| 130 |
-
mock_water_graph.return_value = MagicMock()
|
| 131 |
-
mock_tc_graph.return_value = MagicMock()
|
| 132 |
-
|
| 133 |
-
jobs = await indicator.submit_batch(test_aoi, test_time_range)
|
| 134 |
-
|
| 135 |
-
assert len(jobs) == 3
|
| 136 |
-
assert mock_water_graph.call_count == 2
|
| 137 |
-
assert mock_tc_graph.call_count == 1
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
@pytest.mark.asyncio
|
| 141 |
-
async def test_water_harvest_computes_result_from_batch_jobs(test_aoi, test_time_range):
|
| 142 |
-
"""harvest() downloads batch results and returns IndicatorResult."""
|
| 143 |
-
from app.indicators.water import WaterIndicator
|
| 144 |
-
|
| 145 |
-
indicator = WaterIndicator()
|
| 146 |
-
|
| 147 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 148 |
-
mndwi_path = os.path.join(tmpdir, "mndwi.tif")
|
| 149 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 150 |
-
_mock_mndwi_tif(mndwi_path)
|
| 151 |
-
mock_rgb_tif(rgb_path)
|
| 152 |
-
|
| 153 |
-
current_job = make_mock_batch_job(mndwi_path)
|
| 154 |
-
baseline_job = make_mock_batch_job(mndwi_path)
|
| 155 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 156 |
-
|
| 157 |
-
result = await indicator.harvest(
|
| 158 |
-
test_aoi, test_time_range,
|
| 159 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 160 |
-
)
|
| 161 |
-
|
| 162 |
-
assert result.indicator_id == "water"
|
| 163 |
-
assert result.data_source == "satellite"
|
| 164 |
-
assert result.status in (StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED)
|
| 165 |
-
assert result.confidence in (ConfidenceLevel.HIGH, ConfidenceLevel.MODERATE, ConfidenceLevel.LOW)
|
| 166 |
-
assert len(result.chart_data.get("dates", [])) > 0
|
| 167 |
-
assert "baseline_mean" in result.chart_data
|
| 168 |
-
assert len(result.chart_data["baseline_mean"]) > 0
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
@pytest.mark.asyncio
|
| 172 |
-
async def test_water_harvest_falls_back_when_current_fails(test_aoi, test_time_range):
|
| 173 |
-
"""harvest() returns placeholder when current MNDWI job failed."""
|
| 174 |
-
from app.indicators.water import WaterIndicator
|
| 175 |
-
|
| 176 |
-
indicator = WaterIndicator()
|
| 177 |
-
|
| 178 |
-
current_job = MagicMock()
|
| 179 |
-
current_job.download_results.side_effect = Exception("failed")
|
| 180 |
-
baseline_job = MagicMock()
|
| 181 |
-
true_color_job = MagicMock()
|
| 182 |
-
|
| 183 |
-
result = await indicator.harvest(
|
| 184 |
-
test_aoi, test_time_range,
|
| 185 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 186 |
-
)
|
| 187 |
-
|
| 188 |
-
assert result.data_source == "placeholder"
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
@pytest.mark.asyncio
|
| 192 |
-
async def test_water_harvest_degrades_when_baseline_fails(test_aoi, test_time_range):
|
| 193 |
-
"""harvest() returns degraded result when baseline MNDWI job failed."""
|
| 194 |
-
from app.indicators.water import WaterIndicator
|
| 195 |
-
|
| 196 |
-
indicator = WaterIndicator()
|
| 197 |
-
|
| 198 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 199 |
-
mndwi_path = os.path.join(tmpdir, "mndwi.tif")
|
| 200 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 201 |
-
_mock_mndwi_tif(mndwi_path)
|
| 202 |
-
mock_rgb_tif(rgb_path)
|
| 203 |
-
|
| 204 |
-
current_job = make_mock_batch_job(mndwi_path)
|
| 205 |
-
baseline_job = make_mock_batch_job(mndwi_path, fail=True)
|
| 206 |
-
true_color_job = make_mock_batch_job(rgb_path)
|
| 207 |
-
|
| 208 |
-
result = await indicator.harvest(
|
| 209 |
-
test_aoi, test_time_range,
|
| 210 |
-
batch_jobs=[current_job, baseline_job, true_color_job],
|
| 211 |
-
)
|
| 212 |
-
|
| 213 |
-
assert result.indicator_id == "water"
|
| 214 |
-
assert result.data_source == "satellite"
|
| 215 |
-
assert result.confidence == ConfidenceLevel.LOW
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,74 +0,0 @@
|
|
| 1 |
-
import pytest
|
| 2 |
-
import tempfile
|
| 3 |
-
import os
|
| 4 |
-
import numpy as np
|
| 5 |
-
from app.outputs.maps import render_indicator_map, render_status_map
|
| 6 |
-
from app.indicators.base import SpatialData
|
| 7 |
-
from app.models import AOI, StatusLevel
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
@pytest.fixture
|
| 11 |
-
def aoi():
|
| 12 |
-
return AOI(name="Test Area", bbox=[36.75, -1.35, 36.95, -1.20])
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
def test_render_grid_map(aoi):
|
| 16 |
-
spatial = SpatialData(
|
| 17 |
-
data=np.random.rand(5, 5) * 30 + 10,
|
| 18 |
-
lons=np.linspace(36.75, 36.95, 5),
|
| 19 |
-
lats=np.linspace(-1.35, -1.20, 5),
|
| 20 |
-
label="Temperature (°C)",
|
| 21 |
-
colormap="coolwarm",
|
| 22 |
-
map_type="grid",
|
| 23 |
-
)
|
| 24 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 25 |
-
out = os.path.join(tmpdir, "grid.png")
|
| 26 |
-
render_indicator_map(spatial=spatial, aoi=aoi, status=StatusLevel.GREEN, output_path=out)
|
| 27 |
-
assert os.path.exists(out)
|
| 28 |
-
assert os.path.getsize(out) > 1000
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
def test_render_points_map(aoi):
|
| 32 |
-
spatial = SpatialData(
|
| 33 |
-
geojson={
|
| 34 |
-
"type": "FeatureCollection",
|
| 35 |
-
"features": [
|
| 36 |
-
{"type": "Feature", "geometry": {"type": "Point", "coordinates": [36.82, -1.28]}, "properties": {"confidence": "high"}},
|
| 37 |
-
{"type": "Feature", "geometry": {"type": "Point", "coordinates": [36.88, -1.30]}, "properties": {"confidence": "nominal"}},
|
| 38 |
-
],
|
| 39 |
-
},
|
| 40 |
-
map_type="points",
|
| 41 |
-
label="Fire detections",
|
| 42 |
-
)
|
| 43 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 44 |
-
out = os.path.join(tmpdir, "points.png")
|
| 45 |
-
render_indicator_map(spatial=spatial, aoi=aoi, status=StatusLevel.RED, output_path=out)
|
| 46 |
-
assert os.path.exists(out)
|
| 47 |
-
assert os.path.getsize(out) > 1000
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
def test_render_choropleth_map(aoi):
|
| 51 |
-
spatial = SpatialData(
|
| 52 |
-
geojson={
|
| 53 |
-
"type": "FeatureCollection",
|
| 54 |
-
"features": [
|
| 55 |
-
{"type": "Feature", "geometry": {"type": "Polygon", "coordinates": [[[36.7, -1.4], [36.9, -1.4], [36.9, -1.2], [36.7, -1.2], [36.7, -1.4]]]}, "properties": {"value": 65.0}},
|
| 56 |
-
],
|
| 57 |
-
},
|
| 58 |
-
map_type="choropleth",
|
| 59 |
-
label="Vegetation %",
|
| 60 |
-
colormap="YlGn",
|
| 61 |
-
)
|
| 62 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 63 |
-
out = os.path.join(tmpdir, "choropleth.png")
|
| 64 |
-
render_indicator_map(spatial=spatial, aoi=aoi, status=StatusLevel.AMBER, output_path=out)
|
| 65 |
-
assert os.path.exists(out)
|
| 66 |
-
assert os.path.getsize(out) > 1000
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
def test_render_status_map(aoi):
|
| 70 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 71 |
-
out = os.path.join(tmpdir, "status.png")
|
| 72 |
-
render_status_map(aoi=aoi, status=StatusLevel.RED, output_path=out)
|
| 73 |
-
assert os.path.exists(out)
|
| 74 |
-
assert os.path.getsize(out) > 1000
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,151 +0,0 @@
|
|
| 1 |
-
import pytest
|
| 2 |
-
from datetime import datetime, date
|
| 3 |
-
from shapely.geometry import box
|
| 4 |
-
|
| 5 |
-
from app.models import (
|
| 6 |
-
AOI,
|
| 7 |
-
TimeRange,
|
| 8 |
-
JobRequest,
|
| 9 |
-
JobStatus,
|
| 10 |
-
Job,
|
| 11 |
-
IndicatorMeta,
|
| 12 |
-
IndicatorResult,
|
| 13 |
-
StatusLevel,
|
| 14 |
-
TrendDirection,
|
| 15 |
-
ConfidenceLevel,
|
| 16 |
-
)
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
def test_aoi_from_bbox():
|
| 20 |
-
aoi = AOI(name="Test Area", bbox=[32.45, 15.65, 32.65, 15.80])
|
| 21 |
-
assert aoi.name == "Test Area"
|
| 22 |
-
assert aoi.area_km2 > 0
|
| 23 |
-
assert aoi.area_km2 < 10_000
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
def test_aoi_rejects_too_large():
|
| 27 |
-
with pytest.raises(ValueError, match="10,000"):
|
| 28 |
-
AOI(name="Huge", bbox=[0.0, 0.0, 20.0, 20.0])
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
def test_aoi_rejects_outside_east_africa():
|
| 32 |
-
with pytest.raises(ValueError, match="East Africa"):
|
| 33 |
-
AOI(name="Europe", bbox=[10.0, 50.0, 11.0, 51.0])
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
def test_time_range_defaults_to_last_12_months():
|
| 37 |
-
tr = TimeRange()
|
| 38 |
-
assert (tr.end - tr.start).days >= 360
|
| 39 |
-
assert (tr.end - tr.start).days <= 370
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
def test_time_range_rejects_over_3_years():
|
| 43 |
-
with pytest.raises(ValueError, match="3 years"):
|
| 44 |
-
TimeRange(start=date(2020, 1, 1), end=date(2026, 1, 1))
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
def test_job_request_valid():
|
| 48 |
-
req = JobRequest(
|
| 49 |
-
aoi=AOI(name="Khartoum", bbox=[32.45, 15.65, 32.65, 15.80]),
|
| 50 |
-
time_range=TimeRange(),
|
| 51 |
-
indicator_ids=["fires", "cropland"],
|
| 52 |
-
email="test@example.com",
|
| 53 |
-
)
|
| 54 |
-
assert len(req.indicator_ids) == 2
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
def test_job_request_rejects_empty_indicators():
|
| 58 |
-
with pytest.raises(ValueError, match="indicator"):
|
| 59 |
-
JobRequest(
|
| 60 |
-
aoi=AOI(name="Khartoum", bbox=[32.45, 15.65, 32.65, 15.80]),
|
| 61 |
-
time_range=TimeRange(),
|
| 62 |
-
indicator_ids=[],
|
| 63 |
-
email="test@example.com",
|
| 64 |
-
)
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
def test_job_status_transitions():
|
| 68 |
-
assert JobStatus.QUEUED == "queued"
|
| 69 |
-
assert JobStatus.PROCESSING == "processing"
|
| 70 |
-
assert JobStatus.COMPLETE == "complete"
|
| 71 |
-
assert JobStatus.FAILED == "failed"
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
def test_indicator_result_fields():
|
| 75 |
-
result = IndicatorResult(
|
| 76 |
-
indicator_id="fires",
|
| 77 |
-
headline="5 fire events detected in the past 12 months",
|
| 78 |
-
status=StatusLevel.AMBER,
|
| 79 |
-
trend=TrendDirection.DETERIORATING,
|
| 80 |
-
confidence=ConfidenceLevel.HIGH,
|
| 81 |
-
map_layer_path="/results/123/fires_map.tif",
|
| 82 |
-
chart_data={"dates": ["2025-01", "2025-02"], "values": [2, 3]},
|
| 83 |
-
summary="Five fire events were detected in the analysis area.",
|
| 84 |
-
methodology="Fire detections sourced from NASA FIRMS active fire API.",
|
| 85 |
-
limitations=["VIIRS has a 375m resolution — small fires may be missed."],
|
| 86 |
-
)
|
| 87 |
-
assert result.status == "amber"
|
| 88 |
-
assert result.trend == "deteriorating"
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
def test_job_request_season_defaults():
|
| 92 |
-
"""Default season is full year (1-12)."""
|
| 93 |
-
req = JobRequest(
|
| 94 |
-
aoi=AOI(name="Test", bbox=[36.75, -1.35, 36.95, -1.20]),
|
| 95 |
-
indicator_ids=["fires"],
|
| 96 |
-
email="t@t.com",
|
| 97 |
-
)
|
| 98 |
-
assert req.season_start == 1
|
| 99 |
-
assert req.season_end == 12
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
def test_job_request_season_months_normal():
|
| 103 |
-
"""Non-wrapping season: Apr-Sep."""
|
| 104 |
-
req = JobRequest(
|
| 105 |
-
aoi=AOI(name="Test", bbox=[36.75, -1.35, 36.95, -1.20]),
|
| 106 |
-
indicator_ids=["fires"],
|
| 107 |
-
email="t@t.com",
|
| 108 |
-
season_start=4,
|
| 109 |
-
season_end=9,
|
| 110 |
-
)
|
| 111 |
-
assert req.season_months() == [4, 5, 6, 7, 8, 9]
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
def test_job_request_season_months_wrapping():
|
| 115 |
-
"""Wrapping season: Oct-Mar (Southern Hemisphere)."""
|
| 116 |
-
req = JobRequest(
|
| 117 |
-
aoi=AOI(name="Test", bbox=[36.75, -1.35, 36.95, -1.20]),
|
| 118 |
-
indicator_ids=["fires"],
|
| 119 |
-
email="t@t.com",
|
| 120 |
-
season_start=10,
|
| 121 |
-
season_end=3,
|
| 122 |
-
)
|
| 123 |
-
assert req.season_months() == [10, 11, 12, 1, 2, 3]
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
def test_job_request_season_months_full_year():
|
| 127 |
-
"""Full year default."""
|
| 128 |
-
req = JobRequest(
|
| 129 |
-
aoi=AOI(name="Test", bbox=[36.75, -1.35, 36.95, -1.20]),
|
| 130 |
-
indicator_ids=["fires"],
|
| 131 |
-
email="t@t.com",
|
| 132 |
-
)
|
| 133 |
-
assert req.season_months() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
def test_job_request_season_validation():
|
| 137 |
-
"""Season months must be 1-12."""
|
| 138 |
-
with pytest.raises(Exception):
|
| 139 |
-
JobRequest(
|
| 140 |
-
aoi=AOI(name="Test", bbox=[36.75, -1.35, 36.95, -1.20]),
|
| 141 |
-
indicator_ids=["fires"],
|
| 142 |
-
email="t@t.com",
|
| 143 |
-
season_start=0,
|
| 144 |
-
)
|
| 145 |
-
with pytest.raises(Exception):
|
| 146 |
-
JobRequest(
|
| 147 |
-
aoi=AOI(name="Test", bbox=[36.75, -1.35, 36.95, -1.20]),
|
| 148 |
-
indicator_ids=["fires"],
|
| 149 |
-
email="t@t.com",
|
| 150 |
-
season_end=13,
|
| 151 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,83 +0,0 @@
|
|
| 1 |
-
"""Tests for app.outputs.narrative — cross-indicator narrative generation."""
|
| 2 |
-
from app.models import IndicatorResult, StatusLevel, TrendDirection, ConfidenceLevel
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
def _make_result(
|
| 6 |
-
indicator_id: str,
|
| 7 |
-
status: StatusLevel = StatusLevel.GREEN,
|
| 8 |
-
trend: TrendDirection = TrendDirection.STABLE,
|
| 9 |
-
data_source: str = "satellite",
|
| 10 |
-
headline: str = "Test headline",
|
| 11 |
-
summary: str = "Test summary.",
|
| 12 |
-
) -> IndicatorResult:
|
| 13 |
-
return IndicatorResult(
|
| 14 |
-
indicator_id=indicator_id,
|
| 15 |
-
headline=headline,
|
| 16 |
-
status=status,
|
| 17 |
-
trend=trend,
|
| 18 |
-
confidence=ConfidenceLevel.HIGH,
|
| 19 |
-
map_layer_path="",
|
| 20 |
-
chart_data={"dates": ["2025-06"], "values": [0.5]},
|
| 21 |
-
summary=summary,
|
| 22 |
-
methodology="Test methodology.",
|
| 23 |
-
limitations=["Test limitation."],
|
| 24 |
-
data_source=data_source,
|
| 25 |
-
)
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
def test_generate_narrative_all_green():
|
| 29 |
-
from app.outputs.narrative import generate_narrative
|
| 30 |
-
results = [
|
| 31 |
-
_make_result("ndvi"),
|
| 32 |
-
_make_result("water"),
|
| 33 |
-
]
|
| 34 |
-
narrative = generate_narrative(results)
|
| 35 |
-
assert "within normal ranges" in narrative.lower() or "stable" in narrative.lower()
|
| 36 |
-
assert len(narrative) > 20
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
def test_generate_narrative_drought_pattern():
|
| 40 |
-
from app.outputs.narrative import generate_narrative
|
| 41 |
-
results = [
|
| 42 |
-
_make_result("ndvi", status=StatusLevel.RED, trend=TrendDirection.DETERIORATING),
|
| 43 |
-
_make_result("rainfall", status=StatusLevel.RED, trend=TrendDirection.DETERIORATING),
|
| 44 |
-
]
|
| 45 |
-
narrative = generate_narrative(results)
|
| 46 |
-
assert "drought" in narrative.lower() or "precipitation" in narrative.lower()
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
def test_generate_narrative_landuse_pattern():
|
| 50 |
-
from app.outputs.narrative import generate_narrative
|
| 51 |
-
results = [
|
| 52 |
-
_make_result("ndvi", status=StatusLevel.AMBER, trend=TrendDirection.DETERIORATING),
|
| 53 |
-
_make_result("buildup", status=StatusLevel.AMBER, trend=TrendDirection.DETERIORATING),
|
| 54 |
-
]
|
| 55 |
-
narrative = generate_narrative(results)
|
| 56 |
-
assert "settlement" in narrative.lower() or "land-use" in narrative.lower()
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
def test_generate_narrative_placeholder_caveat():
|
| 60 |
-
from app.outputs.narrative import generate_narrative
|
| 61 |
-
results = [
|
| 62 |
-
_make_result("ndvi", data_source="placeholder"),
|
| 63 |
-
_make_result("water"),
|
| 64 |
-
]
|
| 65 |
-
narrative = generate_narrative(results)
|
| 66 |
-
assert "placeholder" in narrative.lower() or "estimated" in narrative.lower() or "limited" in narrative.lower()
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
def test_generate_narrative_single_indicator():
|
| 70 |
-
from app.outputs.narrative import generate_narrative
|
| 71 |
-
results = [_make_result("ndvi", status=StatusLevel.AMBER, trend=TrendDirection.DETERIORATING)]
|
| 72 |
-
narrative = generate_narrative(results)
|
| 73 |
-
assert isinstance(narrative, str)
|
| 74 |
-
assert len(narrative) > 20
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
def test_get_interpretation_returns_nonempty():
|
| 78 |
-
from app.outputs.narrative import get_interpretation
|
| 79 |
-
for ind_id in ["ndvi", "water", "sar", "buildup", "fires", "rainfall"]:
|
| 80 |
-
for status in [StatusLevel.GREEN, StatusLevel.AMBER, StatusLevel.RED]:
|
| 81 |
-
result = get_interpretation(ind_id, status)
|
| 82 |
-
assert isinstance(result, str)
|
| 83 |
-
assert len(result) > 10, f"Empty interpretation for {ind_id}/{status}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,217 +0,0 @@
|
|
| 1 |
-
"""End-to-end test: NDVI indicator → raster map → chart → report section.
|
| 2 |
-
|
| 3 |
-
Uses mocked openEO (no CDSE credentials needed) with synthetic GeoTIFFs.
|
| 4 |
-
"""
|
| 5 |
-
from __future__ import annotations
|
| 6 |
-
|
| 7 |
-
import os
|
| 8 |
-
import tempfile
|
| 9 |
-
from unittest.mock import MagicMock, patch
|
| 10 |
-
from datetime import date
|
| 11 |
-
|
| 12 |
-
import numpy as np
|
| 13 |
-
import rasterio
|
| 14 |
-
from rasterio.transform import from_bounds
|
| 15 |
-
import pytest
|
| 16 |
-
|
| 17 |
-
from app.models import AOI, TimeRange, StatusLevel
|
| 18 |
-
from app.outputs.charts import render_timeseries_chart
|
| 19 |
-
from app.outputs.maps import render_raster_map
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
BBOX = [32.45, 15.65, 32.65, 15.8]
|
| 23 |
-
WIDTH, HEIGHT = 15, 12
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
def _write_ndvi_tif(path: str):
|
| 27 |
-
rng = np.random.default_rng(42)
|
| 28 |
-
data = np.zeros((12, HEIGHT, WIDTH), dtype=np.float32)
|
| 29 |
-
for m in range(12):
|
| 30 |
-
data[m] = 0.3 + 0.2 * np.sin(np.pi * (m - 3) / 6) + rng.normal(0, 0.03, (HEIGHT, WIDTH))
|
| 31 |
-
with rasterio.open(
|
| 32 |
-
path, "w", driver="GTiff", height=HEIGHT, width=WIDTH, count=12,
|
| 33 |
-
dtype="float32", crs="EPSG:4326",
|
| 34 |
-
transform=from_bounds(*BBOX, WIDTH, HEIGHT), nodata=-9999.0,
|
| 35 |
-
) as dst:
|
| 36 |
-
for i in range(12):
|
| 37 |
-
dst.write(data[i], i + 1)
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
def _write_rgb_tif(path: str):
|
| 41 |
-
rng = np.random.default_rng(43)
|
| 42 |
-
data = rng.integers(500, 1500, (3, HEIGHT, WIDTH), dtype=np.uint16)
|
| 43 |
-
with rasterio.open(
|
| 44 |
-
path, "w", driver="GTiff", height=HEIGHT, width=WIDTH, count=3,
|
| 45 |
-
dtype="uint16", crs="EPSG:4326",
|
| 46 |
-
transform=from_bounds(*BBOX, WIDTH, HEIGHT), nodata=0,
|
| 47 |
-
) as dst:
|
| 48 |
-
for i in range(3):
|
| 49 |
-
dst.write(data[i], i + 1)
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
@pytest.mark.asyncio
|
| 53 |
-
async def test_ndvi_full_pipeline():
|
| 54 |
-
"""Full pipeline: process NDVI → render raster map → render chart."""
|
| 55 |
-
from app.indicators.ndvi import NdviIndicator
|
| 56 |
-
|
| 57 |
-
aoi = AOI(name="Khartoum Test", bbox=BBOX)
|
| 58 |
-
time_range = TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 59 |
-
|
| 60 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 61 |
-
ndvi_path = os.path.join(tmpdir, "ndvi.tif")
|
| 62 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 63 |
-
_write_ndvi_tif(ndvi_path)
|
| 64 |
-
_write_rgb_tif(rgb_path)
|
| 65 |
-
|
| 66 |
-
mock_cube = MagicMock()
|
| 67 |
-
|
| 68 |
-
def fake_download(path, **kwargs):
|
| 69 |
-
import shutil
|
| 70 |
-
if "ndvi" in path:
|
| 71 |
-
shutil.copy(ndvi_path, path)
|
| 72 |
-
else:
|
| 73 |
-
shutil.copy(rgb_path, path)
|
| 74 |
-
|
| 75 |
-
mock_cube.download = MagicMock(side_effect=fake_download)
|
| 76 |
-
|
| 77 |
-
with patch("app.indicators.ndvi.get_connection") as mock_conn, \
|
| 78 |
-
patch("app.indicators.ndvi.build_ndvi_graph", return_value=mock_cube), \
|
| 79 |
-
patch("app.indicators.ndvi.build_true_color_graph", return_value=mock_cube):
|
| 80 |
-
|
| 81 |
-
indicator = NdviIndicator()
|
| 82 |
-
result = await indicator.process(aoi, time_range)
|
| 83 |
-
|
| 84 |
-
# Verify result quality
|
| 85 |
-
assert result.indicator_id == "ndvi"
|
| 86 |
-
assert result.data_source == "satellite"
|
| 87 |
-
assert "NDVI" in result.methodology
|
| 88 |
-
assert "pixel" in result.methodology.lower()
|
| 89 |
-
assert len(result.chart_data["dates"]) >= 6
|
| 90 |
-
assert all(isinstance(v, float) for v in result.chart_data["values"])
|
| 91 |
-
|
| 92 |
-
# Render the raster map
|
| 93 |
-
map_out = os.path.join(tmpdir, "ndvi_map.png")
|
| 94 |
-
raster_path = indicator._indicator_raster_path
|
| 95 |
-
tc_path = indicator._true_color_path
|
| 96 |
-
peak = indicator._ndvi_peak_band
|
| 97 |
-
|
| 98 |
-
render_raster_map(
|
| 99 |
-
true_color_path=tc_path,
|
| 100 |
-
indicator_path=raster_path,
|
| 101 |
-
indicator_band=peak,
|
| 102 |
-
aoi=aoi,
|
| 103 |
-
status=result.status,
|
| 104 |
-
output_path=map_out,
|
| 105 |
-
cmap="RdYlGn",
|
| 106 |
-
vmin=-0.2,
|
| 107 |
-
vmax=0.9,
|
| 108 |
-
label="NDVI",
|
| 109 |
-
)
|
| 110 |
-
assert os.path.exists(map_out)
|
| 111 |
-
assert os.path.getsize(map_out) > 10000 # Real map should be >10KB
|
| 112 |
-
|
| 113 |
-
# Render the chart
|
| 114 |
-
chart_out = os.path.join(tmpdir, "ndvi_chart.png")
|
| 115 |
-
render_timeseries_chart(
|
| 116 |
-
chart_data=result.chart_data,
|
| 117 |
-
indicator_name="Vegetation (NDVI)",
|
| 118 |
-
status=result.status,
|
| 119 |
-
trend=result.trend,
|
| 120 |
-
output_path=chart_out,
|
| 121 |
-
y_label="NDVI",
|
| 122 |
-
)
|
| 123 |
-
assert os.path.exists(chart_out)
|
| 124 |
-
assert os.path.getsize(chart_out) > 5000
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
@pytest.mark.asyncio
|
| 128 |
-
async def test_ndvi_batch_pipeline():
|
| 129 |
-
"""Batch pipeline: submit → harvest → render map → render chart."""
|
| 130 |
-
from app.indicators.ndvi import NdviIndicator
|
| 131 |
-
import shutil
|
| 132 |
-
|
| 133 |
-
aoi = AOI(name="Khartoum Batch", bbox=BBOX)
|
| 134 |
-
time_range = TimeRange(start=date(2025, 3, 1), end=date(2026, 3, 1))
|
| 135 |
-
|
| 136 |
-
with tempfile.TemporaryDirectory() as tmpdir:
|
| 137 |
-
ndvi_path = os.path.join(tmpdir, "ndvi.tif")
|
| 138 |
-
rgb_path = os.path.join(tmpdir, "rgb.tif")
|
| 139 |
-
_write_ndvi_tif(ndvi_path)
|
| 140 |
-
_write_rgb_tif(rgb_path)
|
| 141 |
-
|
| 142 |
-
mock_conn = MagicMock()
|
| 143 |
-
|
| 144 |
-
def make_mock_job(src_path):
|
| 145 |
-
job = MagicMock()
|
| 146 |
-
job.job_id = "j-e2e"
|
| 147 |
-
job.status.return_value = "finished"
|
| 148 |
-
def fake_download_results(target):
|
| 149 |
-
os.makedirs(target, exist_ok=True)
|
| 150 |
-
dest = os.path.join(target, "result.tif")
|
| 151 |
-
shutil.copy(src_path, dest)
|
| 152 |
-
from pathlib import Path
|
| 153 |
-
return {Path(dest): {"type": "image/tiff"}}
|
| 154 |
-
job.download_results.side_effect = fake_download_results
|
| 155 |
-
return job
|
| 156 |
-
|
| 157 |
-
mock_ndvi_job = make_mock_job(ndvi_path)
|
| 158 |
-
mock_tc_job = make_mock_job(rgb_path)
|
| 159 |
-
mock_conn.create_job.return_value = mock_ndvi_job
|
| 160 |
-
|
| 161 |
-
with patch("app.indicators.ndvi.get_connection", return_value=mock_conn), \
|
| 162 |
-
patch("app.indicators.ndvi.build_ndvi_graph", return_value=MagicMock()), \
|
| 163 |
-
patch("app.indicators.ndvi.build_true_color_graph", return_value=MagicMock()), \
|
| 164 |
-
patch("app.indicators.ndvi.submit_as_batch") as mock_submit:
|
| 165 |
-
|
| 166 |
-
mock_submit.side_effect = [
|
| 167 |
-
make_mock_job(ndvi_path), # current
|
| 168 |
-
make_mock_job(ndvi_path), # baseline
|
| 169 |
-
make_mock_job(rgb_path), # true-color
|
| 170 |
-
]
|
| 171 |
-
|
| 172 |
-
indicator = NdviIndicator()
|
| 173 |
-
|
| 174 |
-
# Phase 1: submit
|
| 175 |
-
jobs = await indicator.submit_batch(aoi, time_range)
|
| 176 |
-
assert len(jobs) == 3
|
| 177 |
-
|
| 178 |
-
# Phase 3: harvest
|
| 179 |
-
result = await indicator.harvest(aoi, time_range, batch_jobs=jobs)
|
| 180 |
-
|
| 181 |
-
assert result.indicator_id == "ndvi"
|
| 182 |
-
assert result.data_source == "satellite"
|
| 183 |
-
assert len(result.chart_data["dates"]) >= 6
|
| 184 |
-
|
| 185 |
-
# Render the raster map
|
| 186 |
-
map_out = os.path.join(tmpdir, "ndvi_map.png")
|
| 187 |
-
raster_path = indicator._indicator_raster_path
|
| 188 |
-
tc_path = indicator._true_color_path
|
| 189 |
-
peak = indicator._ndvi_peak_band
|
| 190 |
-
|
| 191 |
-
render_raster_map(
|
| 192 |
-
true_color_path=tc_path,
|
| 193 |
-
indicator_path=raster_path,
|
| 194 |
-
indicator_band=peak,
|
| 195 |
-
aoi=aoi,
|
| 196 |
-
status=result.status,
|
| 197 |
-
output_path=map_out,
|
| 198 |
-
cmap="RdYlGn",
|
| 199 |
-
vmin=-0.2,
|
| 200 |
-
vmax=0.9,
|
| 201 |
-
label="NDVI",
|
| 202 |
-
)
|
| 203 |
-
assert os.path.exists(map_out)
|
| 204 |
-
assert os.path.getsize(map_out) > 10000
|
| 205 |
-
|
| 206 |
-
# Render the chart
|
| 207 |
-
chart_out = os.path.join(tmpdir, "ndvi_chart.png")
|
| 208 |
-
render_timeseries_chart(
|
| 209 |
-
chart_data=result.chart_data,
|
| 210 |
-
indicator_name="Vegetation (NDVI)",
|
| 211 |
-
status=result.status,
|
| 212 |
-
trend=result.trend,
|
| 213 |
-
output_path=chart_out,
|
| 214 |
-
y_label="NDVI",
|
| 215 |
-
)
|
| 216 |
-
assert os.path.exists(chart_out)
|
| 217 |
-
assert os.path.getsize(chart_out) > 5000
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -1,190 +0,0 @@
|
|
| 1 |
-
"""Tests for app.openeo_client — openEO connection and graph builders."""
|
| 2 |
-
from __future__ import annotations
|
| 3 |
-
|
| 4 |
-
from unittest.mock import MagicMock, patch
|
| 5 |
-
import pytest
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
def test_get_connection_creates_authenticated_connection():
|
| 9 |
-
"""get_connection() connects to CDSE and authenticates."""
|
| 10 |
-
mock_conn = MagicMock()
|
| 11 |
-
with patch("openeo.connect", return_value=mock_conn) as mock_connect:
|
| 12 |
-
from app.openeo_client import get_connection, _reset_connection
|
| 13 |
-
_reset_connection()
|
| 14 |
-
conn = get_connection()
|
| 15 |
-
|
| 16 |
-
mock_connect.assert_called_once_with("openeo.dataspace.copernicus.eu")
|
| 17 |
-
mock_conn.authenticate_oidc.assert_called_once()
|
| 18 |
-
assert conn is mock_conn
|
| 19 |
-
_reset_connection()
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
def test_get_connection_reuses_cached():
|
| 23 |
-
"""Subsequent calls return the same connection object."""
|
| 24 |
-
mock_conn = MagicMock()
|
| 25 |
-
with patch("openeo.connect", return_value=mock_conn):
|
| 26 |
-
from app.openeo_client import get_connection, _reset_connection
|
| 27 |
-
_reset_connection()
|
| 28 |
-
conn1 = get_connection()
|
| 29 |
-
conn2 = get_connection()
|
| 30 |
-
assert conn1 is conn2
|
| 31 |
-
_reset_connection()
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
def test_build_ndvi_graph():
|
| 35 |
-
"""build_ndvi_graph() loads Sentinel-2 with correct bands."""
|
| 36 |
-
mock_conn = MagicMock()
|
| 37 |
-
mock_cube = MagicMock()
|
| 38 |
-
mock_conn.load_collection.return_value = mock_cube
|
| 39 |
-
|
| 40 |
-
from app.openeo_client import build_ndvi_graph
|
| 41 |
-
|
| 42 |
-
bbox = {"west": 32.45, "south": 15.65, "east": 32.65, "north": 15.8}
|
| 43 |
-
result = build_ndvi_graph(
|
| 44 |
-
conn=mock_conn,
|
| 45 |
-
bbox=bbox,
|
| 46 |
-
temporal_extent=["2025-03-01", "2026-03-01"],
|
| 47 |
-
resolution_m=100,
|
| 48 |
-
)
|
| 49 |
-
|
| 50 |
-
mock_conn.load_collection.assert_called_once()
|
| 51 |
-
call_kwargs = mock_conn.load_collection.call_args
|
| 52 |
-
assert call_kwargs[1]["collection_id"] == "SENTINEL2_L2A"
|
| 53 |
-
assert call_kwargs[1]["spatial_extent"] == bbox
|
| 54 |
-
assert "B04" in call_kwargs[1]["bands"]
|
| 55 |
-
assert "B08" in call_kwargs[1]["bands"]
|
| 56 |
-
assert "SCL" in call_kwargs[1]["bands"]
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
def test_build_true_color_graph():
|
| 60 |
-
"""build_true_color_graph() loads RGB bands."""
|
| 61 |
-
mock_conn = MagicMock()
|
| 62 |
-
mock_cube = MagicMock()
|
| 63 |
-
mock_conn.load_collection.return_value = mock_cube
|
| 64 |
-
|
| 65 |
-
from app.openeo_client import build_true_color_graph
|
| 66 |
-
|
| 67 |
-
bbox = {"west": 32.45, "south": 15.65, "east": 32.65, "north": 15.8}
|
| 68 |
-
result = build_true_color_graph(
|
| 69 |
-
conn=mock_conn,
|
| 70 |
-
bbox=bbox,
|
| 71 |
-
temporal_extent=["2025-03-01", "2026-03-01"],
|
| 72 |
-
resolution_m=100,
|
| 73 |
-
)
|
| 74 |
-
|
| 75 |
-
mock_conn.load_collection.assert_called_once()
|
| 76 |
-
call_kwargs = mock_conn.load_collection.call_args
|
| 77 |
-
bands = call_kwargs[1]["bands"]
|
| 78 |
-
assert "B04" in bands
|
| 79 |
-
assert "B03" in bands
|
| 80 |
-
assert "B02" in bands
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
def test_build_mndwi_graph():
|
| 84 |
-
"""build_mndwi_graph() loads Sentinel-2 with water index bands."""
|
| 85 |
-
mock_conn = MagicMock()
|
| 86 |
-
mock_cube = MagicMock()
|
| 87 |
-
mock_conn.load_collection.return_value = mock_cube
|
| 88 |
-
|
| 89 |
-
from app.openeo_client import build_mndwi_graph
|
| 90 |
-
|
| 91 |
-
bbox = {"west": 32.45, "south": 15.65, "east": 32.65, "north": 15.8}
|
| 92 |
-
result = build_mndwi_graph(
|
| 93 |
-
conn=mock_conn,
|
| 94 |
-
bbox=bbox,
|
| 95 |
-
temporal_extent=["2025-03-01", "2026-03-01"],
|
| 96 |
-
resolution_m=100,
|
| 97 |
-
)
|
| 98 |
-
|
| 99 |
-
mock_conn.load_collection.assert_called_once()
|
| 100 |
-
call_kwargs = mock_conn.load_collection.call_args
|
| 101 |
-
assert call_kwargs[1]["collection_id"] == "SENTINEL2_L2A"
|
| 102 |
-
assert "B03" in call_kwargs[1]["bands"]
|
| 103 |
-
assert "B11" in call_kwargs[1]["bands"]
|
| 104 |
-
assert "SCL" in call_kwargs[1]["bands"]
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
def test_build_lst_graph():
|
| 108 |
-
"""build_lst_graph() loads Sentinel-3 SLSTR LST data."""
|
| 109 |
-
mock_conn = MagicMock()
|
| 110 |
-
mock_cube = MagicMock()
|
| 111 |
-
mock_conn.load_collection.return_value = mock_cube
|
| 112 |
-
|
| 113 |
-
from app.openeo_client import build_lst_graph
|
| 114 |
-
|
| 115 |
-
bbox = {"west": 32.45, "south": 15.65, "east": 32.65, "north": 15.8}
|
| 116 |
-
result = build_lst_graph(
|
| 117 |
-
conn=mock_conn,
|
| 118 |
-
bbox=bbox,
|
| 119 |
-
temporal_extent=["2025-03-01", "2026-03-01"],
|
| 120 |
-
resolution_m=1000,
|
| 121 |
-
)
|
| 122 |
-
|
| 123 |
-
mock_conn.load_collection.assert_called_once()
|
| 124 |
-
call_kwargs = mock_conn.load_collection.call_args
|
| 125 |
-
assert "SENTINEL3" in call_kwargs[1]["collection_id"].upper() or "SLSTR" in call_kwargs[1]["collection_id"].upper()
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
def test_build_sar_graph():
|
| 129 |
-
"""build_sar_graph() loads Sentinel-1 GRD with VV and VH bands."""
|
| 130 |
-
mock_conn = MagicMock()
|
| 131 |
-
mock_cube = MagicMock()
|
| 132 |
-
mock_conn.load_collection.return_value = mock_cube
|
| 133 |
-
|
| 134 |
-
from app.openeo_client import build_sar_graph
|
| 135 |
-
|
| 136 |
-
bbox = {"west": 32.45, "south": 15.65, "east": 32.65, "north": 15.8}
|
| 137 |
-
result = build_sar_graph(
|
| 138 |
-
conn=mock_conn,
|
| 139 |
-
bbox=bbox,
|
| 140 |
-
temporal_extent=["2025-03-01", "2026-03-01"],
|
| 141 |
-
resolution_m=100,
|
| 142 |
-
)
|
| 143 |
-
|
| 144 |
-
mock_conn.load_collection.assert_called_once()
|
| 145 |
-
call_kwargs = mock_conn.load_collection.call_args
|
| 146 |
-
assert call_kwargs[1]["collection_id"] == "SENTINEL1_GRD"
|
| 147 |
-
assert "VV" in call_kwargs[1]["bands"]
|
| 148 |
-
assert "VH" in call_kwargs[1]["bands"]
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
def test_build_buildup_graph():
|
| 152 |
-
"""build_buildup_graph() loads Sentinel-2 with SWIR, NIR, Red, and SCL bands."""
|
| 153 |
-
mock_conn = MagicMock()
|
| 154 |
-
mock_cube = MagicMock()
|
| 155 |
-
mock_conn.load_collection.return_value = mock_cube
|
| 156 |
-
|
| 157 |
-
from app.openeo_client import build_buildup_graph
|
| 158 |
-
|
| 159 |
-
bbox = {"west": 32.45, "south": 15.65, "east": 32.65, "north": 15.8}
|
| 160 |
-
result = build_buildup_graph(
|
| 161 |
-
conn=mock_conn,
|
| 162 |
-
bbox=bbox,
|
| 163 |
-
temporal_extent=["2025-03-01", "2026-03-01"],
|
| 164 |
-
resolution_m=100,
|
| 165 |
-
)
|
| 166 |
-
|
| 167 |
-
mock_conn.load_collection.assert_called_once()
|
| 168 |
-
call_kwargs = mock_conn.load_collection.call_args
|
| 169 |
-
assert call_kwargs[1]["collection_id"] == "SENTINEL2_L2A"
|
| 170 |
-
assert "B04" in call_kwargs[1]["bands"]
|
| 171 |
-
assert "B08" in call_kwargs[1]["bands"]
|
| 172 |
-
assert "B11" in call_kwargs[1]["bands"]
|
| 173 |
-
assert "SCL" in call_kwargs[1]["bands"]
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
def test_submit_as_batch_creates_and_starts_job():
|
| 177 |
-
"""submit_as_batch() creates a batch job and starts it."""
|
| 178 |
-
from app.openeo_client import submit_as_batch
|
| 179 |
-
|
| 180 |
-
mock_conn = MagicMock()
|
| 181 |
-
mock_cube = MagicMock()
|
| 182 |
-
mock_job = MagicMock()
|
| 183 |
-
mock_job.job_id = "j-12345"
|
| 184 |
-
mock_conn.create_job.return_value = mock_job
|
| 185 |
-
|
| 186 |
-
result = submit_as_batch(mock_conn, mock_cube, "ndvi-current-Test")
|
| 187 |
-
|
| 188 |
-
mock_conn.create_job.assert_called_once_with(mock_cube, title="ndvi-current-Test")
|
| 189 |
-
mock_job.start.assert_called_once()
|
| 190 |
-
assert result is mock_job
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|