fix: report contradictions, NaN propagation, NDBI threshold, blue tint
Five fixes for the v2 analytical report based on the test PDF review:
1. SAR NaN propagation
- Add safe_float helper that converts NaN/inf to 0.0
- Apply at every np.nanmean/nanstd source (sar, ndvi, water, buildup)
- Prevents 'nan dB' / 'z=+nan' strings in headlines and chart data
2. Headline/status alignment
- Add BaseProduct._generate_headline as single source of truth
- Headlines for RED/AMBER never say 'within normal range'; they
describe whichever signal (z-score, hotspot %, anomaly months)
drove the non-GREEN status
- All four products use the unified generator
- Fix _build_seasonal_chart_data to use a year-aware date helper
(was reusing time_range.end.year for every month)
- Buildup chart had unit mismatch (current ha vs raw NDBI ha);
now uses paired baseline buildup fractions for envelope
3. Settlement NDBI threshold for arid terrain
- openEO graph now emits paired NDBI/NDVI bands per timestep
- _compute_stats applies combined mask (NDBI > 0 AND NDVI < 0.2)
to exclude bare soil/rock from built-up classification
- _write_change_raster handles paired layout (with backward-compat
fallback to NDBI-only for legacy data)
4. Satellite overview blue tint
- build_true_color_graph now emits bands in [B04, B03, B02] order
so renderers reading [1,2,3] get true RGB instead of BGR
5. Non-tech language pass on report
- 'EO Product' → 'Indicator' throughout
- Display names: ndvi → 'Vegetation health', water → 'Water bodies',
sar → 'Ground surface change', buildup → 'Built-up areas'
- Plain-language interpretations rewritten
- New 'What to verify on the ground' box per non-GREEN indicator
- Status traffic-light line gains action verbs (action recommended,
worth monitoring, within normal range)
- Anomaly count uses actual N months instead of hardcoded /12
Verified by smoke test: safe_float catches NaN/inf, headline generator
never produces 'within normal range' for non-GREEN status, chart dates
correctly cross year boundaries.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
- app/eo_products/base.py +79 -0
- app/eo_products/buildup.py +156 -89
- app/eo_products/ndvi.py +51 -42
- app/eo_products/sar.py +72 -61
- app/eo_products/water.py +54 -45
- app/openeo_client.py +21 -11
- app/outputs/narrative.py +36 -16
- app/outputs/report.py +28 -15
|
@@ -1,8 +1,10 @@
|
|
| 1 |
from __future__ import annotations
|
| 2 |
|
| 3 |
import abc
|
|
|
|
| 4 |
import os
|
| 5 |
from dataclasses import dataclass, field
|
|
|
|
| 6 |
from typing import Optional
|
| 7 |
|
| 8 |
import numpy as np
|
|
@@ -10,6 +12,17 @@ import numpy as np
|
|
| 10 |
from app.models import AOI, TimeRange, ProductResult, ProductMeta
|
| 11 |
|
| 12 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
@dataclass
|
| 14 |
class SpatialData:
|
| 15 |
"""Spatial data produced by an indicator for map rendering."""
|
|
@@ -92,6 +105,72 @@ class BaseProduct(abc.ABC):
|
|
| 92 |
return StatusLevel.AMBER
|
| 93 |
return StatusLevel.GREEN
|
| 94 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
@staticmethod
|
| 96 |
def _compute_trend_zscore(monthly_zscores: list[float]) -> "TrendDirection":
|
| 97 |
"""Compute trend from direction of monthly z-scores."""
|
|
|
|
| 1 |
from __future__ import annotations
|
| 2 |
|
| 3 |
import abc
|
| 4 |
+
import math
|
| 5 |
import os
|
| 6 |
from dataclasses import dataclass, field
|
| 7 |
+
from datetime import date
|
| 8 |
from typing import Optional
|
| 9 |
|
| 10 |
import numpy as np
|
|
|
|
| 12 |
from app.models import AOI, TimeRange, ProductResult, ProductMeta
|
| 13 |
|
| 14 |
|
| 15 |
+
def safe_float(v, default: float = 0.0) -> float:
    """Coerce *v* to a float, substituting *default* for anything unusable.

    "Unusable" covers values that cannot be converted at all (None,
    non-numeric strings) as well as NaN and +/-infinity, which would
    otherwise propagate into report text as "nan"/"inf".
    """
    try:
        result = float(v)
    except (TypeError, ValueError):
        # Not convertible at all (e.g. None, "n/a").
        return default
    # isfinite() is False for NaN and for both infinities.
    return result if math.isfinite(result) else default
|
| 24 |
+
|
| 25 |
+
|
| 26 |
@dataclass
|
| 27 |
class SpatialData:
|
| 28 |
"""Spatial data produced by an indicator for map rendering."""
|
|
|
|
| 105 |
return StatusLevel.AMBER
|
| 106 |
return StatusLevel.GREEN
|
| 107 |
|
| 108 |
+
@staticmethod
def _generate_headline(
    *,
    status: "StatusLevel",
    z_current: float,
    hotspot_pct: float,
    anomaly_months: int,
    total_months: int,
    value_phrase: str,
    indicator_label: str,
    direction_up: str = "increase",
    direction_down: str = "decline",
) -> str:
    """Generate a plain-language headline that matches the status level.

    Rule: if status is not GREEN, the headline MUST describe the anomaly
    that triggered the non-GREEN status. Never say "within normal range"
    for an AMBER or RED status.
    """
    from app.models import StatusLevel

    z = safe_float(z_current)
    hot = safe_float(hotspot_pct)

    # Calm wording is reserved exclusively for a GREEN status.
    if status == StatusLevel.GREEN:
        return f"{indicator_label} within normal range ({value_phrase})."

    def _finish(reason: str) -> str:
        # Shared closing format for every non-GREEN headline.
        return f"{indicator_label}: {reason} ({value_phrase})."

    # Guard-clause chain: report whichever signal drove the non-GREEN
    # status, checked in priority order (z-score, hotspot %, anomalies).
    if abs(z) > 1.0:
        direction = direction_up if z > 0 else direction_down
        severity = "major" if status == StatusLevel.RED else "moderate"
        return _finish(
            f"{severity} {direction} vs seasonal baseline (z={z:+.1f})"
        )
    if hot > 25:
        return _finish(f"widespread change in {hot:.0f}% of the area")
    if hot > 10:
        return _finish(f"localised change in {hot:.0f}% of the area")
    if anomaly_months > 0:
        return _finish(
            f"{anomaly_months} of {total_months} months show anomalies"
        )
    return _finish("anomalous conditions detected")
|
| 155 |
+
|
| 156 |
+
@staticmethod
def _build_monthly_dates(start: "date", n_months: int) -> list[str]:
    """Return YYYY-MM strings for n consecutive months starting at start.

    Correctly increments the year when crossing December, unlike the old
    behaviour of reusing time_range.end.year for every month.
    """
    # Work in "months since year 0" so the December -> January rollover
    # falls out of integer arithmetic instead of an explicit wrap check.
    base = start.year * 12 + (start.month - 1)
    labels: list[str] = []
    for offset in range(n_months):
        year, month_index = divmod(base + offset, 12)
        labels.append(f"{year}-{month_index + 1:02d}")
    return labels
|
| 173 |
+
|
| 174 |
@staticmethod
|
| 175 |
def _compute_trend_zscore(monthly_zscores: list[float]) -> "TrendDirection":
|
| 176 |
"""Compute trend from direction of monthly z-scores."""
|
|
@@ -23,7 +23,7 @@ from app.config import (
|
|
| 23 |
ZSCORE_THRESHOLD,
|
| 24 |
MIN_CLUSTER_PIXELS,
|
| 25 |
)
|
| 26 |
-
from app.eo_products.base import BaseProduct, SpatialData
|
| 27 |
from app.models import (
|
| 28 |
AOI,
|
| 29 |
TimeRange,
|
|
@@ -46,6 +46,7 @@ logger = logging.getLogger(__name__)
|
|
| 46 |
|
| 47 |
BASELINE_YEARS = 5
|
| 48 |
NDBI_THRESHOLD = 0.0 # NDBI > 0 = potential built-up
|
|
|
|
| 49 |
|
| 50 |
|
| 51 |
class BuiltupProduct(BaseProduct):
|
|
@@ -142,8 +143,8 @@ class BuiltupProduct(BaseProduct):
|
|
| 142 |
|
| 143 |
# --- Seasonal baseline analysis ---
|
| 144 |
current_stats = self._compute_stats(current_path)
|
| 145 |
-
current_mean = current_stats["overall_mean"]
|
| 146 |
-
current_frac = current_stats["overall_buildup_fraction"]
|
| 147 |
n_current_bands = current_stats["valid_months"]
|
| 148 |
aoi_ha = aoi.area_km2 * 100 # km² → hectares
|
| 149 |
current_ha = current_frac * aoi_ha
|
|
@@ -162,7 +163,7 @@ class BuiltupProduct(BaseProduct):
|
|
| 162 |
# Z-score for overall current mean NDBI vs seasonal baseline
|
| 163 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 164 |
s = seasonal_stats[most_recent_month]
|
| 165 |
-
z_current = compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_BUILDUP)
|
| 166 |
else:
|
| 167 |
z_current = 0.0
|
| 168 |
|
|
@@ -172,8 +173,8 @@ class BuiltupProduct(BaseProduct):
|
|
| 172 |
for i, val in enumerate(current_stats["monthly_means"]):
|
| 173 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 174 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 175 |
-
z = compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 176 |
-
seasonal_stats[cal_month]["std"], MIN_STD_BUILDUP)
|
| 177 |
monthly_zscores.append(z)
|
| 178 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 179 |
anomaly_months += 1
|
|
@@ -216,27 +217,25 @@ class BuiltupProduct(BaseProduct):
|
|
| 216 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 217 |
trend = self._compute_trend_zscore(monthly_zscores)
|
| 218 |
|
|
|
|
|
|
|
|
|
|
| 219 |
chart_data = self._build_seasonal_chart_data(
|
| 220 |
-
current_stats["monthly_buildup_fractions"],
|
| 221 |
time_range, monthly_zscores, aoi_ha,
|
| 222 |
)
|
| 223 |
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
else:
|
| 236 |
-
headline = (
|
| 237 |
-
f"Settlement contraction detected: {current_ha:.0f} ha "
|
| 238 |
-
f"(z={z_current:+.1f} below seasonal baseline)"
|
| 239 |
-
)
|
| 240 |
|
| 241 |
# Write change raster for map rendering
|
| 242 |
change_map_path = os.path.join(results_dir, "buildup_change.tif")
|
|
@@ -488,27 +487,25 @@ class BuiltupProduct(BaseProduct):
|
|
| 488 |
|
| 489 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 490 |
trend = self._compute_trend_zscore(monthly_zscores)
|
|
|
|
|
|
|
|
|
|
| 491 |
chart_data = self._build_seasonal_chart_data(
|
| 492 |
-
current_stats["monthly_buildup_fractions"],
|
| 493 |
time_range, monthly_zscores, aoi_ha,
|
| 494 |
)
|
| 495 |
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
else:
|
| 508 |
-
headline = (
|
| 509 |
-
f"Settlement contraction detected: {current_ha:.0f} ha "
|
| 510 |
-
f"(z={z_current:+.1f} below seasonal baseline)"
|
| 511 |
-
)
|
| 512 |
|
| 513 |
# Write change raster for map rendering
|
| 514 |
change_map_path = os.path.join(results_dir, "buildup_change.tif")
|
|
@@ -567,38 +564,66 @@ class BuiltupProduct(BaseProduct):
|
|
| 567 |
def _compute_stats(tif_path: str) -> dict[str, Any]:
|
| 568 |
"""Extract monthly built-up fraction and raw NDBI stats from GeoTIFF.
|
| 569 |
|
| 570 |
-
Built-up = NDBI >
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 571 |
"""
|
| 572 |
with rasterio.open(tif_path) as src:
|
| 573 |
n_bands = src.count
|
|
|
|
|
|
|
| 574 |
monthly_fractions: list[float] = []
|
| 575 |
monthly_means: list[float] = []
|
| 576 |
peak_frac = -1.0
|
| 577 |
peak_band = 1
|
| 578 |
-
|
| 579 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 580 |
nodata = src.nodata
|
| 581 |
if nodata is not None:
|
| 582 |
-
|
| 583 |
else:
|
| 584 |
-
|
| 585 |
-
|
| 586 |
-
|
| 587 |
-
|
| 588 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 589 |
monthly_fractions.append(frac)
|
| 590 |
monthly_means.append(mean_val)
|
| 591 |
if frac > peak_frac:
|
| 592 |
peak_frac = frac
|
| 593 |
-
peak_band =
|
| 594 |
else:
|
| 595 |
monthly_fractions.append(0.0)
|
| 596 |
monthly_means.append(0.0)
|
| 597 |
|
| 598 |
-
overall_frac =
|
| 599 |
valid_months = sum(1 for m in monthly_means if m != 0.0)
|
| 600 |
overall_mean = (
|
| 601 |
-
|
| 602 |
if valid_months > 0 else 0.0
|
| 603 |
)
|
| 604 |
|
|
@@ -606,52 +631,51 @@ class BuiltupProduct(BaseProduct):
|
|
| 606 |
"monthly_buildup_fractions": monthly_fractions,
|
| 607 |
"overall_buildup_fraction": overall_frac,
|
| 608 |
"valid_months": valid_months,
|
| 609 |
-
"valid_months_total":
|
| 610 |
"peak_buildup_band": peak_band,
|
| 611 |
"overall_mean": overall_mean,
|
| 612 |
"monthly_means": monthly_means,
|
| 613 |
}
|
| 614 |
|
| 615 |
-
@
|
| 616 |
def _build_seasonal_chart_data(
|
|
|
|
| 617 |
current_monthly_fractions: list[float],
|
| 618 |
-
|
| 619 |
time_range: TimeRange,
|
| 620 |
monthly_zscores: list[float],
|
| 621 |
aoi_ha: float,
|
| 622 |
) -> dict[str, Any]:
|
| 623 |
-
"""Build chart data with seasonal baseline envelope, in hectares.
|
| 624 |
-
start_month = time_range.start.month
|
| 625 |
-
n = len(current_monthly_fractions)
|
| 626 |
-
year = time_range.end.year
|
| 627 |
|
| 628 |
-
|
| 629 |
-
|
| 630 |
-
|
| 631 |
-
|
| 632 |
-
|
| 633 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 634 |
|
| 635 |
for i in range(n):
|
| 636 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 637 |
-
|
| 638 |
-
values.append(round(current_monthly_fractions[i] * aoi_ha, 1))
|
| 639 |
|
| 640 |
-
if cal_month in
|
| 641 |
-
s =
|
| 642 |
-
|
| 643 |
-
|
| 644 |
-
|
| 645 |
-
b_mean.append(round(s["mean"] * aoi_ha, 1) if s["mean"] > 0 else 0.0)
|
| 646 |
-
b_min.append(round(s["min"] * aoi_ha, 1) if s["min"] > 0 else 0.0)
|
| 647 |
-
b_max.append(round(s["max"] * aoi_ha, 1) if s["max"] > 0 else 0.0)
|
| 648 |
else:
|
| 649 |
b_mean.append(0.0)
|
| 650 |
b_min.append(0.0)
|
| 651 |
b_max.append(0.0)
|
| 652 |
|
| 653 |
if i < len(monthly_zscores):
|
| 654 |
-
anomaly_flags.append(abs(monthly_zscores[i]) > ZSCORE_THRESHOLD)
|
| 655 |
else:
|
| 656 |
anomaly_flags.append(False)
|
| 657 |
|
|
@@ -662,23 +686,66 @@ class BuiltupProduct(BaseProduct):
|
|
| 662 |
"baseline_min": b_min,
|
| 663 |
"baseline_max": b_max,
|
| 664 |
"anomaly_flags": anomaly_flags,
|
| 665 |
-
"label": "Built-up area (
|
| 666 |
}
|
| 667 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 668 |
@staticmethod
|
| 669 |
def _write_change_raster(current_path: str, baseline_path: str, output_path: str) -> None:
|
| 670 |
-
"""Write single-band change raster: current built-up mask minus baseline.
|
| 671 |
-
|
| 672 |
-
|
| 673 |
-
|
| 674 |
-
|
| 675 |
-
|
| 676 |
-
|
| 677 |
-
|
| 678 |
-
|
| 679 |
-
|
| 680 |
-
|
| 681 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 682 |
change = c_buildup - b_buildup
|
| 683 |
|
| 684 |
profile.update(count=1, dtype="float32")
|
|
|
|
| 23 |
ZSCORE_THRESHOLD,
|
| 24 |
MIN_CLUSTER_PIXELS,
|
| 25 |
)
|
| 26 |
+
from app.eo_products.base import BaseProduct, SpatialData, safe_float
|
| 27 |
from app.models import (
|
| 28 |
AOI,
|
| 29 |
TimeRange,
|
|
|
|
| 46 |
|
| 47 |
BASELINE_YEARS = 5
|
| 48 |
NDBI_THRESHOLD = 0.0 # NDBI > 0 = potential built-up
|
| 49 |
+
NDVI_BUILDUP_MAX = 0.2 # NDVI < 0.2 required to exclude vegetation (combined with NDBI threshold)
|
| 50 |
|
| 51 |
|
| 52 |
class BuiltupProduct(BaseProduct):
|
|
|
|
| 143 |
|
| 144 |
# --- Seasonal baseline analysis ---
|
| 145 |
current_stats = self._compute_stats(current_path)
|
| 146 |
+
current_mean = safe_float(current_stats["overall_mean"])
|
| 147 |
+
current_frac = safe_float(current_stats["overall_buildup_fraction"])
|
| 148 |
n_current_bands = current_stats["valid_months"]
|
| 149 |
aoi_ha = aoi.area_km2 * 100 # km² → hectares
|
| 150 |
current_ha = current_frac * aoi_ha
|
|
|
|
| 163 |
# Z-score for overall current mean NDBI vs seasonal baseline
|
| 164 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 165 |
s = seasonal_stats[most_recent_month]
|
| 166 |
+
z_current = safe_float(compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_BUILDUP))
|
| 167 |
else:
|
| 168 |
z_current = 0.0
|
| 169 |
|
|
|
|
| 173 |
for i, val in enumerate(current_stats["monthly_means"]):
|
| 174 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 175 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 176 |
+
z = safe_float(compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 177 |
+
seasonal_stats[cal_month]["std"], MIN_STD_BUILDUP))
|
| 178 |
monthly_zscores.append(z)
|
| 179 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 180 |
anomaly_months += 1
|
|
|
|
| 217 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 218 |
trend = self._compute_trend_zscore(monthly_zscores)
|
| 219 |
|
| 220 |
+
baseline_buildup_fractions = self._build_seasonal_buildup_fractions(
|
| 221 |
+
baseline_stats["monthly_buildup_fractions"], BASELINE_YEARS,
|
| 222 |
+
)
|
| 223 |
chart_data = self._build_seasonal_chart_data(
|
| 224 |
+
current_stats["monthly_buildup_fractions"], baseline_buildup_fractions,
|
| 225 |
time_range, monthly_zscores, aoi_ha,
|
| 226 |
)
|
| 227 |
|
| 228 |
+
headline = self._generate_headline(
|
| 229 |
+
status=status,
|
| 230 |
+
z_current=z_current,
|
| 231 |
+
hotspot_pct=hotspot_pct,
|
| 232 |
+
anomaly_months=anomaly_months,
|
| 233 |
+
total_months=n_current_bands,
|
| 234 |
+
value_phrase=f"{current_ha:.0f} ha built-up",
|
| 235 |
+
indicator_label="Built-up areas",
|
| 236 |
+
direction_up="expansion",
|
| 237 |
+
direction_down="contraction",
|
| 238 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 239 |
|
| 240 |
# Write change raster for map rendering
|
| 241 |
change_map_path = os.path.join(results_dir, "buildup_change.tif")
|
|
|
|
| 487 |
|
| 488 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 489 |
trend = self._compute_trend_zscore(monthly_zscores)
|
| 490 |
+
baseline_buildup_fractions = self._build_seasonal_buildup_fractions(
|
| 491 |
+
baseline_stats["monthly_buildup_fractions"], BASELINE_YEARS,
|
| 492 |
+
)
|
| 493 |
chart_data = self._build_seasonal_chart_data(
|
| 494 |
+
current_stats["monthly_buildup_fractions"], baseline_buildup_fractions,
|
| 495 |
time_range, monthly_zscores, aoi_ha,
|
| 496 |
)
|
| 497 |
|
| 498 |
+
headline = self._generate_headline(
|
| 499 |
+
status=status,
|
| 500 |
+
z_current=z_current,
|
| 501 |
+
hotspot_pct=hotspot_pct,
|
| 502 |
+
anomaly_months=anomaly_months,
|
| 503 |
+
total_months=n_current_bands,
|
| 504 |
+
value_phrase=f"{current_ha:.0f} ha built-up",
|
| 505 |
+
indicator_label="Built-up areas",
|
| 506 |
+
direction_up="expansion",
|
| 507 |
+
direction_down="contraction",
|
| 508 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 509 |
|
| 510 |
# Write change raster for map rendering
|
| 511 |
change_map_path = os.path.join(results_dir, "buildup_change.tif")
|
|
|
|
| 564 |
def _compute_stats(tif_path: str) -> dict[str, Any]:
|
| 565 |
"""Extract monthly built-up fraction and raw NDBI stats from GeoTIFF.
|
| 566 |
|
| 567 |
+
Built-up = NDBI > NDBI_THRESHOLD AND NDVI < NDVI_BUILDUP_MAX.
|
| 568 |
+
Bands are interleaved (NDBI, NDVI) per month: band 1 = month0 NDBI,
|
| 569 |
+
band 2 = month0 NDVI, band 3 = month1 NDBI, band 4 = month1 NDVI, ...
|
| 570 |
+
|
| 571 |
+
For backward compatibility with single-band (NDBI-only) TIFs, falls
|
| 572 |
+
back to NDBI-only thresholding when band count is odd or n_bands == 1.
|
| 573 |
"""
|
| 574 |
with rasterio.open(tif_path) as src:
|
| 575 |
n_bands = src.count
|
| 576 |
+
paired_layout = n_bands >= 2 and n_bands % 2 == 0
|
| 577 |
+
n_months = n_bands // 2 if paired_layout else n_bands
|
| 578 |
monthly_fractions: list[float] = []
|
| 579 |
monthly_means: list[float] = []
|
| 580 |
peak_frac = -1.0
|
| 581 |
peak_band = 1
|
| 582 |
+
|
| 583 |
+
for m in range(n_months):
|
| 584 |
+
if paired_layout:
|
| 585 |
+
ndbi_band = m * 2 + 1
|
| 586 |
+
ndvi_band = m * 2 + 2
|
| 587 |
+
ndbi_data = src.read(ndbi_band).astype(np.float32)
|
| 588 |
+
ndvi_data = src.read(ndvi_band).astype(np.float32)
|
| 589 |
+
else:
|
| 590 |
+
ndbi_band = m + 1
|
| 591 |
+
ndvi_band = None
|
| 592 |
+
ndbi_data = src.read(ndbi_band).astype(np.float32)
|
| 593 |
+
ndvi_data = None
|
| 594 |
+
|
| 595 |
nodata = src.nodata
|
| 596 |
if nodata is not None:
|
| 597 |
+
mask = ndbi_data != nodata
|
| 598 |
else:
|
| 599 |
+
mask = ~np.isnan(ndbi_data)
|
| 600 |
+
ndbi_valid = ndbi_data[mask]
|
| 601 |
+
|
| 602 |
+
if len(ndbi_valid) > 0:
|
| 603 |
+
if ndvi_data is not None:
|
| 604 |
+
ndvi_valid = ndvi_data[mask]
|
| 605 |
+
buildup_pixels = np.sum(
|
| 606 |
+
(ndbi_valid > NDBI_THRESHOLD)
|
| 607 |
+
& (ndvi_valid < NDVI_BUILDUP_MAX)
|
| 608 |
+
)
|
| 609 |
+
else:
|
| 610 |
+
buildup_pixels = np.sum(ndbi_valid > NDBI_THRESHOLD)
|
| 611 |
+
frac = safe_float(buildup_pixels / len(ndbi_valid))
|
| 612 |
+
with np.errstate(all="ignore"):
|
| 613 |
+
mean_val = safe_float(np.nanmean(ndbi_valid))
|
| 614 |
monthly_fractions.append(frac)
|
| 615 |
monthly_means.append(mean_val)
|
| 616 |
if frac > peak_frac:
|
| 617 |
peak_frac = frac
|
| 618 |
+
peak_band = ndbi_band
|
| 619 |
else:
|
| 620 |
monthly_fractions.append(0.0)
|
| 621 |
monthly_means.append(0.0)
|
| 622 |
|
| 623 |
+
overall_frac = safe_float(np.mean(monthly_fractions)) if monthly_fractions else 0.0
|
| 624 |
valid_months = sum(1 for m in monthly_means if m != 0.0)
|
| 625 |
overall_mean = (
|
| 626 |
+
safe_float(np.mean([m for m in monthly_means if m != 0.0]))
|
| 627 |
if valid_months > 0 else 0.0
|
| 628 |
)
|
| 629 |
|
|
|
|
| 631 |
"monthly_buildup_fractions": monthly_fractions,
|
| 632 |
"overall_buildup_fraction": overall_frac,
|
| 633 |
"valid_months": valid_months,
|
| 634 |
+
"valid_months_total": n_months,
|
| 635 |
"peak_buildup_band": peak_band,
|
| 636 |
"overall_mean": overall_mean,
|
| 637 |
"monthly_means": monthly_means,
|
| 638 |
}
|
| 639 |
|
| 640 |
+
@classmethod
|
| 641 |
def _build_seasonal_chart_data(
|
| 642 |
+
cls,
|
| 643 |
current_monthly_fractions: list[float],
|
| 644 |
+
baseline_seasonal_fractions: dict[int, dict],
|
| 645 |
time_range: TimeRange,
|
| 646 |
monthly_zscores: list[float],
|
| 647 |
aoi_ha: float,
|
| 648 |
) -> dict[str, Any]:
|
| 649 |
+
"""Build chart data with seasonal baseline envelope, in hectares.
|
|
|
|
|
|
|
|
|
|
| 650 |
|
| 651 |
+
Both current values and baseline envelope use the same unit
|
| 652 |
+
(built-up fraction × AOI hectares) so the chart is meaningful.
|
| 653 |
+
"""
|
| 654 |
+
n = len(current_monthly_fractions)
|
| 655 |
+
dates = cls._build_monthly_dates(time_range.start, n)
|
| 656 |
+
values: list[float] = []
|
| 657 |
+
b_mean: list[float] = []
|
| 658 |
+
b_min: list[float] = []
|
| 659 |
+
b_max: list[float] = []
|
| 660 |
+
anomaly_flags: list[bool] = []
|
| 661 |
+
start_month = time_range.start.month
|
| 662 |
|
| 663 |
for i in range(n):
|
| 664 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 665 |
+
values.append(round(safe_float(current_monthly_fractions[i]) * aoi_ha, 1))
|
|
|
|
| 666 |
|
| 667 |
+
if cal_month in baseline_seasonal_fractions:
|
| 668 |
+
s = baseline_seasonal_fractions[cal_month]
|
| 669 |
+
b_mean.append(round(safe_float(s["mean"]) * aoi_ha, 1))
|
| 670 |
+
b_min.append(round(safe_float(s["min"]) * aoi_ha, 1))
|
| 671 |
+
b_max.append(round(safe_float(s["max"]) * aoi_ha, 1))
|
|
|
|
|
|
|
|
|
|
| 672 |
else:
|
| 673 |
b_mean.append(0.0)
|
| 674 |
b_min.append(0.0)
|
| 675 |
b_max.append(0.0)
|
| 676 |
|
| 677 |
if i < len(monthly_zscores):
|
| 678 |
+
anomaly_flags.append(abs(safe_float(monthly_zscores[i])) > ZSCORE_THRESHOLD)
|
| 679 |
else:
|
| 680 |
anomaly_flags.append(False)
|
| 681 |
|
|
|
|
| 686 |
"baseline_min": b_min,
|
| 687 |
"baseline_max": b_max,
|
| 688 |
"anomaly_flags": anomaly_flags,
|
| 689 |
+
"label": "Built-up area (hectares)",
|
| 690 |
}
|
| 691 |
|
| 692 |
+
@staticmethod
def _build_seasonal_buildup_fractions(
    monthly_buildup_fractions: list[float],
    n_years: int,
) -> dict[int, dict]:
    """Group baseline monthly buildup fractions by calendar month."""
    # Bucket by calendar month (1..12); index i of the flat baseline
    # series corresponds to calendar month (i % 12) + 1.
    buckets: dict[int, list[float]] = {}
    for idx, frac in enumerate(monthly_buildup_fractions):
        buckets.setdefault((idx % 12) + 1, []).append(safe_float(frac))

    # Only months that actually received samples appear in the output.
    return {
        month: {
            "mean": float(np.mean(vals)),
            "min": float(np.min(vals)),
            "max": float(np.max(vals)),
        }
        for month, vals in buckets.items()
    }
|
| 712 |
+
|
| 713 |
@staticmethod
|
| 714 |
def _write_change_raster(current_path: str, baseline_path: str, output_path: str) -> None:
|
| 715 |
+
"""Write single-band change raster: current built-up mask minus baseline.
|
| 716 |
+
|
| 717 |
+
Built-up mask uses combined NDBI > threshold AND NDVI < veg threshold,
|
| 718 |
+
when the TIF has paired (NDBI, NDVI) bands per timestep. Falls back
|
| 719 |
+
to NDBI-only when band layout is not paired (legacy data).
|
| 720 |
+
"""
|
| 721 |
+
def _build_buildup_mask(path: str) -> tuple[np.ndarray, dict]:
|
| 722 |
+
with rasterio.open(path) as src:
|
| 723 |
+
count = src.count
|
| 724 |
+
profile = src.profile.copy()
|
| 725 |
+
paired = count >= 2 and count % 2 == 0
|
| 726 |
+
if paired:
|
| 727 |
+
n_months = count // 2
|
| 728 |
+
ndbi_stack = []
|
| 729 |
+
ndvi_stack = []
|
| 730 |
+
for m in range(n_months):
|
| 731 |
+
ndbi_stack.append(src.read(m * 2 + 1).astype(np.float32))
|
| 732 |
+
ndvi_stack.append(src.read(m * 2 + 2).astype(np.float32))
|
| 733 |
+
with np.errstate(all="ignore"):
|
| 734 |
+
ndbi_mean = np.nanmean(np.stack(ndbi_stack), axis=0)
|
| 735 |
+
ndvi_mean = np.nanmean(np.stack(ndvi_stack), axis=0)
|
| 736 |
+
ndbi_mean = np.nan_to_num(ndbi_mean, nan=0.0, posinf=0.0, neginf=0.0)
|
| 737 |
+
ndvi_mean = np.nan_to_num(ndvi_mean, nan=1.0, posinf=1.0, neginf=1.0)
|
| 738 |
+
mask = (ndbi_mean > NDBI_THRESHOLD) & (ndvi_mean < NDVI_BUILDUP_MAX)
|
| 739 |
+
else:
|
| 740 |
+
ndbi_stack = [src.read(b + 1).astype(np.float32) for b in range(count)]
|
| 741 |
+
with np.errstate(all="ignore"):
|
| 742 |
+
ndbi_mean = np.nanmean(np.stack(ndbi_stack), axis=0)
|
| 743 |
+
ndbi_mean = np.nan_to_num(ndbi_mean, nan=0.0, posinf=0.0, neginf=0.0)
|
| 744 |
+
mask = ndbi_mean > NDBI_THRESHOLD
|
| 745 |
+
return mask.astype(np.float32), profile
|
| 746 |
+
|
| 747 |
+
c_buildup, profile = _build_buildup_mask(current_path)
|
| 748 |
+
b_buildup, _ = _build_buildup_mask(baseline_path)
|
| 749 |
change = c_buildup - b_buildup
|
| 750 |
|
| 751 |
profile.update(count=1, dtype="float32")
|
|
@@ -22,7 +22,7 @@ from app.config import (
|
|
| 22 |
ZSCORE_THRESHOLD,
|
| 23 |
MIN_CLUSTER_PIXELS,
|
| 24 |
)
|
| 25 |
-
from app.eo_products.base import BaseProduct, SpatialData
|
| 26 |
from app.models import (
|
| 27 |
AOI,
|
| 28 |
TimeRange,
|
|
@@ -138,7 +138,7 @@ class NdviProduct(BaseProduct):
|
|
| 138 |
|
| 139 |
# --- Seasonal baseline analysis ---
|
| 140 |
current_stats = self._compute_stats(current_path)
|
| 141 |
-
current_mean = current_stats["overall_mean"]
|
| 142 |
n_current_bands = current_stats["valid_months"]
|
| 143 |
|
| 144 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
|
@@ -152,7 +152,7 @@ class NdviProduct(BaseProduct):
|
|
| 152 |
|
| 153 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 154 |
s = seasonal_stats[most_recent_month]
|
| 155 |
-
z_current = compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_NDVI)
|
| 156 |
else:
|
| 157 |
z_current = 0.0
|
| 158 |
|
|
@@ -164,8 +164,8 @@ class NdviProduct(BaseProduct):
|
|
| 164 |
continue
|
| 165 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 166 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 167 |
-
z = compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 168 |
-
seasonal_stats[cal_month]["std"], MIN_STD_NDVI)
|
| 169 |
monthly_zscores.append(z)
|
| 170 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 171 |
anomaly_months += 1
|
|
@@ -218,17 +218,22 @@ class NdviProduct(BaseProduct):
|
|
| 218 |
trend = TrendDirection.STABLE
|
| 219 |
change = 0.0
|
| 220 |
chart_data = {
|
| 221 |
-
"dates":
|
| 222 |
-
"values": [round(v, 3) for v in current_stats["monthly_means"]],
|
| 223 |
-
"label": "NDVI",
|
| 224 |
}
|
| 225 |
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 232 |
|
| 233 |
self._spatial_data = SpatialData(
|
| 234 |
map_type="raster", label="NDVI", colormap="RdYlGn",
|
|
@@ -335,7 +340,7 @@ class NdviProduct(BaseProduct):
|
|
| 335 |
# --- Seasonal baseline analysis ---
|
| 336 |
current_stats = self._compute_stats(current_path)
|
| 337 |
baseline_stats = self._compute_stats(baseline_path)
|
| 338 |
-
current_mean = current_stats["overall_mean"]
|
| 339 |
n_current_bands = current_stats["valid_months"]
|
| 340 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
| 341 |
|
|
@@ -345,7 +350,7 @@ class NdviProduct(BaseProduct):
|
|
| 345 |
|
| 346 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 347 |
s = seasonal_stats[most_recent_month]
|
| 348 |
-
z_current = compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_NDVI)
|
| 349 |
else:
|
| 350 |
z_current = 0.0
|
| 351 |
|
|
@@ -357,8 +362,8 @@ class NdviProduct(BaseProduct):
|
|
| 357 |
continue
|
| 358 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 359 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 360 |
-
z = compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 361 |
-
seasonal_stats[cal_month]["std"], MIN_STD_NDVI)
|
| 362 |
monthly_zscores.append(z)
|
| 363 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 364 |
anomaly_months += 1
|
|
@@ -399,12 +404,17 @@ class NdviProduct(BaseProduct):
|
|
| 399 |
)
|
| 400 |
change = current_mean - baseline_stats["overall_mean"]
|
| 401 |
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 408 |
|
| 409 |
# Spatial data — store the current NDVI path for map rendering
|
| 410 |
self._spatial_data = SpatialData(
|
|
@@ -470,7 +480,8 @@ class NdviProduct(BaseProduct):
|
|
| 470 |
else:
|
| 471 |
valid = data.ravel()
|
| 472 |
if len(valid) > 0:
|
| 473 |
-
|
|
|
|
| 474 |
monthly_means.append(mean)
|
| 475 |
if mean > peak_val:
|
| 476 |
peak_val = mean
|
|
@@ -489,42 +500,40 @@ class NdviProduct(BaseProduct):
|
|
| 489 |
"peak_month_band": peak_band,
|
| 490 |
}
|
| 491 |
|
| 492 |
-
@
|
| 493 |
def _build_seasonal_chart_data(
|
|
|
|
| 494 |
current_monthly: list[float],
|
| 495 |
seasonal_stats: dict[int, dict],
|
| 496 |
time_range: TimeRange,
|
| 497 |
monthly_zscores: list[float],
|
| 498 |
) -> dict[str, Any]:
|
| 499 |
"""Build chart data with seasonal baseline envelope."""
|
| 500 |
-
start_month = time_range.start.month
|
| 501 |
n = len(current_monthly)
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
anomaly_flags = []
|
| 510 |
|
| 511 |
for i in range(n):
|
| 512 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 513 |
-
|
| 514 |
-
values.append(round(current_monthly[i], 3))
|
| 515 |
|
| 516 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 517 |
s = seasonal_stats[cal_month]
|
| 518 |
-
b_mean.append(round(s["mean"], 3))
|
| 519 |
-
b_min.append(round(s["min"], 3))
|
| 520 |
-
b_max.append(round(s["max"], 3))
|
| 521 |
else:
|
| 522 |
b_mean.append(0.0)
|
| 523 |
b_min.append(0.0)
|
| 524 |
b_max.append(0.0)
|
| 525 |
|
| 526 |
if i < len(monthly_zscores):
|
| 527 |
-
anomaly_flags.append(abs(monthly_zscores[i]) > ZSCORE_THRESHOLD)
|
| 528 |
else:
|
| 529 |
anomaly_flags.append(False)
|
| 530 |
|
|
@@ -535,6 +544,6 @@ class NdviProduct(BaseProduct):
|
|
| 535 |
"baseline_min": b_min,
|
| 536 |
"baseline_max": b_max,
|
| 537 |
"anomaly_flags": anomaly_flags,
|
| 538 |
-
"label": "NDVI",
|
| 539 |
}
|
| 540 |
|
|
|
|
| 22 |
ZSCORE_THRESHOLD,
|
| 23 |
MIN_CLUSTER_PIXELS,
|
| 24 |
)
|
| 25 |
+
from app.eo_products.base import BaseProduct, SpatialData, safe_float
|
| 26 |
from app.models import (
|
| 27 |
AOI,
|
| 28 |
TimeRange,
|
|
|
|
| 138 |
|
| 139 |
# --- Seasonal baseline analysis ---
|
| 140 |
current_stats = self._compute_stats(current_path)
|
| 141 |
+
current_mean = safe_float(current_stats["overall_mean"])
|
| 142 |
n_current_bands = current_stats["valid_months"]
|
| 143 |
|
| 144 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
|
|
|
| 152 |
|
| 153 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 154 |
s = seasonal_stats[most_recent_month]
|
| 155 |
+
z_current = safe_float(compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_NDVI))
|
| 156 |
else:
|
| 157 |
z_current = 0.0
|
| 158 |
|
|
|
|
| 164 |
continue
|
| 165 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 166 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 167 |
+
z = safe_float(compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 168 |
+
seasonal_stats[cal_month]["std"], MIN_STD_NDVI))
|
| 169 |
monthly_zscores.append(z)
|
| 170 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 171 |
anomaly_months += 1
|
|
|
|
| 218 |
trend = TrendDirection.STABLE
|
| 219 |
change = 0.0
|
| 220 |
chart_data = {
|
| 221 |
+
"dates": self._build_monthly_dates(time_range.start, len(current_stats["monthly_means"])),
|
| 222 |
+
"values": [round(safe_float(v), 3) for v in current_stats["monthly_means"]],
|
| 223 |
+
"label": "Vegetation greenness (NDVI)",
|
| 224 |
}
|
| 225 |
|
| 226 |
+
headline = self._generate_headline(
|
| 227 |
+
status=status,
|
| 228 |
+
z_current=z_current,
|
| 229 |
+
hotspot_pct=hotspot_pct,
|
| 230 |
+
anomaly_months=anomaly_months,
|
| 231 |
+
total_months=n_current_bands,
|
| 232 |
+
value_phrase=f"NDVI {current_mean:.2f}",
|
| 233 |
+
indicator_label="Vegetation health",
|
| 234 |
+
direction_up="greening",
|
| 235 |
+
direction_down="decline",
|
| 236 |
+
)
|
| 237 |
|
| 238 |
self._spatial_data = SpatialData(
|
| 239 |
map_type="raster", label="NDVI", colormap="RdYlGn",
|
|
|
|
| 340 |
# --- Seasonal baseline analysis ---
|
| 341 |
current_stats = self._compute_stats(current_path)
|
| 342 |
baseline_stats = self._compute_stats(baseline_path)
|
| 343 |
+
current_mean = safe_float(current_stats["overall_mean"])
|
| 344 |
n_current_bands = current_stats["valid_months"]
|
| 345 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
| 346 |
|
|
|
|
| 350 |
|
| 351 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 352 |
s = seasonal_stats[most_recent_month]
|
| 353 |
+
z_current = safe_float(compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_NDVI))
|
| 354 |
else:
|
| 355 |
z_current = 0.0
|
| 356 |
|
|
|
|
| 362 |
continue
|
| 363 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 364 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 365 |
+
z = safe_float(compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 366 |
+
seasonal_stats[cal_month]["std"], MIN_STD_NDVI))
|
| 367 |
monthly_zscores.append(z)
|
| 368 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 369 |
anomaly_months += 1
|
|
|
|
| 404 |
)
|
| 405 |
change = current_mean - baseline_stats["overall_mean"]
|
| 406 |
|
| 407 |
+
headline = self._generate_headline(
|
| 408 |
+
status=status,
|
| 409 |
+
z_current=z_current,
|
| 410 |
+
hotspot_pct=hotspot_pct,
|
| 411 |
+
anomaly_months=anomaly_months,
|
| 412 |
+
total_months=n_current_bands,
|
| 413 |
+
value_phrase=f"NDVI {current_mean:.2f}",
|
| 414 |
+
indicator_label="Vegetation health",
|
| 415 |
+
direction_up="greening",
|
| 416 |
+
direction_down="decline",
|
| 417 |
+
)
|
| 418 |
|
| 419 |
# Spatial data — store the current NDVI path for map rendering
|
| 420 |
self._spatial_data = SpatialData(
|
|
|
|
| 480 |
else:
|
| 481 |
valid = data.ravel()
|
| 482 |
if len(valid) > 0:
|
| 483 |
+
with np.errstate(all="ignore"):
|
| 484 |
+
mean = safe_float(np.nanmean(valid))
|
| 485 |
monthly_means.append(mean)
|
| 486 |
if mean > peak_val:
|
| 487 |
peak_val = mean
|
|
|
|
| 500 |
"peak_month_band": peak_band,
|
| 501 |
}
|
| 502 |
|
| 503 |
+
@classmethod
|
| 504 |
def _build_seasonal_chart_data(
|
| 505 |
+
cls,
|
| 506 |
current_monthly: list[float],
|
| 507 |
seasonal_stats: dict[int, dict],
|
| 508 |
time_range: TimeRange,
|
| 509 |
monthly_zscores: list[float],
|
| 510 |
) -> dict[str, Any]:
|
| 511 |
"""Build chart data with seasonal baseline envelope."""
|
|
|
|
| 512 |
n = len(current_monthly)
|
| 513 |
+
dates = cls._build_monthly_dates(time_range.start, n)
|
| 514 |
+
values: list[float] = []
|
| 515 |
+
b_mean: list[float] = []
|
| 516 |
+
b_min: list[float] = []
|
| 517 |
+
b_max: list[float] = []
|
| 518 |
+
anomaly_flags: list[bool] = []
|
| 519 |
+
start_month = time_range.start.month
|
|
|
|
| 520 |
|
| 521 |
for i in range(n):
|
| 522 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 523 |
+
values.append(round(safe_float(current_monthly[i]), 3))
|
|
|
|
| 524 |
|
| 525 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 526 |
s = seasonal_stats[cal_month]
|
| 527 |
+
b_mean.append(round(safe_float(s["mean"]), 3))
|
| 528 |
+
b_min.append(round(safe_float(s["min"]), 3))
|
| 529 |
+
b_max.append(round(safe_float(s["max"]), 3))
|
| 530 |
else:
|
| 531 |
b_mean.append(0.0)
|
| 532 |
b_min.append(0.0)
|
| 533 |
b_max.append(0.0)
|
| 534 |
|
| 535 |
if i < len(monthly_zscores):
|
| 536 |
+
anomaly_flags.append(abs(safe_float(monthly_zscores[i])) > ZSCORE_THRESHOLD)
|
| 537 |
else:
|
| 538 |
anomaly_flags.append(False)
|
| 539 |
|
|
|
|
| 544 |
"baseline_min": b_min,
|
| 545 |
"baseline_max": b_max,
|
| 546 |
"anomaly_flags": anomaly_flags,
|
| 547 |
+
"label": "Vegetation greenness (NDVI)",
|
| 548 |
}
|
| 549 |
|
|
@@ -22,7 +22,7 @@ from app.config import (
|
|
| 22 |
ZSCORE_THRESHOLD,
|
| 23 |
MIN_CLUSTER_PIXELS,
|
| 24 |
)
|
| 25 |
-
from app.eo_products.base import BaseProduct, SpatialData
|
| 26 |
from app.models import (
|
| 27 |
AOI,
|
| 28 |
TimeRange,
|
|
@@ -133,7 +133,7 @@ class SarProduct(BaseProduct):
|
|
| 133 |
|
| 134 |
# --- Seasonal baseline analysis ---
|
| 135 |
current_stats = self._compute_stats(current_path)
|
| 136 |
-
current_mean = current_stats["overall_vv_mean"]
|
| 137 |
n_current_bands = current_stats["valid_months"]
|
| 138 |
|
| 139 |
if n_current_bands == 0:
|
|
@@ -154,7 +154,7 @@ class SarProduct(BaseProduct):
|
|
| 154 |
|
| 155 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 156 |
s = seasonal_stats[most_recent_month]
|
| 157 |
-
z_current = compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_SAR)
|
| 158 |
else:
|
| 159 |
z_current = 0.0
|
| 160 |
|
|
@@ -167,8 +167,8 @@ class SarProduct(BaseProduct):
|
|
| 167 |
continue
|
| 168 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 169 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 170 |
-
z = compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 171 |
-
seasonal_stats[cal_month]["std"], MIN_STD_SAR)
|
| 172 |
monthly_zscores.append(z)
|
| 173 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 174 |
anomaly_months += 1
|
|
@@ -220,10 +220,11 @@ class SarProduct(BaseProduct):
|
|
| 220 |
baseline_stats["vv_std"],
|
| 221 |
)
|
| 222 |
|
| 223 |
-
change_db = current_mean - baseline_stats["overall_vv_mean"]
|
| 224 |
-
change_pct = self._compute_change_area_pct(
|
| 225 |
current_path, baseline_path, current_stats, baseline_stats,
|
| 226 |
-
)
|
|
|
|
| 227 |
|
| 228 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 229 |
trend = self._compute_trend_zscore(monthly_zscores)
|
|
@@ -232,25 +233,24 @@ class SarProduct(BaseProduct):
|
|
| 232 |
current_stats["monthly_vv_means"], seasonal_stats, time_range, monthly_zscores,
|
| 233 |
)
|
| 234 |
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
headline = f"Stable backscatter conditions (mean VV {current_mean:.1f} dB, z={z_current:+.1f})"
|
| 247 |
|
| 248 |
change_map_path = os.path.join(results_dir, "sar_change.tif")
|
| 249 |
self._write_change_raster(current_path, baseline_path, change_map_path)
|
| 250 |
|
| 251 |
self._spatial_data = SpatialData(
|
| 252 |
map_type="raster",
|
| 253 |
-
label="
|
| 254 |
colormap="RdBu_r",
|
| 255 |
vmin=-6,
|
| 256 |
vmax=6,
|
|
@@ -405,7 +405,7 @@ class SarProduct(BaseProduct):
|
|
| 405 |
# --- Seasonal baseline analysis ---
|
| 406 |
current_stats = self._compute_stats(current_path)
|
| 407 |
baseline_stats = self._compute_stats(baseline_path)
|
| 408 |
-
current_mean = current_stats["overall_vv_mean"]
|
| 409 |
n_current_bands = current_stats["valid_months"]
|
| 410 |
|
| 411 |
if n_current_bands == 0:
|
|
@@ -423,7 +423,7 @@ class SarProduct(BaseProduct):
|
|
| 423 |
|
| 424 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 425 |
s = seasonal_stats[most_recent_month]
|
| 426 |
-
z_current = compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_SAR)
|
| 427 |
else:
|
| 428 |
z_current = 0.0
|
| 429 |
|
|
@@ -436,8 +436,8 @@ class SarProduct(BaseProduct):
|
|
| 436 |
continue
|
| 437 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 438 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 439 |
-
z = compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 440 |
-
seasonal_stats[cal_month]["std"], MIN_STD_SAR)
|
| 441 |
monthly_zscores.append(z)
|
| 442 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 443 |
anomaly_months += 1
|
|
@@ -488,10 +488,11 @@ class SarProduct(BaseProduct):
|
|
| 488 |
baseline_stats["vv_std"],
|
| 489 |
)
|
| 490 |
|
| 491 |
-
change_db = current_mean - baseline_stats["overall_vv_mean"]
|
| 492 |
-
change_pct = self._compute_change_area_pct(
|
| 493 |
current_path, baseline_path, current_stats, baseline_stats,
|
| 494 |
-
)
|
|
|
|
| 495 |
|
| 496 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 497 |
trend = self._compute_trend_zscore(monthly_zscores)
|
|
@@ -500,18 +501,17 @@ class SarProduct(BaseProduct):
|
|
| 500 |
current_stats["monthly_vv_means"], seasonal_stats, time_range, monthly_zscores,
|
| 501 |
)
|
| 502 |
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
headline = f"Stable backscatter conditions (mean VV {current_mean:.1f} dB, z={z_current:+.1f})"
|
| 515 |
|
| 516 |
# Store raster path for map rendering — write a change map
|
| 517 |
change_map_path = os.path.join(results_dir, "sar_change.tif")
|
|
@@ -519,7 +519,7 @@ class SarProduct(BaseProduct):
|
|
| 519 |
|
| 520 |
self._spatial_data = SpatialData(
|
| 521 |
map_type="raster",
|
| 522 |
-
label="
|
| 523 |
colormap="RdBu_r",
|
| 524 |
vmin=-6,
|
| 525 |
vmax=6,
|
|
@@ -593,7 +593,8 @@ class SarProduct(BaseProduct):
|
|
| 593 |
else:
|
| 594 |
valid = data.ravel()
|
| 595 |
if len(valid) > 0:
|
| 596 |
-
|
|
|
|
| 597 |
monthly_vv_means.append(mean_val)
|
| 598 |
all_vv_values.extend(valid.tolist())
|
| 599 |
else:
|
|
@@ -601,8 +602,8 @@ class SarProduct(BaseProduct):
|
|
| 601 |
|
| 602 |
valid_months = sum(1 for m in monthly_vv_means if m != 0.0)
|
| 603 |
valid_means = [m for m in monthly_vv_means if m != 0.0]
|
| 604 |
-
overall_vv_mean =
|
| 605 |
-
vv_std =
|
| 606 |
|
| 607 |
return {
|
| 608 |
"monthly_vv_means": monthly_vv_means,
|
|
@@ -685,44 +686,46 @@ class SarProduct(BaseProduct):
|
|
| 685 |
with np.errstate(all="ignore"):
|
| 686 |
mean = np.nanmean(arr, axis=0)
|
| 687 |
std = np.nanstd(arr, axis=0, ddof=1) if arr.shape[0] > 1 else np.zeros_like(mean)
|
|
|
|
|
|
|
|
|
|
| 688 |
return mean, std
|
| 689 |
|
| 690 |
-
@
|
| 691 |
def _build_seasonal_chart_data(
|
|
|
|
| 692 |
current_monthly: list[float],
|
| 693 |
seasonal_stats: dict[int, dict],
|
| 694 |
time_range: TimeRange,
|
| 695 |
monthly_zscores: list[float],
|
| 696 |
) -> dict[str, Any]:
|
| 697 |
"""Build chart data with seasonal baseline envelope."""
|
| 698 |
-
start_month = time_range.start.month
|
| 699 |
n = len(current_monthly)
|
| 700 |
-
|
| 701 |
-
|
| 702 |
-
dates: list[str] = []
|
| 703 |
values: list[float] = []
|
| 704 |
b_mean: list[float] = []
|
| 705 |
b_min: list[float] = []
|
| 706 |
b_max: list[float] = []
|
| 707 |
anomaly_flags: list[bool] = []
|
|
|
|
| 708 |
|
| 709 |
for i in range(n):
|
| 710 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 711 |
-
|
| 712 |
-
values.append(round(current_monthly[i], 2))
|
| 713 |
|
| 714 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 715 |
s = seasonal_stats[cal_month]
|
| 716 |
-
b_mean.append(round(s["mean"], 2))
|
| 717 |
-
b_min.append(round(s["min"], 2))
|
| 718 |
-
b_max.append(round(s["max"], 2))
|
| 719 |
else:
|
| 720 |
b_mean.append(0.0)
|
| 721 |
b_min.append(0.0)
|
| 722 |
b_max.append(0.0)
|
| 723 |
|
| 724 |
if i < len(monthly_zscores):
|
| 725 |
-
|
|
|
|
| 726 |
else:
|
| 727 |
anomaly_flags.append(False)
|
| 728 |
|
|
@@ -733,7 +736,7 @@ class SarProduct(BaseProduct):
|
|
| 733 |
"baseline_min": b_min,
|
| 734 |
"baseline_max": b_max,
|
| 735 |
"anomaly_flags": anomaly_flags,
|
| 736 |
-
"label": "
|
| 737 |
}
|
| 738 |
|
| 739 |
# ------------------------------------------------------------------
|
|
@@ -753,17 +756,21 @@ class SarProduct(BaseProduct):
|
|
| 753 |
c_vv = []
|
| 754 |
for m in range(c_months):
|
| 755 |
c_vv.append(csrc.read(m * 2 + 1).astype(np.float32))
|
| 756 |
-
|
|
|
|
| 757 |
|
| 758 |
b_vv = []
|
| 759 |
for m in range(b_months):
|
| 760 |
b_vv.append(bsrc.read(m * 2 + 1).astype(np.float32))
|
| 761 |
-
|
|
|
|
| 762 |
|
|
|
|
|
|
|
| 763 |
diff = np.abs(c_mean - b_mean)
|
| 764 |
significant = np.sum(diff > CHANGE_THRESHOLD_DB)
|
| 765 |
total = diff.size
|
| 766 |
-
return
|
| 767 |
|
| 768 |
@staticmethod
|
| 769 |
def _count_flood_months(
|
|
@@ -779,14 +786,18 @@ class SarProduct(BaseProduct):
|
|
| 779 |
with rasterio.open(current_path) as csrc:
|
| 780 |
c_months = csrc.count // 2
|
| 781 |
c_vv = [csrc.read(m * 2 + 1).astype(np.float32) for m in range(c_months)]
|
| 782 |
-
|
|
|
|
| 783 |
profile = csrc.profile.copy()
|
| 784 |
|
| 785 |
with rasterio.open(baseline_path) as bsrc:
|
| 786 |
b_months = bsrc.count // 2
|
| 787 |
b_vv = [bsrc.read(m * 2 + 1).astype(np.float32) for m in range(b_months)]
|
| 788 |
-
|
|
|
|
| 789 |
|
|
|
|
|
|
|
| 790 |
change = c_mean - b_mean
|
| 791 |
profile.update(count=1, dtype="float32")
|
| 792 |
with rasterio.open(output_path, "w", **profile) as dst:
|
|
|
|
| 22 |
ZSCORE_THRESHOLD,
|
| 23 |
MIN_CLUSTER_PIXELS,
|
| 24 |
)
|
| 25 |
+
from app.eo_products.base import BaseProduct, SpatialData, safe_float
|
| 26 |
from app.models import (
|
| 27 |
AOI,
|
| 28 |
TimeRange,
|
|
|
|
| 133 |
|
| 134 |
# --- Seasonal baseline analysis ---
|
| 135 |
current_stats = self._compute_stats(current_path)
|
| 136 |
+
current_mean = safe_float(current_stats["overall_vv_mean"])
|
| 137 |
n_current_bands = current_stats["valid_months"]
|
| 138 |
|
| 139 |
if n_current_bands == 0:
|
|
|
|
| 154 |
|
| 155 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 156 |
s = seasonal_stats[most_recent_month]
|
| 157 |
+
z_current = safe_float(compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_SAR))
|
| 158 |
else:
|
| 159 |
z_current = 0.0
|
| 160 |
|
|
|
|
| 167 |
continue
|
| 168 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 169 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 170 |
+
z = safe_float(compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 171 |
+
seasonal_stats[cal_month]["std"], MIN_STD_SAR))
|
| 172 |
monthly_zscores.append(z)
|
| 173 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 174 |
anomaly_months += 1
|
|
|
|
| 220 |
baseline_stats["vv_std"],
|
| 221 |
)
|
| 222 |
|
| 223 |
+
change_db = safe_float(current_mean - baseline_stats["overall_vv_mean"])
|
| 224 |
+
change_pct = safe_float(self._compute_change_area_pct(
|
| 225 |
current_path, baseline_path, current_stats, baseline_stats,
|
| 226 |
+
))
|
| 227 |
+
hotspot_pct = safe_float(hotspot_pct)
|
| 228 |
|
| 229 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 230 |
trend = self._compute_trend_zscore(monthly_zscores)
|
|
|
|
| 233 |
current_stats["monthly_vv_means"], seasonal_stats, time_range, monthly_zscores,
|
| 234 |
)
|
| 235 |
|
| 236 |
+
headline = self._generate_headline(
|
| 237 |
+
status=status,
|
| 238 |
+
z_current=z_current,
|
| 239 |
+
hotspot_pct=hotspot_pct,
|
| 240 |
+
anomaly_months=anomaly_months,
|
| 241 |
+
total_months=n_current_bands,
|
| 242 |
+
value_phrase=f"backscatter {current_mean:.1f} dB",
|
| 243 |
+
indicator_label="Ground surface",
|
| 244 |
+
direction_up="brightening (drying or new structures)",
|
| 245 |
+
direction_down="darkening (possible flooding or moisture)",
|
| 246 |
+
)
|
|
|
|
| 247 |
|
| 248 |
change_map_path = os.path.join(results_dir, "sar_change.tif")
|
| 249 |
self._write_change_raster(current_path, baseline_path, change_map_path)
|
| 250 |
|
| 251 |
self._spatial_data = SpatialData(
|
| 252 |
map_type="raster",
|
| 253 |
+
label="Ground surface change (dB)",
|
| 254 |
colormap="RdBu_r",
|
| 255 |
vmin=-6,
|
| 256 |
vmax=6,
|
|
|
|
| 405 |
# --- Seasonal baseline analysis ---
|
| 406 |
current_stats = self._compute_stats(current_path)
|
| 407 |
baseline_stats = self._compute_stats(baseline_path)
|
| 408 |
+
current_mean = safe_float(current_stats["overall_vv_mean"])
|
| 409 |
n_current_bands = current_stats["valid_months"]
|
| 410 |
|
| 411 |
if n_current_bands == 0:
|
|
|
|
| 423 |
|
| 424 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 425 |
s = seasonal_stats[most_recent_month]
|
| 426 |
+
z_current = safe_float(compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_SAR))
|
| 427 |
else:
|
| 428 |
z_current = 0.0
|
| 429 |
|
|
|
|
| 436 |
continue
|
| 437 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 438 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 439 |
+
z = safe_float(compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 440 |
+
seasonal_stats[cal_month]["std"], MIN_STD_SAR))
|
| 441 |
monthly_zscores.append(z)
|
| 442 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 443 |
anomaly_months += 1
|
|
|
|
| 488 |
baseline_stats["vv_std"],
|
| 489 |
)
|
| 490 |
|
| 491 |
+
change_db = safe_float(current_mean - baseline_stats["overall_vv_mean"])
|
| 492 |
+
change_pct = safe_float(self._compute_change_area_pct(
|
| 493 |
current_path, baseline_path, current_stats, baseline_stats,
|
| 494 |
+
))
|
| 495 |
+
hotspot_pct = safe_float(hotspot_pct)
|
| 496 |
|
| 497 |
status = self._classify_zscore(z_current, hotspot_pct)
|
| 498 |
trend = self._compute_trend_zscore(monthly_zscores)
|
|
|
|
| 501 |
current_stats["monthly_vv_means"], seasonal_stats, time_range, monthly_zscores,
|
| 502 |
)
|
| 503 |
|
| 504 |
+
headline = self._generate_headline(
|
| 505 |
+
status=status,
|
| 506 |
+
z_current=z_current,
|
| 507 |
+
hotspot_pct=hotspot_pct,
|
| 508 |
+
anomaly_months=anomaly_months,
|
| 509 |
+
total_months=n_current_bands,
|
| 510 |
+
value_phrase=f"backscatter {current_mean:.1f} dB",
|
| 511 |
+
indicator_label="Ground surface",
|
| 512 |
+
direction_up="brightening (drying or new structures)",
|
| 513 |
+
direction_down="darkening (possible flooding or moisture)",
|
| 514 |
+
)
|
|
|
|
| 515 |
|
| 516 |
# Store raster path for map rendering — write a change map
|
| 517 |
change_map_path = os.path.join(results_dir, "sar_change.tif")
|
|
|
|
| 519 |
|
| 520 |
self._spatial_data = SpatialData(
|
| 521 |
map_type="raster",
|
| 522 |
+
label="Ground surface change (dB)",
|
| 523 |
colormap="RdBu_r",
|
| 524 |
vmin=-6,
|
| 525 |
vmax=6,
|
|
|
|
| 593 |
else:
|
| 594 |
valid = data.ravel()
|
| 595 |
if len(valid) > 0:
|
| 596 |
+
with np.errstate(all="ignore"):
|
| 597 |
+
mean_val = safe_float(np.nanmean(valid))
|
| 598 |
monthly_vv_means.append(mean_val)
|
| 599 |
all_vv_values.extend(valid.tolist())
|
| 600 |
else:
|
|
|
|
| 602 |
|
| 603 |
valid_months = sum(1 for m in monthly_vv_means if m != 0.0)
|
| 604 |
valid_means = [m for m in monthly_vv_means if m != 0.0]
|
| 605 |
+
overall_vv_mean = safe_float(np.mean(valid_means)) if valid_means else 0.0
|
| 606 |
+
vv_std = safe_float(np.std(all_vv_values), default=1.0) if all_vv_values else 1.0
|
| 607 |
|
| 608 |
return {
|
| 609 |
"monthly_vv_means": monthly_vv_means,
|
|
|
|
| 686 |
with np.errstate(all="ignore"):
|
| 687 |
mean = np.nanmean(arr, axis=0)
|
| 688 |
std = np.nanstd(arr, axis=0, ddof=1) if arr.shape[0] > 1 else np.zeros_like(mean)
|
| 689 |
+
# Replace any all-NaN pixel stats with 0 so downstream z-score math is stable
|
| 690 |
+
mean = np.nan_to_num(mean, nan=0.0, posinf=0.0, neginf=0.0)
|
| 691 |
+
std = np.nan_to_num(std, nan=0.0, posinf=0.0, neginf=0.0)
|
| 692 |
return mean, std
|
| 693 |
|
| 694 |
+
@classmethod
|
| 695 |
def _build_seasonal_chart_data(
|
| 696 |
+
cls,
|
| 697 |
current_monthly: list[float],
|
| 698 |
seasonal_stats: dict[int, dict],
|
| 699 |
time_range: TimeRange,
|
| 700 |
monthly_zscores: list[float],
|
| 701 |
) -> dict[str, Any]:
|
| 702 |
"""Build chart data with seasonal baseline envelope."""
|
|
|
|
| 703 |
n = len(current_monthly)
|
| 704 |
+
dates = cls._build_monthly_dates(time_range.start, n)
|
|
|
|
|
|
|
| 705 |
values: list[float] = []
|
| 706 |
b_mean: list[float] = []
|
| 707 |
b_min: list[float] = []
|
| 708 |
b_max: list[float] = []
|
| 709 |
anomaly_flags: list[bool] = []
|
| 710 |
+
start_month = time_range.start.month
|
| 711 |
|
| 712 |
for i in range(n):
|
| 713 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 714 |
+
values.append(round(safe_float(current_monthly[i]), 2))
|
|
|
|
| 715 |
|
| 716 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 717 |
s = seasonal_stats[cal_month]
|
| 718 |
+
b_mean.append(round(safe_float(s["mean"]), 2))
|
| 719 |
+
b_min.append(round(safe_float(s["min"]), 2))
|
| 720 |
+
b_max.append(round(safe_float(s["max"]), 2))
|
| 721 |
else:
|
| 722 |
b_mean.append(0.0)
|
| 723 |
b_min.append(0.0)
|
| 724 |
b_max.append(0.0)
|
| 725 |
|
| 726 |
if i < len(monthly_zscores):
|
| 727 |
+
z = safe_float(monthly_zscores[i])
|
| 728 |
+
anomaly_flags.append(abs(z) > ZSCORE_THRESHOLD)
|
| 729 |
else:
|
| 730 |
anomaly_flags.append(False)
|
| 731 |
|
|
|
|
| 736 |
"baseline_min": b_min,
|
| 737 |
"baseline_max": b_max,
|
| 738 |
"anomaly_flags": anomaly_flags,
|
| 739 |
+
"label": "Ground surface backscatter (dB)",
|
| 740 |
}
|
| 741 |
|
| 742 |
# ------------------------------------------------------------------
|
|
|
|
| 756 |
c_vv = []
|
| 757 |
for m in range(c_months):
|
| 758 |
c_vv.append(csrc.read(m * 2 + 1).astype(np.float32))
|
| 759 |
+
with np.errstate(all="ignore"):
|
| 760 |
+
c_mean = np.nanmean(np.stack(c_vv), axis=0)
|
| 761 |
|
| 762 |
b_vv = []
|
| 763 |
for m in range(b_months):
|
| 764 |
b_vv.append(bsrc.read(m * 2 + 1).astype(np.float32))
|
| 765 |
+
with np.errstate(all="ignore"):
|
| 766 |
+
b_mean = np.nanmean(np.stack(b_vv), axis=0)
|
| 767 |
|
| 768 |
+
c_mean = np.nan_to_num(c_mean, nan=0.0, posinf=0.0, neginf=0.0)
|
| 769 |
+
b_mean = np.nan_to_num(b_mean, nan=0.0, posinf=0.0, neginf=0.0)
|
| 770 |
diff = np.abs(c_mean - b_mean)
|
| 771 |
significant = np.sum(diff > CHANGE_THRESHOLD_DB)
|
| 772 |
total = diff.size
|
| 773 |
+
return safe_float(significant / total * 100) if total > 0 else 0.0
|
| 774 |
|
| 775 |
@staticmethod
|
| 776 |
def _count_flood_months(
|
|
|
|
| 786 |
with rasterio.open(current_path) as csrc:
|
| 787 |
c_months = csrc.count // 2
|
| 788 |
c_vv = [csrc.read(m * 2 + 1).astype(np.float32) for m in range(c_months)]
|
| 789 |
+
with np.errstate(all="ignore"):
|
| 790 |
+
c_mean = np.nanmean(np.stack(c_vv), axis=0)
|
| 791 |
profile = csrc.profile.copy()
|
| 792 |
|
| 793 |
with rasterio.open(baseline_path) as bsrc:
|
| 794 |
b_months = bsrc.count // 2
|
| 795 |
b_vv = [bsrc.read(m * 2 + 1).astype(np.float32) for m in range(b_months)]
|
| 796 |
+
with np.errstate(all="ignore"):
|
| 797 |
+
b_mean = np.nanmean(np.stack(b_vv), axis=0)
|
| 798 |
|
| 799 |
+
c_mean = np.nan_to_num(c_mean, nan=0.0, posinf=0.0, neginf=0.0)
|
| 800 |
+
b_mean = np.nan_to_num(b_mean, nan=0.0, posinf=0.0, neginf=0.0)
|
| 801 |
change = c_mean - b_mean
|
| 802 |
profile.update(count=1, dtype="float32")
|
| 803 |
with rasterio.open(output_path, "w", **profile) as dst:
|
|
@@ -22,7 +22,7 @@ from app.config import (
|
|
| 22 |
ZSCORE_THRESHOLD,
|
| 23 |
MIN_CLUSTER_PIXELS,
|
| 24 |
)
|
| 25 |
-
from app.eo_products.base import BaseProduct, SpatialData
|
| 26 |
from app.models import (
|
| 27 |
AOI,
|
| 28 |
TimeRange,
|
|
@@ -139,8 +139,8 @@ class WaterProduct(BaseProduct):
|
|
| 139 |
|
| 140 |
# --- Seasonal baseline analysis ---
|
| 141 |
current_stats = self._compute_stats(current_path)
|
| 142 |
-
current_mean = current_stats["overall_mean"]
|
| 143 |
-
current_frac = current_stats["overall_water_fraction"]
|
| 144 |
n_current_bands = current_stats["valid_months"]
|
| 145 |
|
| 146 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
|
@@ -154,7 +154,7 @@ class WaterProduct(BaseProduct):
|
|
| 154 |
|
| 155 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 156 |
s = seasonal_stats[most_recent_month]
|
| 157 |
-
z_current = compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_WATER)
|
| 158 |
else:
|
| 159 |
z_current = 0.0
|
| 160 |
|
|
@@ -166,8 +166,8 @@ class WaterProduct(BaseProduct):
|
|
| 166 |
continue
|
| 167 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 168 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 169 |
-
z = compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 170 |
-
seasonal_stats[cal_month]["std"], MIN_STD_WATER)
|
| 171 |
monthly_zscores.append(z)
|
| 172 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 173 |
anomaly_months += 1
|
|
@@ -228,17 +228,22 @@ class WaterProduct(BaseProduct):
|
|
| 228 |
self._zscore_raster = None
|
| 229 |
self._hotspot_mask = None
|
| 230 |
chart_data = {
|
| 231 |
-
"dates":
|
| 232 |
-
"values": [round(v * 100, 1) for v in current_stats["monthly_water_fractions"]],
|
| 233 |
-
"label": "Water extent (%)",
|
| 234 |
}
|
| 235 |
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 242 |
|
| 243 |
self._spatial_data = SpatialData(
|
| 244 |
map_type="raster",
|
|
@@ -345,8 +350,8 @@ class WaterProduct(BaseProduct):
|
|
| 345 |
# --- Seasonal baseline analysis ---
|
| 346 |
current_stats = self._compute_stats(current_path)
|
| 347 |
baseline_stats = self._compute_stats(baseline_path)
|
| 348 |
-
current_mean = current_stats["overall_mean"]
|
| 349 |
-
current_frac = current_stats["overall_water_fraction"]
|
| 350 |
n_current_bands = current_stats["valid_months"]
|
| 351 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
| 352 |
|
|
@@ -356,7 +361,7 @@ class WaterProduct(BaseProduct):
|
|
| 356 |
|
| 357 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 358 |
s = seasonal_stats[most_recent_month]
|
| 359 |
-
z_current = compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_WATER)
|
| 360 |
else:
|
| 361 |
z_current = 0.0
|
| 362 |
|
|
@@ -368,8 +373,8 @@ class WaterProduct(BaseProduct):
|
|
| 368 |
continue
|
| 369 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 370 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 371 |
-
z = compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 372 |
-
seasonal_stats[cal_month]["std"], MIN_STD_WATER)
|
| 373 |
monthly_zscores.append(z)
|
| 374 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 375 |
anomaly_months += 1
|
|
@@ -415,12 +420,17 @@ class WaterProduct(BaseProduct):
|
|
| 415 |
)
|
| 416 |
change = current_mean - baseline_stats["overall_mean"]
|
| 417 |
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 424 |
|
| 425 |
self._spatial_data = SpatialData(
|
| 426 |
map_type="raster",
|
|
@@ -487,9 +497,10 @@ class WaterProduct(BaseProduct):
|
|
| 487 |
valid = data.ravel()
|
| 488 |
if len(valid) > 0:
|
| 489 |
water_pixels = np.sum(valid > WATER_THRESHOLD)
|
| 490 |
-
frac =
|
| 491 |
monthly_fractions.append(frac)
|
| 492 |
-
|
|
|
|
| 493 |
monthly_means.append(mean_val)
|
| 494 |
if frac > peak_frac:
|
| 495 |
peak_frac = frac
|
|
@@ -549,8 +560,9 @@ class WaterProduct(BaseProduct):
|
|
| 549 |
}
|
| 550 |
return result
|
| 551 |
|
| 552 |
-
@
|
| 553 |
def _build_seasonal_chart_data(
|
|
|
|
| 554 |
current_monthly_fractions: list[float],
|
| 555 |
baseline_seasonal_fractions: dict[int, dict],
|
| 556 |
time_range: TimeRange,
|
|
@@ -571,34 +583,31 @@ class WaterProduct(BaseProduct):
|
|
| 571 |
monthly_zscores:
|
| 572 |
Per-month z-scores for anomaly flagging.
|
| 573 |
"""
|
| 574 |
-
start_month = time_range.start.month
|
| 575 |
n = len(current_monthly_fractions)
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
anomaly_flags = []
|
| 584 |
|
| 585 |
for i in range(n):
|
| 586 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 587 |
-
|
| 588 |
-
values.append(round(current_monthly_fractions[i] * 100, 1))
|
| 589 |
|
| 590 |
if cal_month in baseline_seasonal_fractions:
|
| 591 |
s = baseline_seasonal_fractions[cal_month]
|
| 592 |
-
b_mean.append(round(s["mean"] * 100, 1))
|
| 593 |
-
b_min.append(round(s["min"] * 100, 1))
|
| 594 |
-
b_max.append(round(s["max"] * 100, 1))
|
| 595 |
else:
|
| 596 |
b_mean.append(0.0)
|
| 597 |
b_min.append(0.0)
|
| 598 |
b_max.append(0.0)
|
| 599 |
|
| 600 |
if i < len(monthly_zscores):
|
| 601 |
-
anomaly_flags.append(abs(monthly_zscores[i]) > ZSCORE_THRESHOLD)
|
| 602 |
else:
|
| 603 |
anomaly_flags.append(False)
|
| 604 |
|
|
@@ -609,5 +618,5 @@ class WaterProduct(BaseProduct):
|
|
| 609 |
"baseline_min": b_min,
|
| 610 |
"baseline_max": b_max,
|
| 611 |
"anomaly_flags": anomaly_flags,
|
| 612 |
-
"label": "Water extent (%)",
|
| 613 |
}
|
|
|
|
| 22 |
ZSCORE_THRESHOLD,
|
| 23 |
MIN_CLUSTER_PIXELS,
|
| 24 |
)
|
| 25 |
+
from app.eo_products.base import BaseProduct, SpatialData, safe_float
|
| 26 |
from app.models import (
|
| 27 |
AOI,
|
| 28 |
TimeRange,
|
|
|
|
| 139 |
|
| 140 |
# --- Seasonal baseline analysis ---
|
| 141 |
current_stats = self._compute_stats(current_path)
|
| 142 |
+
current_mean = safe_float(current_stats["overall_mean"])
|
| 143 |
+
current_frac = safe_float(current_stats["overall_water_fraction"])
|
| 144 |
n_current_bands = current_stats["valid_months"]
|
| 145 |
|
| 146 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
|
|
|
| 154 |
|
| 155 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 156 |
s = seasonal_stats[most_recent_month]
|
| 157 |
+
z_current = safe_float(compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_WATER))
|
| 158 |
else:
|
| 159 |
z_current = 0.0
|
| 160 |
|
|
|
|
| 166 |
continue
|
| 167 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 168 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 169 |
+
z = safe_float(compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 170 |
+
seasonal_stats[cal_month]["std"], MIN_STD_WATER))
|
| 171 |
monthly_zscores.append(z)
|
| 172 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 173 |
anomaly_months += 1
|
|
|
|
| 228 |
self._zscore_raster = None
|
| 229 |
self._hotspot_mask = None
|
| 230 |
chart_data = {
|
| 231 |
+
"dates": self._build_monthly_dates(time_range.start, len(current_stats["monthly_water_fractions"])),
|
| 232 |
+
"values": [round(safe_float(v) * 100, 1) for v in current_stats["monthly_water_fractions"]],
|
| 233 |
+
"label": "Water extent (% of area)",
|
| 234 |
}
|
| 235 |
|
| 236 |
+
headline = self._generate_headline(
|
| 237 |
+
status=status,
|
| 238 |
+
z_current=z_current,
|
| 239 |
+
hotspot_pct=hotspot_pct,
|
| 240 |
+
anomaly_months=anomaly_months,
|
| 241 |
+
total_months=n_current_bands,
|
| 242 |
+
value_phrase=f"{current_frac*100:.1f}% of area covered by water",
|
| 243 |
+
indicator_label="Water bodies",
|
| 244 |
+
direction_up="expansion (possible flooding)",
|
| 245 |
+
direction_down="contraction (possible drought)",
|
| 246 |
+
)
|
| 247 |
|
| 248 |
self._spatial_data = SpatialData(
|
| 249 |
map_type="raster",
|
|
|
|
| 350 |
# --- Seasonal baseline analysis ---
|
| 351 |
current_stats = self._compute_stats(current_path)
|
| 352 |
baseline_stats = self._compute_stats(baseline_path)
|
| 353 |
+
current_mean = safe_float(current_stats["overall_mean"])
|
| 354 |
+
current_frac = safe_float(current_stats["overall_water_fraction"])
|
| 355 |
n_current_bands = current_stats["valid_months"]
|
| 356 |
spatial_completeness = self._compute_spatial_completeness(current_path)
|
| 357 |
|
|
|
|
| 361 |
|
| 362 |
if most_recent_month in seasonal_stats and seasonal_stats[most_recent_month]["n_years"] > 0:
|
| 363 |
s = seasonal_stats[most_recent_month]
|
| 364 |
+
z_current = safe_float(compute_zscore(current_mean, s["mean"], s["std"], MIN_STD_WATER))
|
| 365 |
else:
|
| 366 |
z_current = 0.0
|
| 367 |
|
|
|
|
| 373 |
continue
|
| 374 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 375 |
if cal_month in seasonal_stats and seasonal_stats[cal_month]["n_years"] > 0:
|
| 376 |
+
z = safe_float(compute_zscore(val, seasonal_stats[cal_month]["mean"],
|
| 377 |
+
seasonal_stats[cal_month]["std"], MIN_STD_WATER))
|
| 378 |
monthly_zscores.append(z)
|
| 379 |
if abs(z) > ZSCORE_THRESHOLD:
|
| 380 |
anomaly_months += 1
|
|
|
|
| 420 |
)
|
| 421 |
change = current_mean - baseline_stats["overall_mean"]
|
| 422 |
|
| 423 |
+
headline = self._generate_headline(
|
| 424 |
+
status=status,
|
| 425 |
+
z_current=z_current,
|
| 426 |
+
hotspot_pct=hotspot_pct,
|
| 427 |
+
anomaly_months=anomaly_months,
|
| 428 |
+
total_months=n_current_bands,
|
| 429 |
+
value_phrase=f"{current_frac*100:.1f}% of area covered by water",
|
| 430 |
+
indicator_label="Water bodies",
|
| 431 |
+
direction_up="expansion (possible flooding)",
|
| 432 |
+
direction_down="contraction (possible drought)",
|
| 433 |
+
)
|
| 434 |
|
| 435 |
self._spatial_data = SpatialData(
|
| 436 |
map_type="raster",
|
|
|
|
| 497 |
valid = data.ravel()
|
| 498 |
if len(valid) > 0:
|
| 499 |
water_pixels = np.sum(valid > WATER_THRESHOLD)
|
| 500 |
+
frac = safe_float(water_pixels / len(valid))
|
| 501 |
monthly_fractions.append(frac)
|
| 502 |
+
with np.errstate(all="ignore"):
|
| 503 |
+
mean_val = safe_float(np.nanmean(valid))
|
| 504 |
monthly_means.append(mean_val)
|
| 505 |
if frac > peak_frac:
|
| 506 |
peak_frac = frac
|
|
|
|
| 560 |
}
|
| 561 |
return result
|
| 562 |
|
| 563 |
+
@classmethod
|
| 564 |
def _build_seasonal_chart_data(
|
| 565 |
+
cls,
|
| 566 |
current_monthly_fractions: list[float],
|
| 567 |
baseline_seasonal_fractions: dict[int, dict],
|
| 568 |
time_range: TimeRange,
|
|
|
|
| 583 |
monthly_zscores:
|
| 584 |
Per-month z-scores for anomaly flagging.
|
| 585 |
"""
|
|
|
|
| 586 |
n = len(current_monthly_fractions)
|
| 587 |
+
dates = cls._build_monthly_dates(time_range.start, n)
|
| 588 |
+
values: list[float] = []
|
| 589 |
+
b_mean: list[float] = []
|
| 590 |
+
b_min: list[float] = []
|
| 591 |
+
b_max: list[float] = []
|
| 592 |
+
anomaly_flags: list[bool] = []
|
| 593 |
+
start_month = time_range.start.month
|
|
|
|
| 594 |
|
| 595 |
for i in range(n):
|
| 596 |
cal_month = ((start_month + i - 1) % 12) + 1
|
| 597 |
+
values.append(round(safe_float(current_monthly_fractions[i]) * 100, 1))
|
|
|
|
| 598 |
|
| 599 |
if cal_month in baseline_seasonal_fractions:
|
| 600 |
s = baseline_seasonal_fractions[cal_month]
|
| 601 |
+
b_mean.append(round(safe_float(s["mean"]) * 100, 1))
|
| 602 |
+
b_min.append(round(safe_float(s["min"]) * 100, 1))
|
| 603 |
+
b_max.append(round(safe_float(s["max"]) * 100, 1))
|
| 604 |
else:
|
| 605 |
b_mean.append(0.0)
|
| 606 |
b_min.append(0.0)
|
| 607 |
b_max.append(0.0)
|
| 608 |
|
| 609 |
if i < len(monthly_zscores):
|
| 610 |
+
anomaly_flags.append(abs(safe_float(monthly_zscores[i])) > ZSCORE_THRESHOLD)
|
| 611 |
else:
|
| 612 |
anomaly_flags.append(False)
|
| 613 |
|
|
|
|
| 618 |
"baseline_min": b_min,
|
| 619 |
"baseline_max": b_max,
|
| 620 |
"anomaly_flags": anomaly_flags,
|
| 621 |
+
"label": "Water extent (% of area)",
|
| 622 |
}
|
|
@@ -149,8 +149,10 @@ def build_true_color_graph(
|
|
| 149 |
cloud_mask = (scl == 4) | (scl == 5) | (scl == 6)
|
| 150 |
cube = cube.mask(cloud_mask == 0)
|
| 151 |
|
| 152 |
-
# Drop SCL, keep RGB
|
| 153 |
-
|
|
|
|
|
|
|
| 154 |
|
| 155 |
# Temporal median composite
|
| 156 |
composite = rgb.reduce_dimension(dimension="t", reducer="median")
|
|
@@ -240,15 +242,18 @@ def build_buildup_graph(
|
|
| 240 |
temporal_extent: list[str],
|
| 241 |
resolution_m: int = 20,
|
| 242 |
) -> openeo.DataCube:
|
| 243 |
-
"""Build an openEO process graph for monthly
|
| 244 |
|
| 245 |
-
|
| 246 |
-
|
|
|
|
| 247 |
|
| 248 |
-
|
|
|
|
|
|
|
|
|
|
| 249 |
|
| 250 |
-
|
| 251 |
-
indicator's process() method, not here — keeping the graph builder simple.
|
| 252 |
"""
|
| 253 |
cube = conn.load_collection(
|
| 254 |
collection_id="SENTINEL2_L2A",
|
|
@@ -262,13 +267,18 @@ def build_buildup_graph(
|
|
| 262 |
cloud_mask = (scl == 4) | (scl == 5) | (scl == 6)
|
| 263 |
cube = cube.mask(cloud_mask == 0)
|
| 264 |
|
| 265 |
-
|
| 266 |
-
b11 = cube.band("B11")
|
| 267 |
b08 = cube.band("B08")
|
|
|
|
|
|
|
| 268 |
ndbi = (b11 - b08) / (b11 + b08)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 269 |
|
| 270 |
# Monthly median composite
|
| 271 |
-
monthly =
|
| 272 |
|
| 273 |
# Always reproject to EPSG:4326
|
| 274 |
monthly = monthly.resample_spatial(resolution=resolution_m / 111320, projection="EPSG:4326")
|
|
|
|
| 149 |
cloud_mask = (scl == 4) | (scl == 5) | (scl == 6)
|
| 150 |
cube = cube.mask(cloud_mask == 0)
|
| 151 |
|
| 152 |
+
# Drop SCL, keep RGB in actual R,G,B order so band 1=Red, 2=Green, 3=Blue.
|
| 153 |
+
# (S2 native order is B02 Blue, B03 Green, B04 Red — we reorder here so
|
| 154 |
+
# downstream renderers can read bands [1,2,3] as RGB without swapping.)
|
| 155 |
+
rgb = cube.filter_bands(["B04", "B03", "B02"])
|
| 156 |
|
| 157 |
# Temporal median composite
|
| 158 |
composite = rgb.reduce_dimension(dimension="t", reducer="median")
|
|
|
|
| 242 |
temporal_extent: list[str],
|
| 243 |
resolution_m: int = 20,
|
| 244 |
) -> openeo.DataCube:
|
| 245 |
+
"""Build an openEO process graph for monthly built-up composites.
|
| 246 |
|
| 247 |
+
Emits two bands per timestep:
|
| 248 |
+
- NDBI = (B11 - B08) / (B11 + B08) — built-up index
|
| 249 |
+
- NDVI = (B08 - B04) / (B08 + B04) — vegetation index
|
| 250 |
|
| 251 |
+
The pair is needed because NDBI alone misclassifies bare soil/rock
|
| 252 |
+
as built-up in arid landscapes. The indicator's _compute_stats applies
|
| 253 |
+
the combined mask (NDBI > threshold AND NDVI < vegetation threshold)
|
| 254 |
+
to isolate true impervious surfaces.
|
| 255 |
|
| 256 |
+
Native resolution is 20m (due to B11 SWIR band). Cloud-masked via SCL.
|
|
|
|
| 257 |
"""
|
| 258 |
cube = conn.load_collection(
|
| 259 |
collection_id="SENTINEL2_L2A",
|
|
|
|
| 267 |
cloud_mask = (scl == 4) | (scl == 5) | (scl == 6)
|
| 268 |
cube = cube.mask(cloud_mask == 0)
|
| 269 |
|
| 270 |
+
b04 = cube.band("B04")
|
|
|
|
| 271 |
b08 = cube.band("B08")
|
| 272 |
+
b11 = cube.band("B11")
|
| 273 |
+
|
| 274 |
ndbi = (b11 - b08) / (b11 + b08)
|
| 275 |
+
ndvi = (b08 - b04) / (b08 + b04)
|
| 276 |
+
|
| 277 |
+
# Stack NDBI and NDVI as a 2-band datacube
|
| 278 |
+
stacked = ndbi.merge_cubes(ndvi)
|
| 279 |
|
| 280 |
# Monthly median composite
|
| 281 |
+
monthly = stacked.aggregate_temporal_period("month", reducer="median")
|
| 282 |
|
| 283 |
# Always reproject to EPSG:4326
|
| 284 |
monthly = monthly.resample_spatial(resolution=resolution_m / 111320, projection="EPSG:4326")
|
|
@@ -6,22 +6,42 @@ from typing import Sequence
|
|
| 6 |
from app.models import ProductResult, StatusLevel, TrendDirection
|
| 7 |
|
| 8 |
|
| 9 |
-
# --- Per-indicator interpretation templates ---
|
| 10 |
_INTERPRETATIONS: dict[tuple[str, StatusLevel], str] = {
|
| 11 |
-
("ndvi", StatusLevel.RED): "
|
| 12 |
-
("ndvi", StatusLevel.AMBER): "
|
| 13 |
-
("ndvi", StatusLevel.GREEN): "Vegetation cover is within the normal range for this area and season.",
|
| 14 |
-
("water", StatusLevel.RED): "
|
| 15 |
-
("water", StatusLevel.AMBER): "
|
| 16 |
-
("water", StatusLevel.GREEN): "
|
| 17 |
-
("sar", StatusLevel.RED): "
|
| 18 |
-
("sar", StatusLevel.AMBER): "
|
| 19 |
-
("sar", StatusLevel.GREEN): "
|
| 20 |
-
("buildup", StatusLevel.RED): "
|
| 21 |
-
("buildup", StatusLevel.AMBER): "
|
| 22 |
-
("buildup", StatusLevel.GREEN): "
|
| 23 |
}
|
| 24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
# --- Cross-indicator pattern rules ---
|
| 26 |
_CROSS_PATTERNS: list[tuple[dict[str, set[StatusLevel]], str]] = [
|
| 27 |
(
|
|
@@ -39,9 +59,9 @@ _CROSS_PATTERNS: list[tuple[dict[str, set[StatusLevel]], str]] = [
|
|
| 39 |
]
|
| 40 |
|
| 41 |
_LEAD_TEMPLATES = {
|
| 42 |
-
StatusLevel.RED: "
|
| 43 |
-
StatusLevel.AMBER: "
|
| 44 |
-
StatusLevel.GREEN: "All
|
| 45 |
}
|
| 46 |
|
| 47 |
|
|
|
|
| 6 |
from app.models import ProductResult, StatusLevel, TrendDirection
|
| 7 |
|
| 8 |
|
| 9 |
+
# --- Per-indicator interpretation templates (plain language) ---
|
| 10 |
_INTERPRETATIONS: dict[tuple[str, StatusLevel], str] = {
|
| 11 |
+
("ndvi", StatusLevel.RED): "Vegetation is well below normal for this season. This pattern is consistent with severe drought, widespread crop failure, intense grazing pressure, or rapid land clearing.",
|
| 12 |
+
("ndvi", StatusLevel.AMBER): "Vegetation is somewhat below normal for this season. May reflect early-season drought stress, mild degradation, or patchy land-use change.",
|
| 13 |
+
("ndvi", StatusLevel.GREEN): "Vegetation cover is within the normal range for this area and season — no anomaly detected.",
|
| 14 |
+
("water", StatusLevel.RED): "Surface water differs sharply from the seasonal norm. This may indicate flooding, dam release, drought, or hydrological disruption — direction and pattern matter.",
|
| 15 |
+
("water", StatusLevel.AMBER): "Surface water is somewhat outside the normal seasonal range. Possible early flood, irrigation change, or drying.",
|
| 16 |
+
("water", StatusLevel.GREEN): "Surface water is within the normal seasonal range.",
|
| 17 |
+
("sar", StatusLevel.RED): "Radar signal shows major ground-surface changes. Common causes: flooding, new construction, deforestation, or large soil-moisture shifts.",
|
| 18 |
+
("sar", StatusLevel.AMBER): "Radar signal shows moderate ground-surface changes. May reflect seasonal soil moisture variation or gradual land-use change.",
|
| 19 |
+
("sar", StatusLevel.GREEN): "Radar signal is within the normal range — ground surface looks stable.",
|
| 20 |
+
("buildup", StatusLevel.RED): "Built-up area shows rapid change. In a humanitarian context this can indicate displacement-driven settlement growth, unplanned camps, or destruction.",
|
| 21 |
+
("buildup", StatusLevel.AMBER): "Built-up area shows moderate change, consistent with gradual urbanisation.",
|
| 22 |
+
("buildup", StatusLevel.GREEN): "Built-up area is stable relative to the baseline period.",
|
| 23 |
}
|
| 24 |
|
| 25 |
+
|
| 26 |
+
# --- "What to verify on the ground" suggestions per indicator + status ---
|
| 27 |
+
_VERIFY_SUGGESTIONS: dict[tuple[str, StatusLevel], str] = {
|
| 28 |
+
("ndvi", StatusLevel.RED): "Check rainfall records, FEWS NET food security alerts, and contact local agronomists. Look for fire scars, locust reports, or recent clearing.",
|
| 29 |
+
("ndvi", StatusLevel.AMBER): "Compare with seasonal rainfall and consult local extension officers if this falls in a growing season.",
|
| 30 |
+
("water", StatusLevel.RED): "Cross-check with river gauge data, dam releases, and recent weather. If flooding is suspected, verify with affected communities or partner organisations downstream.",
|
| 31 |
+
("water", StatusLevel.AMBER): "Compare with rainfall logs and seasonal flood calendars for the region.",
|
| 32 |
+
("sar", StatusLevel.RED): "Cross-check with optical imagery (cloud-free dates), settlement reports, and conflict event data. Hotspot map shows where to look first.",
|
| 33 |
+
("sar", StatusLevel.AMBER): "Compare with rainfall and seasonal moisture patterns; consider whether this aligns with known activity.",
|
| 34 |
+
("buildup", StatusLevel.RED): "Check displacement and IDP reports, recent satellite imagery from open sources (Copernicus EMS, UNOSAT), and partner field assessments. Examine the change map for clusters.",
|
| 35 |
+
("buildup", StatusLevel.AMBER): "Compare with population estimates and recent infrastructure projects.",
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def get_verify_suggestion(product_id: str, status: StatusLevel) -> str:
|
| 40 |
+
"""Return a 1-sentence 'what to verify' suggestion, or empty string if none."""
|
| 41 |
+
if status == StatusLevel.GREEN:
|
| 42 |
+
return ""
|
| 43 |
+
return _VERIFY_SUGGESTIONS.get((product_id, status), "")
|
| 44 |
+
|
| 45 |
# --- Cross-indicator pattern rules ---
|
| 46 |
_CROSS_PATTERNS: list[tuple[dict[str, set[StatusLevel]], str]] = [
|
| 47 |
(
|
|
|
|
| 59 |
]
|
| 60 |
|
| 61 |
_LEAD_TEMPLATES = {
|
| 62 |
+
StatusLevel.RED: "One or more indicators show major changes that warrant action and ground verification.",
|
| 63 |
+
StatusLevel.AMBER: "One or more indicators show elevated change that should be monitored.",
|
| 64 |
+
StatusLevel.GREEN: "All indicators are within normal ranges for this area and period.",
|
| 65 |
}
|
| 66 |
|
| 67 |
|
|
@@ -24,10 +24,12 @@ from reportlab.platypus.flowables import KeepTogether
|
|
| 24 |
|
| 25 |
from app.models import AOI, TimeRange, ProductResult, StatusLevel
|
| 26 |
|
| 27 |
-
#
|
| 28 |
_DISPLAY_NAMES: dict[str, str] = {
|
| 29 |
-
"
|
| 30 |
-
"
|
|
|
|
|
|
|
| 31 |
}
|
| 32 |
|
| 33 |
|
|
@@ -236,8 +238,17 @@ def _product_block(
|
|
| 236 |
elements.append(Paragraph("<b>What this means</b>", styles["body_muted"]))
|
| 237 |
elements.append(Paragraph(interpretation, styles["body"]))
|
| 238 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 239 |
# Data quality line
|
| 240 |
-
data_source_label = result.data_source or "unknown"
|
|
|
|
|
|
|
| 241 |
quality_line = (
|
| 242 |
f"Confidence: <b>{result.confidence.value.capitalize()}</b> | "
|
| 243 |
f"Trend: <b>{result.trend.value.capitalize()}</b> | "
|
|
@@ -419,11 +430,11 @@ def generate_pdf_report(
|
|
| 419 |
green_count = sum(1 for r in results if r.status == StatusLevel.GREEN)
|
| 420 |
total = len(results)
|
| 421 |
count_line = (
|
| 422 |
-
f"This report covers <b>{total}</b>
|
| 423 |
f"over the period {time_range.start} to {time_range.end}. "
|
| 424 |
-
f"<b><font color='{_RED_HEX}'>{red_count}</font></b> at RED, "
|
| 425 |
-
f"<b><font color='{_AMBER_HEX}'>{amber_count}</font></b> at AMBER, "
|
| 426 |
-
f"<b><font color='{_GREEN_HEX}'>{green_count}</font></b> at GREEN."
|
| 427 |
)
|
| 428 |
story.append(Paragraph(count_line, styles["body"]))
|
| 429 |
|
|
@@ -433,11 +444,11 @@ def generate_pdf_report(
|
|
| 433 |
|
| 434 |
# Compact summary table
|
| 435 |
summary_header = [
|
| 436 |
-
Paragraph("<b>
|
| 437 |
Paragraph("<b>Status</b>", styles["body"]),
|
| 438 |
Paragraph("<b>Trend</b>", styles["body"]),
|
| 439 |
Paragraph("<b>Confidence</b>", styles["body"]),
|
| 440 |
-
Paragraph("<b>
|
| 441 |
Paragraph("<b>Headline</b>", styles["body"]),
|
| 442 |
]
|
| 443 |
summary_rows = [summary_header]
|
|
@@ -453,13 +464,15 @@ def generate_pdf_report(
|
|
| 453 |
alignment=TA_CENTER,
|
| 454 |
),
|
| 455 |
)
|
|
|
|
|
|
|
| 456 |
summary_rows.append([
|
| 457 |
Paragraph(label, styles["body_muted"]),
|
| 458 |
status_cell,
|
| 459 |
Paragraph(result.trend.value.capitalize(), styles["body_muted"]),
|
| 460 |
Paragraph(result.confidence.value.capitalize(), styles["body_muted"]),
|
| 461 |
-
Paragraph(f"{result.anomaly_months}/
|
| 462 |
-
Paragraph(result.headline[:
|
| 463 |
])
|
| 464 |
|
| 465 |
ov_col_w = PAGE_W - 2 * MARGIN
|
|
@@ -518,7 +531,7 @@ def generate_pdf_report(
|
|
| 518 |
# ================================================================== #
|
| 519 |
# SECTION 3: Indicator Deep Dives #
|
| 520 |
# ================================================================== #
|
| 521 |
-
story.append(Paragraph("
|
| 522 |
story.append(Spacer(1, 2 * mm))
|
| 523 |
|
| 524 |
for result in results:
|
|
@@ -558,7 +571,7 @@ def generate_pdf_report(
|
|
| 558 |
story.append(Spacer(1, 2 * mm))
|
| 559 |
|
| 560 |
conf_header = [
|
| 561 |
-
Paragraph("<b>
|
| 562 |
Paragraph("<b>Temporal</b>", styles["body"]),
|
| 563 |
Paragraph("<b>Baseline Depth</b>", styles["body"]),
|
| 564 |
Paragraph("<b>Spatial Compl.</b>", styles["body"]),
|
|
@@ -616,7 +629,7 @@ def generate_pdf_report(
|
|
| 616 |
"remote sensing data. Results are intended to support humanitarian situation analysis "
|
| 617 |
"and should be interpreted alongside ground-truth information and expert judgement. "
|
| 618 |
"Temporal coverage, cloud contamination, and sensor resolution may affect the "
|
| 619 |
-
"reliability of individual
|
| 620 |
)
|
| 621 |
story.append(Paragraph("Disclaimer", styles["section_heading"]))
|
| 622 |
story.append(Paragraph(disclaimer, styles["body_muted"]))
|
|
|
|
| 24 |
|
| 25 |
from app.models import AOI, TimeRange, ProductResult, StatusLevel
|
| 26 |
|
| 27 |
+
# Plain-language display names — non-tech readers see these, not the raw IDs.
|
| 28 |
_DISPLAY_NAMES: dict[str, str] = {
|
| 29 |
+
"ndvi": "Vegetation health",
|
| 30 |
+
"water": "Water bodies",
|
| 31 |
+
"sar": "Ground surface change",
|
| 32 |
+
"buildup": "Built-up areas",
|
| 33 |
}
|
| 34 |
|
| 35 |
|
|
|
|
| 238 |
elements.append(Paragraph("<b>What this means</b>", styles["body_muted"]))
|
| 239 |
elements.append(Paragraph(interpretation, styles["body"]))
|
| 240 |
|
| 241 |
+
# What to verify on the ground (only when there is a non-GREEN finding)
|
| 242 |
+
from app.outputs.narrative import get_verify_suggestion
|
| 243 |
+
verify = get_verify_suggestion(result.product_id, result.status)
|
| 244 |
+
if verify:
|
| 245 |
+
elements.append(Paragraph("<b>What to verify on the ground</b>", styles["body_muted"]))
|
| 246 |
+
elements.append(Paragraph(verify, styles["body"]))
|
| 247 |
+
|
| 248 |
# Data quality line
|
| 249 |
+
data_source_label = (result.data_source or "unknown").capitalize()
|
| 250 |
+
if data_source_label.lower() == "satellite":
|
| 251 |
+
data_source_label = "Satellite imagery"
|
| 252 |
quality_line = (
|
| 253 |
f"Confidence: <b>{result.confidence.value.capitalize()}</b> | "
|
| 254 |
f"Trend: <b>{result.trend.value.capitalize()}</b> | "
|
|
|
|
| 430 |
green_count = sum(1 for r in results if r.status == StatusLevel.GREEN)
|
| 431 |
total = len(results)
|
| 432 |
count_line = (
|
| 433 |
+
f"This report covers <b>{total}</b> indicator(s) for <b>{aoi.name}</b> "
|
| 434 |
f"over the period {time_range.start} to {time_range.end}. "
|
| 435 |
+
f"<b><font color='{_RED_HEX}'>{red_count}</font></b> at RED (action recommended), "
|
| 436 |
+
f"<b><font color='{_AMBER_HEX}'>{amber_count}</font></b> at AMBER (worth monitoring), "
|
| 437 |
+
f"<b><font color='{_GREEN_HEX}'>{green_count}</font></b> at GREEN (within normal range)."
|
| 438 |
)
|
| 439 |
story.append(Paragraph(count_line, styles["body"]))
|
| 440 |
|
|
|
|
| 444 |
|
| 445 |
# Compact summary table
|
| 446 |
summary_header = [
|
| 447 |
+
Paragraph("<b>Indicator</b>", styles["body"]),
|
| 448 |
Paragraph("<b>Status</b>", styles["body"]),
|
| 449 |
Paragraph("<b>Trend</b>", styles["body"]),
|
| 450 |
Paragraph("<b>Confidence</b>", styles["body"]),
|
| 451 |
+
Paragraph("<b>Anomaly months</b>", styles["body"]),
|
| 452 |
Paragraph("<b>Headline</b>", styles["body"]),
|
| 453 |
]
|
| 454 |
summary_rows = [summary_header]
|
|
|
|
| 464 |
alignment=TA_CENTER,
|
| 465 |
),
|
| 466 |
)
|
| 467 |
+
# Total months observed (extracted from chart_data dates if present)
|
| 468 |
+
total_months = len(result.chart_data.get("dates", [])) or 12
|
| 469 |
summary_rows.append([
|
| 470 |
Paragraph(label, styles["body_muted"]),
|
| 471 |
status_cell,
|
| 472 |
Paragraph(result.trend.value.capitalize(), styles["body_muted"]),
|
| 473 |
Paragraph(result.confidence.value.capitalize(), styles["body_muted"]),
|
| 474 |
+
Paragraph(f"{result.anomaly_months}/{total_months}", styles["body_muted"]),
|
| 475 |
+
Paragraph(result.headline[:90], styles["body_muted"]),
|
| 476 |
])
|
| 477 |
|
| 478 |
ov_col_w = PAGE_W - 2 * MARGIN
|
|
|
|
| 531 |
# ================================================================== #
|
| 532 |
# SECTION 3: Indicator Deep Dives #
|
| 533 |
# ================================================================== #
|
| 534 |
+
story.append(Paragraph("Indicator Detail", styles["section_heading"]))
|
| 535 |
story.append(Spacer(1, 2 * mm))
|
| 536 |
|
| 537 |
for result in results:
|
|
|
|
| 571 |
story.append(Spacer(1, 2 * mm))
|
| 572 |
|
| 573 |
conf_header = [
|
| 574 |
+
Paragraph("<b>Indicator</b>", styles["body"]),
|
| 575 |
Paragraph("<b>Temporal</b>", styles["body"]),
|
| 576 |
Paragraph("<b>Baseline Depth</b>", styles["body"]),
|
| 577 |
Paragraph("<b>Spatial Compl.</b>", styles["body"]),
|
|
|
|
| 629 |
"remote sensing data. Results are intended to support humanitarian situation analysis "
|
| 630 |
"and should be interpreted alongside ground-truth information and expert judgement. "
|
| 631 |
"Temporal coverage, cloud contamination, and sensor resolution may affect the "
|
| 632 |
+
"reliability of individual indicators."
|
| 633 |
)
|
| 634 |
story.append(Paragraph("Disclaimer", styles["section_heading"]))
|
| 635 |
story.append(Paragraph(disclaimer, styles["body_muted"]))
|