KSvend Claude Opus 4.6 (1M context) committed on
Commit
b0128ec
·
1 Parent(s): ffb57c8

fix: aspect ratio, confidence factors, GREEN trend alignment, compound signal gating

Browse files

- maps: geographic aspect (cos-lat) replaces aspect="auto" that caused skewed rendering
- report: PIL-based _fit_image preserves PNG aspect ratio in PDF layout
- report: coordinate-derived AOI display name when name is missing
- report: drop headline truncation in summary table
- report: 4-factor confidence breakdown incl. anomaly consistency
- narrative: drift/gating-aware interpretations via get_interpretation_for_result
- confidence: continuous scoring, new anomaly_consistency factor, expected_months
- base: status-aware trend (GREEN -> STABLE), evidence-gated classification
- worker: skip GREEN and baseline-drift indicators before compound signal detection

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

app/analysis/confidence.py CHANGED
@@ -1,8 +1,16 @@
1
- """Three-factor confidence scoring for EO products.
2
 
3
- Factors: temporal coverage, baseline depth, spatial completeness.
4
- Observation density was removed without per-scene metadata from
5
- openEO, a hardcoded value adds false precision.
 
 
 
 
 
 
 
 
6
  """
7
  from __future__ import annotations
8
 
@@ -11,46 +19,98 @@ from typing import Any
11
  from app.models import ConfidenceLevel
12
 
13
 
14
- def score_temporal_coverage(valid_months: int) -> float:
15
- if valid_months >= 10:
16
- return 1.0
17
- if valid_months >= 7:
18
- return 0.75
19
- if valid_months >= 4:
20
- return 0.5
21
- return 0.25
22
 
23
 
24
- def score_baseline_depth(years_with_data: int) -> float:
25
- if years_with_data >= 5:
26
- return 1.0
27
- if years_with_data >= 4:
28
- return 0.75
29
- if years_with_data >= 2:
30
- return 0.5
31
- return 0.25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
 
34
  def score_spatial_completeness(fraction: float) -> float:
35
- if fraction > 0.9:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  return 1.0
37
- if fraction > 0.75:
38
- return 0.75
39
- if fraction >= 0.5:
40
- return 0.5
41
- return 0.25
42
 
43
 
44
  def compute_confidence(
45
  valid_months: int,
46
- baseline_years_with_data: int,
47
- spatial_completeness: float,
 
 
 
 
48
  ) -> dict[str, Any]:
49
- temporal = score_temporal_coverage(valid_months)
50
- baseline = score_baseline_depth(baseline_years_with_data)
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  spatial = score_spatial_completeness(spatial_completeness)
52
 
53
- score = temporal * 0.35 + baseline * 0.35 + spatial * 0.3
 
 
 
 
 
 
 
 
 
 
54
 
55
  if score > 0.7:
56
  level = ConfidenceLevel.HIGH
@@ -63,8 +123,9 @@ def compute_confidence(
63
  "level": level,
64
  "score": round(score, 3),
65
  "factors": {
66
- "temporal": temporal,
67
- "baseline_depth": baseline,
68
- "spatial_completeness": spatial,
 
69
  },
70
  }
 
1
+ """Continuous four-factor confidence scoring for EO indicators.
2
 
3
+ Factors:
4
+ - **temporal**: fraction of the analysis period with valid monthly data
5
+ - **baseline_depth**: fraction of the expected baseline with valid data
6
+ - **spatial_completeness**: fraction of AOI pixels that are not nodata
7
+ - **anomaly_consistency**: penalty when anomaly months ≈ total months
8
+ (high anomaly fraction signals baseline drift, not per-month signal)
9
+
10
+ All factors are continuous 0..1 — the previous stepped version saturated
11
+ at 1.0 for realistic analyses, producing "1.00 / 1.00 / 1.00 High" on
12
+ every indicator. The new version returns finer-grained values so readers
13
+ can compare relative reliability across indicators.
14
  """
15
  from __future__ import annotations
16
 
 
19
  from app.models import ConfidenceLevel
20
 
21
 
22
+ def _clamp(v: float, lo: float = 0.0, hi: float = 1.0) -> float:
23
+ """Clamp a float into a range."""
24
+ if v < lo:
25
+ return lo
26
+ if v > hi:
27
+ return hi
28
+ return v
 
29
 
30
 
31
+ def score_temporal_coverage(valid_months: int, expected_months: int | None = None) -> float:
32
+ """Fraction of analysis months with valid observations.
33
+
34
+ If ``expected_months`` is not provided, assume 12 months (legacy calls).
35
+ Returns a continuous value in [0, 1].
36
+ """
37
+ if expected_months is None or expected_months <= 0:
38
+ expected_months = 12
39
+ return _clamp(valid_months / expected_months)
40
+
41
+
42
+ def score_baseline_depth(
43
+ baseline_valid_months: int,
44
+ baseline_years: int = 5,
45
+ ) -> float:
46
+ """Fraction of the expected baseline that has valid monthly data.
47
+
48
+ For a 5-year baseline we expect 60 monthly composites. Missing data
49
+ (cloud cover, sensor gaps) reduces this score proportionally.
50
+ """
51
+ expected = max(1, baseline_years * 12)
52
+ return _clamp(baseline_valid_months / expected)
53
 
54
 
55
  def score_spatial_completeness(fraction: float) -> float:
56
+ """Fraction of AOI pixels that are valid (non-nodata).
57
+
58
+ Returned unchanged — already continuous.
59
+ """
60
+ return _clamp(fraction)
61
+
62
+
63
+ def score_anomaly_consistency(anomaly_months: int, total_months: int) -> float:
64
+ """Penalty when anomaly months approach the total.
65
+
66
+ When ~everything is flagged anomalous, that indicates baseline drift or
67
+ regime shift rather than meaningful per-month signal — so our confidence
68
+ in the *per-month* reading drops. Returns 1.0 when anomaly fraction is
69
+ near zero, drops linearly, reaching 0 when 100% of months are anomalous.
70
+ """
71
+ if total_months <= 0:
72
  return 1.0
73
+ frac = anomaly_months / total_months
74
+ return _clamp(1.0 - frac)
 
 
 
75
 
76
 
77
  def compute_confidence(
78
  valid_months: int,
79
+ baseline_years_with_data: int = 5,
80
+ spatial_completeness: float = 1.0,
81
+ *,
82
+ expected_months: int | None = None,
83
+ baseline_valid_months: int | None = None,
84
+ anomaly_months: int = 0,
85
  ) -> dict[str, Any]:
86
+ """Return a four-factor confidence dict for an indicator.
87
+
88
+ Backwards-compatible: old callers passing (valid_months,
89
+ baseline_years_with_data, spatial_completeness) still work. New callers
90
+ should also pass ``expected_months`` and ``baseline_valid_months`` for
91
+ better differentiation.
92
+ """
93
+ temporal = score_temporal_coverage(valid_months, expected_months)
94
+
95
+ # Prefer the more accurate baseline_valid_months when provided; fall
96
+ # back to years × 12 for legacy call sites.
97
+ if baseline_valid_months is None:
98
+ baseline_valid_months = baseline_years_with_data * 12
99
+ baseline = score_baseline_depth(baseline_valid_months, baseline_years=5)
100
+
101
  spatial = score_spatial_completeness(spatial_completeness)
102
 
103
+ total_anom_months = expected_months if expected_months else valid_months
104
+ consistency = score_anomaly_consistency(anomaly_months, total_anom_months)
105
+
106
+ # Weighted composite — temporal and baseline dominate; consistency and
107
+ # spatial are secondary.
108
+ score = (
109
+ temporal * 0.30
110
+ + baseline * 0.30
111
+ + spatial * 0.20
112
+ + consistency * 0.20
113
+ )
114
 
115
  if score > 0.7:
116
  level = ConfidenceLevel.HIGH
 
123
  "level": level,
124
  "score": round(score, 3),
125
  "factors": {
126
+ "temporal": round(temporal, 2),
127
+ "baseline_depth": round(baseline, 2),
128
+ "spatial_completeness": round(spatial, 2),
129
+ "anomaly_consistency": round(consistency, 2),
130
  },
131
  }
app/eo_products/base.py CHANGED
@@ -224,9 +224,23 @@ class BaseProduct(abc.ABC):
224
  return dates
225
 
226
  @staticmethod
227
- def _compute_trend_zscore(monthly_zscores: list[float]) -> "TrendDirection":
228
- """Compute trend from direction of monthly z-scores."""
229
- from app.models import TrendDirection
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
  valid = [z for z in monthly_zscores if z != 0.0]
231
  if len(valid) < 2:
232
  return TrendDirection.STABLE
 
224
  return dates
225
 
226
  @staticmethod
227
+ def _compute_trend_zscore(
228
+ monthly_zscores: list[float],
229
+ *,
230
+ status: "StatusLevel | None" = None,
231
+ ) -> "TrendDirection":
232
+ """Compute trend from the direction of monthly z-scores.
233
+
234
+ If ``status`` is provided and is GREEN, the trend is forced to
235
+ STABLE — we do not describe within-normal variation as
236
+ "improving" or "deteriorating" because it creates contradictory
237
+ narratives (e.g. "within normal range, trend improving").
238
+ """
239
+ from app.models import TrendDirection, StatusLevel
240
+
241
+ if status is not None and status == StatusLevel.GREEN:
242
+ return TrendDirection.STABLE
243
+
244
  valid = [z for z in monthly_zscores if z != 0.0]
245
  if len(valid) < 2:
246
  return TrendDirection.STABLE
app/eo_products/buildup.py CHANGED
@@ -353,10 +353,14 @@ class BuiltupProduct(BaseProduct):
353
  self._zscore_raster = change_raster.astype(np.float32) * 3.0
354
  self._hotspot_mask = np.abs(change_raster) > 0.5
355
 
 
356
  conf = compute_confidence(
357
  valid_months=n_current_months,
358
  baseline_years_with_data=max(1, n_baseline_months // 12),
359
  spatial_completeness=spatial_completeness,
 
 
 
360
  )
361
 
362
  chart_data = {
 
353
  self._zscore_raster = change_raster.astype(np.float32) * 3.0
354
  self._hotspot_mask = np.abs(change_raster) > 0.5
355
 
356
+ expected_months = max(1, n_current_months)
357
  conf = compute_confidence(
358
  valid_months=n_current_months,
359
  baseline_years_with_data=max(1, n_baseline_months // 12),
360
  spatial_completeness=spatial_completeness,
361
+ expected_months=expected_months,
362
+ baseline_valid_months=n_baseline_months,
363
+ anomaly_months=0,
364
  )
365
 
366
  chart_data = {
app/eo_products/ndvi.py CHANGED
@@ -192,20 +192,26 @@ class NdviProduct(BaseProduct):
192
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
193
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
194
  if m in seasonal_stats) / max(baseline_depth, 1))
 
 
 
195
  conf = compute_confidence(
196
  valid_months=n_current_bands,
197
  baseline_years_with_data=int(mean_baseline_years),
198
  spatial_completeness=spatial_completeness,
 
 
 
199
  )
200
  confidence = conf["level"]
201
  confidence_factors = conf["factors"]
202
 
203
  status = self._classify_zscore(
204
- z_current, hotspot_pct,
205
- anomaly_months=anomaly_months,
206
- total_months=n_current_bands,
207
- )
208
- trend = self._compute_trend_zscore(monthly_zscores)
209
 
210
  chart_data = self._build_seasonal_chart_data(
211
  current_stats["monthly_means"], seasonal_stats, time_range, monthly_zscores,
@@ -393,10 +399,16 @@ class NdviProduct(BaseProduct):
393
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
394
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
395
  if m in seasonal_stats) / max(baseline_depth, 1))
 
 
 
396
  conf = compute_confidence(
397
  valid_months=n_current_bands,
398
  baseline_years_with_data=int(mean_baseline_years),
399
  spatial_completeness=spatial_completeness,
 
 
 
400
  )
401
  confidence = conf["level"]
402
  confidence_factors = conf["factors"]
@@ -406,7 +418,7 @@ class NdviProduct(BaseProduct):
406
  anomaly_months=anomaly_months,
407
  total_months=n_current_bands,
408
  )
409
- trend = self._compute_trend_zscore(monthly_zscores)
410
  chart_data = self._build_seasonal_chart_data(
411
  current_stats["monthly_means"], seasonal_stats, time_range, monthly_zscores,
412
  )
 
192
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
193
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
194
  if m in seasonal_stats) / max(baseline_depth, 1))
195
+ expected_months = max(
196
+ 1, ((time_range.end - time_range.start).days // 30) + 1
197
+ )
198
  conf = compute_confidence(
199
  valid_months=n_current_bands,
200
  baseline_years_with_data=int(mean_baseline_years),
201
  spatial_completeness=spatial_completeness,
202
+ expected_months=expected_months,
203
+ baseline_valid_months=baseline_stats.get("valid_months", 0),
204
+ anomaly_months=anomaly_months,
205
  )
206
  confidence = conf["level"]
207
  confidence_factors = conf["factors"]
208
 
209
  status = self._classify_zscore(
210
+ z_current, hotspot_pct,
211
+ anomaly_months=anomaly_months,
212
+ total_months=n_current_bands,
213
+ )
214
+ trend = self._compute_trend_zscore(monthly_zscores, status=status)
215
 
216
  chart_data = self._build_seasonal_chart_data(
217
  current_stats["monthly_means"], seasonal_stats, time_range, monthly_zscores,
 
399
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
400
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
401
  if m in seasonal_stats) / max(baseline_depth, 1))
402
+ expected_months = max(
403
+ 1, ((time_range.end - time_range.start).days // 30) + 1
404
+ )
405
  conf = compute_confidence(
406
  valid_months=n_current_bands,
407
  baseline_years_with_data=int(mean_baseline_years),
408
  spatial_completeness=spatial_completeness,
409
+ expected_months=expected_months,
410
+ baseline_valid_months=baseline_stats.get("valid_months", 0),
411
+ anomaly_months=anomaly_months,
412
  )
413
  confidence = conf["level"]
414
  confidence_factors = conf["factors"]
 
418
  anomaly_months=anomaly_months,
419
  total_months=n_current_bands,
420
  )
421
+ trend = self._compute_trend_zscore(monthly_zscores, status=status)
422
  chart_data = self._build_seasonal_chart_data(
423
  current_stats["monthly_means"], seasonal_stats, time_range, monthly_zscores,
424
  )
app/eo_products/sar.py CHANGED
@@ -205,11 +205,16 @@ class SarProduct(BaseProduct):
205
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
206
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
207
  if m in seasonal_stats) / max(baseline_depth, 1))
 
 
 
208
  conf = compute_confidence(
209
  valid_months=n_current_bands,
210
-
211
  baseline_years_with_data=int(mean_baseline_years),
212
  spatial_completeness=spatial_completeness,
 
 
 
213
  )
214
  confidence = conf["level"]
215
  confidence_factors = conf["factors"]
@@ -249,7 +254,7 @@ class SarProduct(BaseProduct):
249
  anomaly_months=anomaly_months,
250
  total_months=n_current_bands,
251
  )
252
- trend = self._compute_trend_zscore(monthly_zscores)
253
  headline = self._generate_headline(
254
  status=status,
255
  z_current=z_current,
@@ -510,10 +515,16 @@ class SarProduct(BaseProduct):
510
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
511
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
512
  if m in seasonal_stats) / max(baseline_depth, 1))
 
 
 
513
  conf = compute_confidence(
514
  valid_months=n_current_bands,
515
  baseline_years_with_data=int(mean_baseline_years),
516
  spatial_completeness=spatial_completeness,
 
 
 
517
  )
518
  confidence = conf["level"]
519
  confidence_factors = conf["factors"]
@@ -549,7 +560,7 @@ class SarProduct(BaseProduct):
549
  anomaly_months=anomaly_months,
550
  total_months=n_current_bands,
551
  )
552
- trend = self._compute_trend_zscore(monthly_zscores)
553
  headline = self._generate_headline(
554
  status=status,
555
  z_current=z_current,
 
205
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
206
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
207
  if m in seasonal_stats) / max(baseline_depth, 1))
208
+ expected_months = max(
209
+ 1, ((time_range.end - time_range.start).days // 30) + 1
210
+ )
211
  conf = compute_confidence(
212
  valid_months=n_current_bands,
 
213
  baseline_years_with_data=int(mean_baseline_years),
214
  spatial_completeness=spatial_completeness,
215
+ expected_months=expected_months,
216
+ baseline_valid_months=baseline_stats.get("valid_months", 0),
217
+ anomaly_months=anomaly_months,
218
  )
219
  confidence = conf["level"]
220
  confidence_factors = conf["factors"]
 
254
  anomaly_months=anomaly_months,
255
  total_months=n_current_bands,
256
  )
257
+ trend = self._compute_trend_zscore(monthly_zscores, status=status)
258
  headline = self._generate_headline(
259
  status=status,
260
  z_current=z_current,
 
515
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
516
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
517
  if m in seasonal_stats) / max(baseline_depth, 1))
518
+ expected_months = max(
519
+ 1, ((time_range.end - time_range.start).days // 30) + 1
520
+ )
521
  conf = compute_confidence(
522
  valid_months=n_current_bands,
523
  baseline_years_with_data=int(mean_baseline_years),
524
  spatial_completeness=spatial_completeness,
525
+ expected_months=expected_months,
526
+ baseline_valid_months=baseline_stats.get("valid_months", 0),
527
+ anomaly_months=anomaly_months,
528
  )
529
  confidence = conf["level"]
530
  confidence_factors = conf["factors"]
 
560
  anomaly_months=anomaly_months,
561
  total_months=n_current_bands,
562
  )
563
+ trend = self._compute_trend_zscore(monthly_zscores, status=status)
564
  headline = self._generate_headline(
565
  status=status,
566
  z_current=z_current,
app/eo_products/water.py CHANGED
@@ -196,11 +196,16 @@ class WaterProduct(BaseProduct):
196
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
197
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
198
  if m in seasonal_stats) / max(baseline_depth, 1))
 
 
 
199
  conf = compute_confidence(
200
  valid_months=n_current_bands,
201
-
202
  baseline_years_with_data=int(mean_baseline_years),
203
  spatial_completeness=spatial_completeness,
 
 
 
204
  )
205
  confidence = conf["level"]
206
  confidence_factors = conf["factors"]
@@ -211,7 +216,7 @@ class WaterProduct(BaseProduct):
211
  total_months=n_current_bands,
212
  min_coverage_pct=current_frac * 100.0,
213
  )
214
- trend = self._compute_trend_zscore(monthly_zscores)
215
 
216
  baseline_seasonal_fractions = self._build_seasonal_water_fractions(
217
  baseline_stats["monthly_water_fractions"], BASELINE_YEARS,
@@ -275,11 +280,21 @@ class WaterProduct(BaseProduct):
275
  hotspot_pct=round(hotspot_pct, 1),
276
  confidence_factors=confidence_factors,
277
  summary=(
278
- f"Water covers {current_frac*100:.1f}% of the AOI (mean MNDWI {current_mean:.3f}, "
279
- f"z-score {z_current:+.1f} vs seasonal baseline). "
280
- f"{anomaly_months} of {n_current_bands} months show significant anomalies. "
281
- f"{hotspot_pct:.0f}% of AOI has statistically significant change. "
282
- f"Pixel-level MNDWI analysis at {WATER_RESOLUTION_M}m resolution."
 
 
 
 
 
 
 
 
 
 
283
  ),
284
  methodology=(
285
  f"Sentinel-2 L2A pixel-level MNDWI = (B03 \u2212 B11) / (B03 + B11). "
@@ -287,6 +302,8 @@ class WaterProduct(BaseProduct):
287
  f"Monthly median composites at {WATER_RESOLUTION_M}m native resolution. "
288
  f"Baseline: {BASELINE_YEARS}-year seasonal baselines (per calendar month). "
289
  f"Anomaly detection via z-scores (threshold: \u00b1{ZSCORE_THRESHOLD}). "
 
 
290
  f"Processed server-side via CDSE openEO batch jobs."
291
  ),
292
  limitations=[
@@ -407,10 +424,16 @@ class WaterProduct(BaseProduct):
407
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
408
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
409
  if m in seasonal_stats) / max(baseline_depth, 1))
 
 
 
410
  conf = compute_confidence(
411
  valid_months=n_current_bands,
412
  baseline_years_with_data=int(mean_baseline_years),
413
  spatial_completeness=spatial_completeness,
 
 
 
414
  )
415
  confidence = conf["level"]
416
  confidence_factors = conf["factors"]
@@ -421,7 +444,7 @@ class WaterProduct(BaseProduct):
421
  total_months=n_current_bands,
422
  min_coverage_pct=current_frac * 100.0,
423
  )
424
- trend = self._compute_trend_zscore(monthly_zscores)
425
  baseline_seasonal_fractions = self._build_seasonal_water_fractions(
426
  baseline_stats["monthly_water_fractions"], BASELINE_YEARS,
427
  )
@@ -467,11 +490,21 @@ class WaterProduct(BaseProduct):
467
  hotspot_pct=round(hotspot_pct, 1),
468
  confidence_factors=confidence_factors,
469
  summary=(
470
- f"Water covers {current_frac*100:.1f}% of the AOI (mean MNDWI {current_mean:.3f}, "
471
- f"z-score {z_current:+.1f} vs seasonal baseline). "
472
- f"{anomaly_months} of {n_current_bands} months show significant anomalies. "
473
- f"{hotspot_pct:.0f}% of AOI has statistically significant change. "
474
- f"Pixel-level MNDWI analysis at {WATER_RESOLUTION_M}m resolution."
 
 
 
 
 
 
 
 
 
 
475
  ),
476
  methodology=(
477
  f"Sentinel-2 L2A pixel-level MNDWI = (B03 \u2212 B11) / (B03 + B11). "
 
196
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
197
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
198
  if m in seasonal_stats) / max(baseline_depth, 1))
199
+ expected_months = max(
200
+ 1, ((time_range.end - time_range.start).days // 30) + 1
201
+ )
202
  conf = compute_confidence(
203
  valid_months=n_current_bands,
 
204
  baseline_years_with_data=int(mean_baseline_years),
205
  spatial_completeness=spatial_completeness,
206
+ expected_months=expected_months,
207
+ baseline_valid_months=baseline_stats.get("valid_months", 0),
208
+ anomaly_months=anomaly_months,
209
  )
210
  confidence = conf["level"]
211
  confidence_factors = conf["factors"]
 
216
  total_months=n_current_bands,
217
  min_coverage_pct=current_frac * 100.0,
218
  )
219
+ trend = self._compute_trend_zscore(monthly_zscores, status=status)
220
 
221
  baseline_seasonal_fractions = self._build_seasonal_water_fractions(
222
  baseline_stats["monthly_water_fractions"], BASELINE_YEARS,
 
280
  hotspot_pct=round(hotspot_pct, 1),
281
  confidence_factors=confidence_factors,
282
  summary=(
283
+ (
284
+ f"Water covers only {current_frac*100:.1f}% of the AOI — too small to interpret "
285
+ f"per-pixel anomalies. The raw z-score ({z_current:+.1f}) and the "
286
+ f"{hotspot_pct:.0f}% pixel-level change are dominated by noise (shadows, "
287
+ f"dark surfaces, wet soil) rather than real water bodies. Pixel-level "
288
+ f"MNDWI analysis at {WATER_RESOLUTION_M}m resolution."
289
+ )
290
+ if current_frac * 100.0 < 0.5
291
+ else (
292
+ f"Water covers {current_frac*100:.1f}% of the AOI (mean MNDWI {current_mean:.3f}, "
293
+ f"z-score {z_current:+.1f} vs seasonal baseline). "
294
+ f"{anomaly_months} of {n_current_bands} months show significant anomalies. "
295
+ f"{hotspot_pct:.0f}% of AOI has statistically significant change. "
296
+ f"Pixel-level MNDWI analysis at {WATER_RESOLUTION_M}m resolution."
297
+ )
298
  ),
299
  methodology=(
300
  f"Sentinel-2 L2A pixel-level MNDWI = (B03 \u2212 B11) / (B03 + B11). "
 
302
  f"Monthly median composites at {WATER_RESOLUTION_M}m native resolution. "
303
  f"Baseline: {BASELINE_YEARS}-year seasonal baselines (per calendar month). "
304
  f"Anomaly detection via z-scores (threshold: \u00b1{ZSCORE_THRESHOLD}). "
305
+ f"Coverage gate: indicators with <0.5% water area are forced to GREEN — "
306
+ f"pixel-level z-scores are dominated by noise in near-dry landscapes. "
307
  f"Processed server-side via CDSE openEO batch jobs."
308
  ),
309
  limitations=[
 
424
  if m in seasonal_stats and seasonal_stats[m]["n_years"] > 0)
425
  mean_baseline_years = (sum(seasonal_stats[m]["n_years"] for m in range(1, 13)
426
  if m in seasonal_stats) / max(baseline_depth, 1))
427
+ expected_months = max(
428
+ 1, ((time_range.end - time_range.start).days // 30) + 1
429
+ )
430
  conf = compute_confidence(
431
  valid_months=n_current_bands,
432
  baseline_years_with_data=int(mean_baseline_years),
433
  spatial_completeness=spatial_completeness,
434
+ expected_months=expected_months,
435
+ baseline_valid_months=baseline_stats.get("valid_months", 0),
436
+ anomaly_months=anomaly_months,
437
  )
438
  confidence = conf["level"]
439
  confidence_factors = conf["factors"]
 
444
  total_months=n_current_bands,
445
  min_coverage_pct=current_frac * 100.0,
446
  )
447
+ trend = self._compute_trend_zscore(monthly_zscores, status=status)
448
  baseline_seasonal_fractions = self._build_seasonal_water_fractions(
449
  baseline_stats["monthly_water_fractions"], BASELINE_YEARS,
450
  )
 
490
  hotspot_pct=round(hotspot_pct, 1),
491
  confidence_factors=confidence_factors,
492
  summary=(
493
+ (
494
+ f"Water covers only {current_frac*100:.1f}% of the AOI — too small to interpret "
495
+ f"per-pixel anomalies. The raw z-score ({z_current:+.1f}) and the "
496
+ f"{hotspot_pct:.0f}% pixel-level change are dominated by noise (shadows, "
497
+ f"dark surfaces, wet soil) rather than real water bodies. Pixel-level "
498
+ f"MNDWI analysis at {WATER_RESOLUTION_M}m resolution."
499
+ )
500
+ if current_frac * 100.0 < 0.5
501
+ else (
502
+ f"Water covers {current_frac*100:.1f}% of the AOI (mean MNDWI {current_mean:.3f}, "
503
+ f"z-score {z_current:+.1f} vs seasonal baseline). "
504
+ f"{anomaly_months} of {n_current_bands} months show significant anomalies. "
505
+ f"{hotspot_pct:.0f}% of AOI has statistically significant change. "
506
+ f"Pixel-level MNDWI analysis at {WATER_RESOLUTION_M}m resolution."
507
+ )
508
  ),
509
  methodology=(
510
  f"Sentinel-2 L2A pixel-level MNDWI = (B03 \u2212 B11) / (B03 + B11). "
app/outputs/maps.py CHANGED
@@ -257,7 +257,8 @@ def render_raster_map(
257
  rgb_max = max(rgb.max(), 1.0)
258
  scale = 3000.0 if rgb_max > 255 else 255.0
259
  rgb_normalized = np.clip(rgb / scale, 0, 1).transpose(1, 2, 0)
260
- ax.imshow(rgb_normalized, extent=extent, aspect="auto", zorder=0)
 
261
 
262
  # Render indicator raster overlay
263
  if indicator_path is not None:
@@ -267,13 +268,14 @@ def render_raster_map(
267
  ind_extent = [src.bounds.left, src.bounds.right, src.bounds.bottom, src.bounds.top]
268
  if extent is None:
269
  extent = ind_extent
 
270
  masked = np.ma.masked_where(
271
  (data == nodata) if nodata is not None else np.zeros_like(data, dtype=bool),
272
  data,
273
  )
274
  im = ax.imshow(
275
  masked, extent=ind_extent, cmap=cmap, alpha=alpha,
276
- vmin=vmin, vmax=vmax, aspect="auto", zorder=1,
277
  )
278
  cbar = fig.colorbar(im, ax=ax, fraction=0.03, pad=0.04, shrink=0.85)
279
  cbar.set_label(label, fontsize=7, color=INK_MUTED)
@@ -283,6 +285,7 @@ def render_raster_map(
283
  if extent is not None:
284
  ax.set_xlim(extent[0], extent[1])
285
  ax.set_ylim(extent[2], extent[3])
 
286
  color = STATUS_COLORS[status]
287
  _draw_aoi_rect(ax, aoi, color)
288
 
@@ -301,6 +304,21 @@ def render_raster_map(
301
  plt.close(fig)
302
 
303
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
304
  def render_hotspot_map(
305
  *,
306
  true_color_path: str | None,
@@ -316,12 +334,17 @@ def render_hotspot_map(
316
 
317
  Only pixels where |z-score| > threshold are shown; non-significant
318
  pixels are transparent, letting the true-color base show through.
 
 
 
319
  """
320
  import rasterio
321
 
322
  fig, ax = plt.subplots(figsize=(6, 5), dpi=200, facecolor=SHELL)
323
  ax.set_facecolor(SHELL)
324
 
 
 
325
  # True-color base layer
326
  if true_color_path is not None:
327
  with rasterio.open(true_color_path) as src:
@@ -330,22 +353,23 @@ def render_hotspot_map(
330
  rgb_max = max(rgb.max(), 1.0)
331
  scale = 3000.0 if rgb_max > 255 else 255.0
332
  rgb_normalized = np.clip(rgb / scale, 0, 1).transpose(1, 2, 0)
333
- ax.imshow(rgb_normalized, extent=tc_extent, aspect="auto", zorder=0)
334
 
335
  # Hotspot overlay — only significant pixels, masked elsewhere
336
  masked_z = np.ma.masked_where(~hotspot_mask, zscore_raster)
337
  vmax = min(float(np.nanmax(np.abs(zscore_raster))), 5.0)
338
  im = ax.imshow(
339
  masked_z, extent=extent, cmap="RdBu_r", alpha=0.8,
340
- vmin=-vmax, vmax=vmax, aspect="auto", zorder=1,
341
  )
342
  cbar = fig.colorbar(im, ax=ax, fraction=0.03, pad=0.04, shrink=0.85)
343
  cbar.set_label(f"{label} (decline \u2190 \u2192 increase)", fontsize=7, color=INK_MUTED)
344
  cbar.ax.tick_params(labelsize=6, colors=INK_MUTED)
345
 
346
- # AOI outline
347
  ax.set_xlim(extent[0], extent[1])
348
  ax.set_ylim(extent[2], extent[3])
 
349
  color = STATUS_COLORS[status]
350
  _draw_aoi_rect(ax, aoi, color)
351
 
@@ -398,7 +422,9 @@ def render_overview_map(
398
  rgb_max = max(rgb.max(), 1.0)
399
  scale = 3000.0 if rgb_max > 255 else 255.0
400
  rgb_normalized = np.clip(rgb / scale, 0, 1).transpose(1, 2, 0)
401
- ax.imshow(rgb_normalized, extent=extent, aspect="auto")
 
 
402
 
403
  # AOI outline
404
  _draw_aoi_rect(ax, aoi, INK)
 
257
  rgb_max = max(rgb.max(), 1.0)
258
  scale = 3000.0 if rgb_max > 255 else 255.0
259
  rgb_normalized = np.clip(rgb / scale, 0, 1).transpose(1, 2, 0)
260
+ geo_aspect = _geographic_aspect(extent)
261
+ ax.imshow(rgb_normalized, extent=extent, aspect=geo_aspect, zorder=0)
262
 
263
  # Render indicator raster overlay
264
  if indicator_path is not None:
 
268
  ind_extent = [src.bounds.left, src.bounds.right, src.bounds.bottom, src.bounds.top]
269
  if extent is None:
270
  extent = ind_extent
271
+ geo_aspect = _geographic_aspect(extent)
272
  masked = np.ma.masked_where(
273
  (data == nodata) if nodata is not None else np.zeros_like(data, dtype=bool),
274
  data,
275
  )
276
  im = ax.imshow(
277
  masked, extent=ind_extent, cmap=cmap, alpha=alpha,
278
+ vmin=vmin, vmax=vmax, aspect=geo_aspect, zorder=1,
279
  )
280
  cbar = fig.colorbar(im, ax=ax, fraction=0.03, pad=0.04, shrink=0.85)
281
  cbar.set_label(label, fontsize=7, color=INK_MUTED)
 
285
  if extent is not None:
286
  ax.set_xlim(extent[0], extent[1])
287
  ax.set_ylim(extent[2], extent[3])
288
+ ax.set_aspect(_geographic_aspect(extent))
289
  color = STATUS_COLORS[status]
290
  _draw_aoi_rect(ax, aoi, color)
291
 
 
304
  plt.close(fig)
305
 
306
 
307
+ def _geographic_aspect(extent: list[float]) -> float:
308
+ """Return matplotlib aspect ratio that preserves geographic scale.
309
+
310
+ For an extent [west, east, south, north] at mid-latitude, 1° of
311
+ longitude is shorter than 1° of latitude by cos(lat). Setting the
312
+ axis aspect to 1/cos(lat) makes equal degrees display as equal
313
+ kilometres.
314
+ """
315
+ west, east, south, north = extent
316
+ mid_lat = (south + north) / 2.0
317
+ # Guard against division by zero at poles
318
+ cos_lat = max(np.cos(np.radians(mid_lat)), 1e-3)
319
+ return 1.0 / cos_lat
320
+
321
+
322
  def render_hotspot_map(
323
  *,
324
  true_color_path: str | None,
 
334
 
335
  Only pixels where |z-score| > threshold are shown; non-significant
336
  pixels are transparent, letting the true-color base show through.
337
+
338
+ Uses a geographic (cos-lat-corrected) axis aspect so the image is
339
+ never stretched when it's later placed into the PDF.
340
  """
341
  import rasterio
342
 
343
  fig, ax = plt.subplots(figsize=(6, 5), dpi=200, facecolor=SHELL)
344
  ax.set_facecolor(SHELL)
345
 
346
+ geo_aspect = _geographic_aspect(extent)
347
+
348
  # True-color base layer
349
  if true_color_path is not None:
350
  with rasterio.open(true_color_path) as src:
 
353
  rgb_max = max(rgb.max(), 1.0)
354
  scale = 3000.0 if rgb_max > 255 else 255.0
355
  rgb_normalized = np.clip(rgb / scale, 0, 1).transpose(1, 2, 0)
356
+ ax.imshow(rgb_normalized, extent=tc_extent, aspect=geo_aspect, zorder=0)
357
 
358
  # Hotspot overlay — only significant pixels, masked elsewhere
359
  masked_z = np.ma.masked_where(~hotspot_mask, zscore_raster)
360
  vmax = min(float(np.nanmax(np.abs(zscore_raster))), 5.0)
361
  im = ax.imshow(
362
  masked_z, extent=extent, cmap="RdBu_r", alpha=0.8,
363
+ vmin=-vmax, vmax=vmax, aspect=geo_aspect, zorder=1,
364
  )
365
  cbar = fig.colorbar(im, ax=ax, fraction=0.03, pad=0.04, shrink=0.85)
366
  cbar.set_label(f"{label} (decline \u2190 \u2192 increase)", fontsize=7, color=INK_MUTED)
367
  cbar.ax.tick_params(labelsize=6, colors=INK_MUTED)
368
 
369
+ # AOI outline and axis limits
370
  ax.set_xlim(extent[0], extent[1])
371
  ax.set_ylim(extent[2], extent[3])
372
+ ax.set_aspect(geo_aspect)
373
  color = STATUS_COLORS[status]
374
  _draw_aoi_rect(ax, aoi, color)
375
 
 
422
  rgb_max = max(rgb.max(), 1.0)
423
  scale = 3000.0 if rgb_max > 255 else 255.0
424
  rgb_normalized = np.clip(rgb / scale, 0, 1).transpose(1, 2, 0)
425
+ geo_aspect = _geographic_aspect(extent)
426
+ ax.imshow(rgb_normalized, extent=extent, aspect=geo_aspect)
427
+ ax.set_aspect(geo_aspect)
428
 
429
  # AOI outline
430
  _draw_aoi_rect(ax, aoi, INK)
app/outputs/narrative.py CHANGED
@@ -42,6 +42,47 @@ def get_verify_suggestion(product_id: str, status: StatusLevel) -> str:
42
  return ""
43
  return _VERIFY_SUGGESTIONS.get((product_id, status), "")
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  # --- Direction-aware cross-indicator pattern rules ---
46
  #
47
  # Each rule describes a pattern of (indicator_id, required_direction) pairs.
 
42
  return ""
43
  return _VERIFY_SUGGESTIONS.get((product_id, status), "")
44
 
45
+
46
def get_interpretation_for_result(result: "ProductResult") -> str:
    """Return an interpretation that accounts for drift/gating diagnostics.

    The plain per-status templates fall short in two situations:

    * SAR baseline drift — the status is AMBER but the headline flags the
      baseline as unreliable, so a generic "moderate ground-surface
      changes" reading would contradict the drift call.
    * Water coverage gate — the status is forced GREEN in a near-dry
      landscape even when pixel z-scores are large; the narrative should
      say the indicator is not actionable rather than "within seasonal
      range".
    """
    pid = result.product_id
    headline = result.headline or ""

    # SAR drift detection — keyed off the headline phrase set by sar.py.
    if pid == "sar" and "baseline may be unreliable" in headline.lower():
        return (
            "The radar baseline does not appear stable for this period — "
            "most months diverge from the 5-year reference. This is more "
            "consistent with sensor calibration changes, orbit-geometry "
            "shifts, or a regional regime shift than a real per-month "
            "anomaly pattern. Treat per-month z-scores as unreliable and "
            "re-check with a shorter baseline window."
        )

    # Water coverage gate — keyed off the "0.0%" coverage phrase in the
    # headline together with the gate-forced GREEN status.
    if pid == "water" and result.status == StatusLevel.GREEN and "0.0%" in headline:
        return (
            "The area is essentially dry: the indicator is not meaningful "
            "below ~0.5% water coverage. Pixel-level z-scores can still "
            "fluctuate due to shadows, dark surfaces, or wet soil — but "
            "they don't represent real water bodies."
        )

    # Fall back to the generic per-(product, status) template, with a
    # last-resort sentence derived from the product id.
    fallback = f"{pid.replace('_', ' ').title()} status is {result.status.value}."
    return _INTERPRETATIONS.get((pid, result.status), fallback)
85
+
86
  # --- Direction-aware cross-indicator pattern rules ---
87
  #
88
  # Each rule describes a pattern of (indicator_id, required_direction) pairs.
app/outputs/report.py CHANGED
@@ -24,6 +24,59 @@ from reportlab.platypus.flowables import KeepTogether
24
 
25
  from app.models import AOI, TimeRange, ProductResult, StatusLevel
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  # Plain-language display names — non-tech readers see these, not the raw IDs.
28
  _DISPLAY_NAMES: dict[str, str] = {
29
  "ndvi": "Vegetation health",
@@ -195,8 +248,8 @@ def _product_block(
195
  chart_exists = chart_path and os.path.exists(chart_path)
196
 
197
  if map_exists and chart_exists:
198
- map_img = Image(map_path, width=8 * cm, height=6 * cm)
199
- chart_img = Image(chart_path, width=8 * cm, height=6 * cm)
200
  img_table = Table(
201
  [[map_img, chart_img]],
202
  colWidths=[8.5 * cm, 8.5 * cm],
@@ -210,12 +263,12 @@ def _product_block(
210
  elements.append(img_table)
211
  elements.append(Spacer(1, 2 * mm))
212
  elif map_exists:
213
- img = Image(map_path, width=12 * cm, height=9 * cm)
214
  img.hAlign = "CENTER"
215
  elements.append(img)
216
  elements.append(Spacer(1, 2 * mm))
217
  elif chart_exists:
218
- img = Image(chart_path, width=12 * cm, height=9 * cm)
219
  img.hAlign = "CENTER"
220
  elements.append(img)
221
  elements.append(Spacer(1, 2 * mm))
@@ -223,8 +276,7 @@ def _product_block(
223
  # Hotspot change map (if available)
224
  hotspot_exists = hotspot_path and os.path.exists(hotspot_path)
225
  if hotspot_exists:
226
- from reportlab.platypus import Image as RLImage
227
- hotspot_img = RLImage(hotspot_path, width=14 * cm, height=5.5 * cm)
228
  hotspot_img.hAlign = "CENTER"
229
  elements.append(hotspot_img)
230
  elements.append(Spacer(1, 2 * mm))
@@ -233,8 +285,11 @@ def _product_block(
233
  elements.append(Paragraph("<b>What the data shows</b>", styles["body_muted"]))
234
  elements.append(Paragraph(result.summary, styles["body"]))
235
 
236
- # What this means
237
- interpretation = get_interpretation(result.product_id, result.status)
 
 
 
238
  elements.append(Paragraph("<b>What this means</b>", styles["body_muted"]))
239
  elements.append(Paragraph(interpretation, styles["body"]))
240
 
@@ -309,6 +364,8 @@ def generate_pdf_report(
309
  PAGE_W, PAGE_H = A4
310
  MARGIN = 2 * cm
311
 
 
 
312
  # ------------------------------------------------------------------ #
313
  # Page template with header rule and footer #
314
  # ------------------------------------------------------------------ #
@@ -322,7 +379,7 @@ def generate_pdf_report(
322
  canvas.setFont("Helvetica", 7)
323
  canvas.setFillColor(INK_MUTED)
324
  footer_text = (
325
- f"MERLx Aperture \u2014 Situation Report \u2014 {aoi.name} \u2014 "
326
  f"{time_range.start} to {time_range.end} \u2014 "
327
  f"Page {doc.page}"
328
  )
@@ -335,7 +392,7 @@ def generate_pdf_report(
335
  output_path,
336
  pagesize=A4,
337
  pageTemplates=[template],
338
- title=f"MERLx Aperture — Situation Report — {aoi.name}",
339
  author="MERLx Aperture",
340
  )
341
  doc.pageBackgrounds = [colors.white]
@@ -352,13 +409,12 @@ def generate_pdf_report(
352
  # SECTION 1: The Place #
353
  # ================================================================== #
354
  story.append(Paragraph("MERLx Aperture — Situation Report", styles["title"]))
355
- story.append(Paragraph(aoi.name, styles["subtitle"]))
356
  story.append(Spacer(1, 2 * mm))
357
 
358
- # Overview map (full width)
359
  if overview_map_path and os.path.exists(overview_map_path):
360
- from reportlab.platypus import Image
361
- img = Image(overview_map_path, width=14 * cm, height=10.5 * cm)
362
  img.hAlign = "CENTER"
363
  story.append(img)
364
  story.append(Spacer(1, 3 * mm))
@@ -430,7 +486,7 @@ def generate_pdf_report(
430
  green_count = sum(1 for r in results if r.status == StatusLevel.GREEN)
431
  total = len(results)
432
  count_line = (
433
- f"This report covers <b>{total}</b> indicator(s) for <b>{aoi.name}</b> "
434
  f"over the period {time_range.start} to {time_range.end}. "
435
  f"<b><font color='{_RED_HEX}'>{red_count}</font></b> at RED (action recommended), "
436
  f"<b><font color='{_AMBER_HEX}'>{amber_count}</font></b> at AMBER (worth monitoring), "
@@ -472,7 +528,7 @@ def generate_pdf_report(
472
  Paragraph(result.trend.value.capitalize(), styles["body_muted"]),
473
  Paragraph(result.confidence.value.capitalize(), styles["body_muted"]),
474
  Paragraph(f"{result.anomaly_months}/{total_months}", styles["body_muted"]),
475
- Paragraph(result.headline[:90], styles["body_muted"]),
476
  ])
477
 
478
  ov_col_w = PAGE_W - 2 * MARGIN
@@ -573,8 +629,9 @@ def generate_pdf_report(
573
  conf_header = [
574
  Paragraph("<b>Indicator</b>", styles["body"]),
575
  Paragraph("<b>Temporal</b>", styles["body"]),
576
- Paragraph("<b>Baseline Depth</b>", styles["body"]),
577
- Paragraph("<b>Spatial Compl.</b>", styles["body"]),
 
578
  Paragraph("<b>Overall</b>", styles["body"]),
579
  ]
580
  conf_rows = [conf_header]
@@ -586,6 +643,7 @@ def generate_pdf_report(
586
  Paragraph(f"{f.get('temporal', 0):.2f}", styles["body_muted"]),
587
  Paragraph(f"{f.get('baseline_depth', 0):.2f}", styles["body_muted"]),
588
  Paragraph(f"{f.get('spatial_completeness', 0):.2f}", styles["body_muted"]),
 
589
  Paragraph(result.confidence.value.capitalize(), styles["body_muted"]),
590
  ])
591
 
@@ -593,7 +651,7 @@ def generate_pdf_report(
593
  conf_col_w = PAGE_W - 2 * MARGIN
594
  conf_table = Table(
595
  conf_rows,
596
- colWidths=[conf_col_w * 0.20] + [conf_col_w * 0.20] * 4,
597
  )
598
  conf_table.setStyle(TableStyle([
599
  ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#E8E6E0")),
 
24
 
25
  from app.models import AOI, TimeRange, ProductResult, StatusLevel
26
 
27
+
28
+ def _display_aoi_name(aoi: AOI) -> str:
29
+ """Return a human-readable name for the AOI, or derive one from coordinates.
30
+
31
+ If the user didn't set a name (or passed "Unnamed area"), generate a
32
+ coordinate-anchored label from the bbox centroid, e.g.:
33
+ "AOI near 12.0°N 24.9°E"
34
+ The label is deterministic and self-documenting — no reverse geocoding.
35
+ """
36
+ raw = (aoi.name or "").strip()
37
+ if raw and raw.lower() not in ("unnamed area", "unnamed", "none"):
38
+ return raw
39
+ west, south, east, north = aoi.bbox
40
+ lat = (south + north) / 2.0
41
+ lon = (west + east) / 2.0
42
+ ns = "N" if lat >= 0 else "S"
43
+ ew = "E" if lon >= 0 else "W"
44
+ return f"AOI near {abs(lat):.1f}°{ns} {abs(lon):.1f}°{ew}"
45
+
46
+
47
+ def _image_aspect(path: str) -> float:
48
+ """Return PNG aspect ratio (width / height) using PIL, or 1.33 fallback."""
49
+ try:
50
+ from PIL import Image as PILImage
51
+ with PILImage.open(path) as im:
52
+ w, h = im.size
53
+ if h > 0:
54
+ return float(w) / float(h)
55
+ except Exception:
56
+ pass
57
+ return 4.0 / 3.0
58
+
59
+
60
def _fit_image(path: str, max_width_cm: float, max_height_cm: float):
    """Return a ReportLab Image scaled to fit a box while preserving aspect.

    ReportLab stretches a PNG to whatever (width, height) it is given,
    which is exactly the "skewed map" bug users reported in the PDF
    layout. Here we read the PNG's own aspect ratio and letterbox it
    into the requested box instead.
    """
    from reportlab.platypus import Image as RLImage

    ratio = _image_aspect(path)  # width / height
    if ratio >= max_width_cm / max_height_cm:
        # Wider than the box: the width is the binding constraint.
        width = max_width_cm * cm
        height = width / ratio
    else:
        # Taller than the box: the height is the binding constraint.
        height = max_height_cm * cm
        width = height * ratio
    return RLImage(path, width=width, height=height)
79
+
80
  # Plain-language display names — non-tech readers see these, not the raw IDs.
81
  _DISPLAY_NAMES: dict[str, str] = {
82
  "ndvi": "Vegetation health",
 
248
  chart_exists = chart_path and os.path.exists(chart_path)
249
 
250
  if map_exists and chart_exists:
251
+ map_img = _fit_image(map_path, max_width_cm=8, max_height_cm=6)
252
+ chart_img = _fit_image(chart_path, max_width_cm=8, max_height_cm=6)
253
  img_table = Table(
254
  [[map_img, chart_img]],
255
  colWidths=[8.5 * cm, 8.5 * cm],
 
263
  elements.append(img_table)
264
  elements.append(Spacer(1, 2 * mm))
265
  elif map_exists:
266
+ img = _fit_image(map_path, max_width_cm=12, max_height_cm=9)
267
  img.hAlign = "CENTER"
268
  elements.append(img)
269
  elements.append(Spacer(1, 2 * mm))
270
  elif chart_exists:
271
+ img = _fit_image(chart_path, max_width_cm=12, max_height_cm=9)
272
  img.hAlign = "CENTER"
273
  elements.append(img)
274
  elements.append(Spacer(1, 2 * mm))
 
276
  # Hotspot change map (if available)
277
  hotspot_exists = hotspot_path and os.path.exists(hotspot_path)
278
  if hotspot_exists:
279
+ hotspot_img = _fit_image(hotspot_path, max_width_cm=14, max_height_cm=7)
 
280
  hotspot_img.hAlign = "CENTER"
281
  elements.append(hotspot_img)
282
  elements.append(Spacer(1, 2 * mm))
 
285
  elements.append(Paragraph("<b>What the data shows</b>", styles["body_muted"]))
286
  elements.append(Paragraph(result.summary, styles["body"]))
287
 
288
+ # What this means — use the result-aware interpretation so SAR drift
289
+ # and water coverage gating produce honest narratives instead of the
290
+ # generic per-status templates.
291
+ from app.outputs.narrative import get_interpretation_for_result
292
+ interpretation = get_interpretation_for_result(result)
293
  elements.append(Paragraph("<b>What this means</b>", styles["body_muted"]))
294
  elements.append(Paragraph(interpretation, styles["body"]))
295
 
 
364
  PAGE_W, PAGE_H = A4
365
  MARGIN = 2 * cm
366
 
367
+ display_name = _display_aoi_name(aoi)
368
+
369
  # ------------------------------------------------------------------ #
370
  # Page template with header rule and footer #
371
  # ------------------------------------------------------------------ #
 
379
  canvas.setFont("Helvetica", 7)
380
  canvas.setFillColor(INK_MUTED)
381
  footer_text = (
382
+ f"MERLx Aperture \u2014 Situation Report \u2014 {display_name} \u2014 "
383
  f"{time_range.start} to {time_range.end} \u2014 "
384
  f"Page {doc.page}"
385
  )
 
392
  output_path,
393
  pagesize=A4,
394
  pageTemplates=[template],
395
+ title=f"MERLx Aperture — Situation Report — {display_name}",
396
  author="MERLx Aperture",
397
  )
398
  doc.pageBackgrounds = [colors.white]
 
409
  # SECTION 1: The Place #
410
  # ================================================================== #
411
  story.append(Paragraph("MERLx Aperture — Situation Report", styles["title"]))
412
+ story.append(Paragraph(display_name, styles["subtitle"]))
413
  story.append(Spacer(1, 2 * mm))
414
 
415
+ # Overview map (full width) — aspect-preserving
416
  if overview_map_path and os.path.exists(overview_map_path):
417
+ img = _fit_image(overview_map_path, max_width_cm=14, max_height_cm=10.5)
 
418
  img.hAlign = "CENTER"
419
  story.append(img)
420
  story.append(Spacer(1, 3 * mm))
 
486
  green_count = sum(1 for r in results if r.status == StatusLevel.GREEN)
487
  total = len(results)
488
  count_line = (
489
+ f"This report covers <b>{total}</b> indicator(s) for <b>{display_name}</b> "
490
  f"over the period {time_range.start} to {time_range.end}. "
491
  f"<b><font color='{_RED_HEX}'>{red_count}</font></b> at RED (action recommended), "
492
  f"<b><font color='{_AMBER_HEX}'>{amber_count}</font></b> at AMBER (worth monitoring), "
 
528
  Paragraph(result.trend.value.capitalize(), styles["body_muted"]),
529
  Paragraph(result.confidence.value.capitalize(), styles["body_muted"]),
530
  Paragraph(f"{result.anomaly_months}/{total_months}", styles["body_muted"]),
531
+ Paragraph(result.headline, styles["body_muted"]),
532
  ])
533
 
534
  ov_col_w = PAGE_W - 2 * MARGIN
 
629
  conf_header = [
630
  Paragraph("<b>Indicator</b>", styles["body"]),
631
  Paragraph("<b>Temporal</b>", styles["body"]),
632
+ Paragraph("<b>Baseline</b>", styles["body"]),
633
+ Paragraph("<b>Spatial</b>", styles["body"]),
634
+ Paragraph("<b>Consistency</b>", styles["body"]),
635
  Paragraph("<b>Overall</b>", styles["body"]),
636
  ]
637
  conf_rows = [conf_header]
 
643
  Paragraph(f"{f.get('temporal', 0):.2f}", styles["body_muted"]),
644
  Paragraph(f"{f.get('baseline_depth', 0):.2f}", styles["body_muted"]),
645
  Paragraph(f"{f.get('spatial_completeness', 0):.2f}", styles["body_muted"]),
646
+ Paragraph(f"{f.get('anomaly_consistency', 1.0):.2f}", styles["body_muted"]),
647
  Paragraph(result.confidence.value.capitalize(), styles["body_muted"]),
648
  ])
649
 
 
651
  conf_col_w = PAGE_W - 2 * MARGIN
652
  conf_table = Table(
653
  conf_rows,
654
+ colWidths=[conf_col_w * 0.25] + [conf_col_w * 0.15] * 5,
655
  )
656
  conf_table.setStyle(TableStyle([
657
  ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#E8E6E0")),
app/worker.py CHANGED
@@ -262,17 +262,40 @@ async def process_job(job_id: str, db: Database, registry: ProductRegistry) -> N
262
  product_hotspot_paths[result.product_id] = hotspot_path
263
  output_files.append(hotspot_path)
264
 
265
- # Cross-indicator compound signal detection
 
 
 
 
 
266
  from app.analysis.compound import detect_compound_signals
267
  import numpy as np
 
 
 
 
 
 
 
 
 
 
268
 
269
  zscore_rasters = {}
270
  for result in job.results:
 
 
271
  product_obj = registry.get(result.product_id)
272
  z = getattr(product_obj, '_zscore_raster', None)
273
  if z is not None:
274
  zscore_rasters[result.product_id] = z
275
 
 
 
 
 
 
 
276
  compound_signals = []
277
  if len(zscore_rasters) >= 2:
278
  # Upsample to finest resolution for best spatial overlap detection
 
262
  product_hotspot_paths[result.product_id] = hotspot_path
263
  output_files.append(hotspot_path)
264
 
265
+ # Cross-indicator compound signal detection.
266
+ # Skip indicators that cannot contribute reliably:
267
+ # - GREEN status (no signal, including coverage-gated water)
268
+ # - Headlines flagged as baseline drift
269
+ # This prevents false-positive compound signals fired off pixel-level
270
+ # noise from indicators we already deemed unreliable at the AOI level.
271
  from app.analysis.compound import detect_compound_signals
272
  import numpy as np
273
+ from app.models import StatusLevel
274
+
275
+ unreliable_pids: set[str] = set()
276
+ for result in job.results:
277
+ if result.status == StatusLevel.GREEN:
278
+ unreliable_pids.add(result.product_id)
279
+ continue
280
+ headline_lower = (result.headline or "").lower()
281
+ if "baseline may be unreliable" in headline_lower:
282
+ unreliable_pids.add(result.product_id)
283
 
284
  zscore_rasters = {}
285
  for result in job.results:
286
+ if result.product_id in unreliable_pids:
287
+ continue
288
  product_obj = registry.get(result.product_id)
289
  z = getattr(product_obj, '_zscore_raster', None)
290
  if z is not None:
291
  zscore_rasters[result.product_id] = z
292
 
293
+ if unreliable_pids:
294
+ logger.info(
295
+ "Compound signal detection skipping unreliable indicators: %s",
296
+ sorted(unreliable_pids),
297
+ )
298
+
299
  compound_signals = []
300
  if len(zscore_rasters) >= 2:
301
  # Upsample to finest resolution for best spatial overlap detection