nakas committed on
Commit
849c5fa
·
1 Parent(s): af6f4c2

fix: robust snow PoE fetch by scanning available asnow*gt*sfc vars; read each var individually to avoid pydap buffer issue; auto-generate labels

Browse files
Files changed (3) hide show
  1. app.py +1 -11
  2. nbm_client.py +35 -26
  3. plot_utils.py +22 -2
app.py CHANGED
@@ -92,18 +92,8 @@ def run_forecast(lat, lon, hours=24):
92
  # Try to fetch snow-related probabilities akin to NBM Viewer
93
  try:
94
  t_idx, prob_map = fetch_point_probabilities(dataset_url, lat, lon, hours=hours)
95
- label_map = {
96
- "asnow762gtaccsfc": ">= 0.3 in",
97
- "asnow254gtaccsfc": ">= 0.1 in",
98
- "asnow1270gtsfc": ">= 0.5 in",
99
- "asnow381gtsfc": ">= 1.5 in",
100
- "asnow508gtsfc": ">= 2.0 in",
101
- "asnow635gtsfc": ">= 2.5 in",
102
- "asnow1016gtsfc": ">= 4.0 in",
103
- "apcp254gtsfc": "P(precip > 10mm)",
104
- }
105
  if len(prob_map) > 0:
106
- snow_prob_fig = make_snow_prob_fig(t_idx, prob_map, label_map)
107
  except Exception as e:
108
  print(f"Probability fetch/plot error: {e}")
109
  snow_prob_fig = None
 
92
  # Try to fetch snow-related probabilities akin to NBM Viewer
93
  try:
94
  t_idx, prob_map = fetch_point_probabilities(dataset_url, lat, lon, hours=hours)
 
 
 
 
 
 
 
 
 
 
95
  if len(prob_map) > 0:
96
+ snow_prob_fig = make_snow_prob_fig(t_idx, prob_map)
97
  except Exception as e:
98
  print(f"Probability fetch/plot error: {e}")
99
  snow_prob_fig = None
nbm_client.py CHANGED
@@ -257,39 +257,48 @@ def fetch_point_probabilities(
257
  ilat = _nearest_index(lat_vals, lat)
258
  ilon = _nearest_index(lon_vals, lon)
259
 
260
- # Default set focused on snow thresholds commonly used in NBM viewer
261
- default_vars = [
262
- "asnow254gtaccsfc", # > 0.1 in
263
- "asnow381gtsfc", # > 1.5 in (0.0381 m)
264
- "asnow508gtsfc", # > 2.0 in (0.0508 m)
265
- "asnow635gtsfc", # > 2.5 in (0.0635 m)
266
- "asnow762gtaccsfc", # > 0.3 in (0.00762 m)
267
- "asnow1270gtsfc", # > 0.5 in (0.0127 m)
268
- "asnow1016gtsfc", # > 4.0 in (0.1016 m)
269
- "apcp254gtsfc", # > 10 mm liquid (approx 0.4 in) example precip prob
270
- ]
271
- sel_vars = variables if variables else default_vars
272
- present = [v for v in sel_vars if v in ds.variables]
273
- if not present:
274
- logger.warning("No requested probability variables present; returning empty")
 
 
 
 
 
275
  return pd.DatetimeIndex([]), {}
276
 
277
- subset = ds[present].isel(lat=ilat, lon=ilon)
 
 
278
  full_index = _to_datetime_index(subset["time"]) # full timeline
279
  step_hours = _infer_step_hours(full_index)
280
  n_req = int(np.ceil(max(1, float(hours)) / step_hours))
281
  n = min(len(full_index), n_req)
282
  t_index = full_index[:n]
283
- subset = subset.isel(time=slice(0, n)).load()
284
-
285
  out: Dict[str, pd.Series] = {}
286
- for v in present:
287
- vals = subset[v].values
288
- # Many probability fields are 0-1 or 0-100; try to normalize to percent
289
- # Heuristic: if max <= 1.0 -> scale by 100.
290
- mx = float(np.nanmax(vals)) if np.isfinite(vals).any() else 0.0
291
- if mx <= 1.0:
292
- vals = vals * 100.0
293
- out[v] = pd.Series(np.round(vals, 1), index=t_index)
 
 
 
 
294
 
295
  return t_index, out
 
257
  ilat = _nearest_index(lat_vals, lat)
258
  ilon = _nearest_index(lon_vals, lon)
259
 
260
+ # Build list of snow probability-of-exceedance vars available in this dataset
261
+ # Accept common thresholds; names vary between hours (sometimes include 'acc')
262
+ desired_threshold_ids = {127, 254, 381, 508, 635, 762, 1016}
263
+ candidate = [v for v in ds.variables if v.startswith("asnow") and v.endswith("sfc") and "gt" in v]
264
+ def _extract_id(v: str) -> Optional[int]:
265
+ import re as _re
266
+ m = _re.search(r"asnow(\d+)", v)
267
+ return int(m.group(1)) if m else None
268
+ filtered: List[str] = []
269
+ for v in candidate:
270
+ vid = _extract_id(v)
271
+ if vid in desired_threshold_ids:
272
+ filtered.append(v)
273
+
274
+ # If user provided explicit list, use intersection
275
+ if variables:
276
+ filtered = [v for v in variables if v in ds.variables]
277
+
278
+ if not filtered:
279
+ logger.warning("No snow probability variables found; returning empty")
280
  return pd.DatetimeIndex([]), {}
281
 
282
+ # Use time coordinate from any available variable
283
+ tvar = filtered[0]
284
+ subset = ds[tvar].isel(lat=ilat, lon=ilon)
285
  full_index = _to_datetime_index(subset["time"]) # full timeline
286
  step_hours = _infer_step_hours(full_index)
287
  n_req = int(np.ceil(max(1, float(hours)) / step_hours))
288
  n = min(len(full_index), n_req)
289
  t_index = full_index[:n]
 
 
290
  out: Dict[str, pd.Series] = {}
291
+ # Read each variable individually to avoid pydap multi-variable buffer bugs
292
+ for v in filtered:
293
+ try:
294
+ arr = ds[v].isel(lat=ilat, lon=ilon, time=slice(0, n)).load()
295
+ vals = np.asarray(arr.values)
296
+ # Normalize to percent if needed
297
+ mx = float(np.nanmax(vals)) if np.isfinite(vals).any() else 0.0
298
+ if mx <= 1.0:
299
+ vals = vals * 100.0
300
+ out[v] = pd.Series(np.round(vals.astype(float), 1), index=t_index)
301
+ except Exception as ex:
302
+ logger.warning(f"Skipping {v} due to read/convert error: {ex}")
303
 
304
  return t_index, out
plot_utils.py CHANGED
@@ -116,8 +116,29 @@ def make_snow_prob_fig(
116
  "#bcbd22",
117
  "#17becf",
118
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
  for i, key in enumerate(order):
120
- label = label_map.get(key, key) if label_map else key
121
  fig.add_trace(
122
  go.Bar(
123
  x=time_index,
@@ -135,4 +156,3 @@ def make_snow_prob_fig(
135
  legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
136
  )
137
  return fig
138
-
 
116
  "#bcbd22",
117
  "#17becf",
118
  ]
119
+ def _auto_label(k: str) -> str:
120
+ # Try to decode asnow threshold to inches
121
+ import re as _re
122
+ m = _re.search(r"asnow(\d+)", k)
123
+ if m:
124
+ val = int(m.group(1))
125
+ # Known special labels
126
+ special = {127: 0.5, 254: 0.1, 381: 1.5, 508: 2.0, 635: 2.5, 762: 0.3, 1016: 4.0}
127
+ if val in special:
128
+ inc = special[val]
129
+ else:
130
+ # Fallback: interpret as meters with 1e4 divisor -> inches
131
+ inc = round((val / 10000.0) / 0.0254, 2)
132
+ return f">= {inc:g} in"
133
+ # apcp threshold label
134
+ m2 = _re.search(r"apcp(\d+)", k)
135
+ if m2:
136
+ mm = int(m2.group(1)) / 10.0 # rough fallback
137
+ return f"P(precip > {mm:g} mm)"
138
+ return k
139
+
140
  for i, key in enumerate(order):
141
+ label = label_map.get(key, _auto_label(key)) if label_map else _auto_label(key)
142
  fig.add_trace(
143
  go.Bar(
144
  x=time_index,
 
156
  legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
157
  )
158
  return fig