DavMelchi committed on
Commit
d1d53ee
·
1 Parent(s): 0782ac5

Add double-click drill-down navigation, complaint sites filtering, multi-KPI comparison with normalization, and LRU caching for drill-down figures with pagination support across all tables

Browse files
Files changed (1) hide show
  1. panel_app/kpi_health_check_panel.py +609 -6
panel_app/kpi_health_check_panel.py CHANGED
@@ -1,6 +1,8 @@
1
  import io
2
  import os
3
  import sys
 
 
4
  from datetime import date, timedelta
5
 
6
  import numpy as np
@@ -13,9 +15,11 @@ ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
  if ROOT_DIR not in sys.path:
14
  sys.path.insert(0, ROOT_DIR)
15
 
 
16
  from process_kpi.kpi_health_check.engine import (
17
  evaluate_health_check,
18
  is_bad,
 
19
  window_bounds,
20
  )
21
  from process_kpi.kpi_health_check.export import build_export_bytes
@@ -62,6 +66,21 @@ current_top_anomalies_df: pd.DataFrame | None = None
62
  current_top_anomalies_raw: pd.DataFrame | None = None
63
  current_export_bytes: bytes | None = None
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  _applying_profile: bool = False
66
  _loading_datasets: bool = False
67
  _updating_drilldown: bool = False
@@ -101,10 +120,177 @@ def _schedule_drilldown_update(fn) -> None:
101
  _wrapped()
102
 
103
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  file_2g = pn.widgets.FileInput(name="2G KPI report", accept=".csv,.zip")
105
  file_3g = pn.widgets.FileInput(name="3G KPI report", accept=".csv,.zip")
106
  file_lte = pn.widgets.FileInput(name="LTE KPI report", accept=".csv,.zip")
107
 
 
 
 
 
 
108
  analysis_range = pn.widgets.DateRangePicker(name="Analysis date range (optional)")
109
  baseline_days = pn.widgets.IntInput(name="Baseline window (days)", value=30)
110
  recent_days = pn.widgets.IntInput(name="Recent window (days)", value=7)
@@ -160,6 +346,23 @@ rules_table = pn.widgets.Tabulator(
160
  height=260, sizing_mode="stretch_width", layout="fit_data_table"
161
  )
162
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
  try:
164
  rules_table.editable = True
165
  except Exception: # noqa: BLE001
@@ -193,10 +396,24 @@ rat_select = pn.widgets.RadioButtonGroup(
193
  name="RAT", options=["2G", "3G", "LTE"], value="LTE"
194
  )
195
  kpi_select = pn.widgets.Select(name="KPI", options=[])
 
 
 
 
 
 
 
 
 
196
 
197
  site_kpi_table = pn.widgets.Tabulator(
198
  height=260, sizing_mode="stretch_width", layout="fit_data_table"
199
  )
 
 
 
 
 
200
  trend_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
201
  heatmap_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
202
  hist_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
@@ -284,6 +501,12 @@ def _update_kpi_options() -> None:
284
  kpi_select.options = kpis
285
  if kpis and kpi_select.value not in kpis:
286
  kpi_select.value = kpis[0]
 
 
 
 
 
 
287
  finally:
288
  _updating_drilldown = False
289
 
@@ -386,10 +609,53 @@ def _update_site_view(event=None) -> None:
386
  hist_plot_pane.object = None
387
  return
388
 
389
- title = f"{rat} - {kpi} - site {int(code)}"
390
- fig = px.line(s, x="date_only", y=kpi, markers=True)
391
- fig.update_layout(template="plotly_white", title=title)
392
- trend_plot_pane.object = fig
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
393
 
394
  rules_df = (
395
  rules_table.value
@@ -429,6 +695,16 @@ def _update_site_view(event=None) -> None:
429
  )
430
  hist_plot_pane.object = _build_baseline_recent_hist(d, int(code), str(kpi))
431
 
 
 
 
 
 
 
 
 
 
 
432
 
433
  def _apply_city_filter(df: pd.DataFrame) -> pd.DataFrame:
434
  if df is None or df.empty:
@@ -439,6 +715,127 @@ def _apply_city_filter(df: pd.DataFrame) -> pd.DataFrame:
439
  return df[df["City"].astype(str).str.contains(q, case=False, na=False)].copy()
440
 
441
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
442
  def _infer_rule_row(rules_df: pd.DataFrame, rat: str, kpi: str) -> dict:
443
  if rules_df is None or rules_df.empty:
444
  return {}
@@ -710,6 +1107,12 @@ def _refresh_filtered_results(event=None) -> None:
710
 
711
  if current_multirat_raw is not None and not current_multirat_raw.empty:
712
  m = _apply_city_filter(current_multirat_raw)
 
 
 
 
 
 
713
  score_col = (
714
  "criticality_score_weighted"
715
  if "criticality_score_weighted" in m.columns
@@ -729,6 +1132,12 @@ def _refresh_filtered_results(event=None) -> None:
729
 
730
  if current_top_anomalies_raw is not None and not current_top_anomalies_raw.empty:
731
  t = _apply_city_filter(current_top_anomalies_raw)
 
 
 
 
 
 
732
  if top_rat_filter.value:
733
  t = t[t["RAT"].isin(list(top_rat_filter.value))]
734
  if top_status_filter.value and "status" in t.columns:
@@ -795,6 +1204,7 @@ def _current_profile_config() -> dict:
795
  cfg["min_criticality"] = int(min_criticality.value)
796
  cfg["min_anomaly_score"] = int(min_anomaly_score.value)
797
  cfg["city_filter"] = str(city_filter.value or "")
 
798
  cfg["top_rat_filter"] = list(top_rat_filter.value) if top_rat_filter.value else []
799
  cfg["top_status_filter"] = (
800
  list(top_status_filter.value) if top_status_filter.value else []
@@ -806,6 +1216,10 @@ def _current_profile_config() -> dict:
806
  "site_code": int(site_select.value) if site_select.value is not None else None,
807
  "rat": str(rat_select.value or ""),
808
  "kpi": str(kpi_select.value or ""),
 
 
 
 
809
  }
810
  return cfg
811
 
@@ -844,6 +1258,15 @@ def _apply_profile_config(cfg: dict) -> None:
844
  except Exception: # noqa: BLE001
845
  pass
846
 
 
 
 
 
 
 
 
 
 
847
  try:
848
  city_filter.value = str(cfg.get("city_filter", "") or "")
849
  except Exception: # noqa: BLE001
@@ -901,6 +1324,19 @@ def _apply_profile_config(cfg: dict) -> None:
901
  kpi_select.value = kpi
902
  except Exception: # noqa: BLE001
903
  pass
 
 
 
 
 
 
 
 
 
 
 
 
 
904
  finally:
905
  _applying_profile = False
906
 
@@ -996,6 +1432,8 @@ def _apply_preset(event=None) -> None:
996
  status_pane.object = f"Preset applied: {preset_select.value}"
997
  current_export_bytes = None
998
 
 
 
999
 
1000
  def _save_current_rules_as_preset(event=None) -> None:
1001
  try:
@@ -1031,6 +1469,8 @@ def _delete_selected_preset(event=None) -> None:
1031
  status_pane.alert_type = "success"
1032
  status_pane.object = f"Preset deleted: {name}"
1033
  current_export_bytes = None
 
 
1034
  except Exception as exc: # noqa: BLE001
1035
  status_pane.alert_type = "danger"
1036
  status_pane.object = f"Error deleting preset: {exc}"
@@ -1256,7 +1696,7 @@ def run_health_check(event=None) -> None:
1256
 
1257
  _refresh_filtered_results()
1258
 
1259
- current_export_bytes = _build_export_bytes()
1260
 
1261
  _update_site_view()
1262
 
@@ -1316,6 +1756,7 @@ profile_delete_button.on_click(_delete_profile)
1316
 
1317
  _refresh_presets()
1318
  _refresh_profiles()
 
1319
 
1320
 
1321
  def _on_rat_change(event=None) -> None:
@@ -1333,21 +1774,181 @@ def _on_drilldown_change(event=None) -> None:
1333
  rat_select.param.watch(_on_rat_change, "value")
1334
  site_select.param.watch(_on_drilldown_change, "value")
1335
  kpi_select.param.watch(_on_drilldown_change, "value")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1336
 
1337
  min_criticality.param.watch(_refresh_filtered_results, "value")
1338
  min_anomaly_score.param.watch(_refresh_filtered_results, "value")
1339
  city_filter.param.watch(_refresh_filtered_results, "value")
 
1340
  top_rat_filter.param.watch(_refresh_filtered_results, "value")
1341
  top_status_filter.param.watch(_refresh_filtered_results, "value")
1342
 
 
 
1343
  export_button.callback = _export_callback
1344
 
1345
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1346
  # Page layout components (used by the multipage portal)
1347
  sidebar = pn.Column(
1348
  file_2g,
1349
  file_3g,
1350
  file_lte,
 
1351
  "---",
1352
  analysis_range,
1353
  baseline_days,
@@ -1359,6 +1960,7 @@ sidebar = pn.Column(
1359
  min_criticality,
1360
  min_anomaly_score,
1361
  city_filter,
 
1362
  top_rat_filter,
1363
  top_status_filter,
1364
  "---",
@@ -1394,7 +1996,8 @@ main = pn.Column(
1394
  top_anomalies_table,
1395
  pn.layout.Divider(),
1396
  pn.pane.Markdown("## Drill-down"),
1397
- pn.Row(site_select, rat_select, kpi_select),
 
1398
  pn.Row(
1399
  pn.Column(site_kpi_table, sizing_mode="stretch_width"),
1400
  pn.Column(trend_plot_pane, sizing_mode="stretch_both"),
 
1
  import io
2
  import os
3
  import sys
4
+ import time
5
+ from collections import OrderedDict
6
  from datetime import date, timedelta
7
 
8
  import numpy as np
 
15
  if ROOT_DIR not in sys.path:
16
  sys.path.insert(0, ROOT_DIR)
17
 
18
+ from panel_app.convert_to_excel_panel import write_dfs_to_excel
19
  from process_kpi.kpi_health_check.engine import (
20
  evaluate_health_check,
21
  is_bad,
22
+ max_consecutive_days,
23
  window_bounds,
24
  )
25
  from process_kpi.kpi_health_check.export import build_export_bytes
 
66
  current_top_anomalies_raw: pd.DataFrame | None = None
67
  current_export_bytes: bytes | None = None
68
 
69
+ complaint_sites: set[int] = set()
70
+
71
+ _daily_version: int = 0
72
+ _rules_version: int = 0
73
+ _healthcheck_version: int = 0
74
+
75
+ _DRILLDOWN_CACHE_MAX: int = 64
76
+ _drilldown_fig_cache: "OrderedDict[tuple, tuple]" = OrderedDict()
77
+
78
+ _DOUBLE_CLICK_S: float = 0.35
79
+ _last_click_state: dict[str, tuple[float, int | None]] = {
80
+ "top": (0.0, None),
81
+ "multirat": (0.0, None),
82
+ }
83
+
84
  _applying_profile: bool = False
85
  _loading_datasets: bool = False
86
  _updating_drilldown: bool = False
 
120
  _wrapped()
121
 
122
 
123
def _invalidate_drilldown_cache(
    *,
    data_changed: bool = False,
    rules_changed: bool = False,
    healthcheck_changed: bool = False,
) -> None:
    """Invalidate cached drill-down figures.

    Each flag bumps the matching module-level version counter so that every
    previously computed cache key stops matching; the figure cache itself is
    then emptied unconditionally (best effort).
    """
    global _daily_version, _rules_version, _healthcheck_version

    if data_changed:
        _daily_version = _daily_version + 1
    if rules_changed:
        _rules_version = _rules_version + 1
    if healthcheck_changed:
        _healthcheck_version = _healthcheck_version + 1

    try:
        _drilldown_fig_cache.clear()
    except Exception:  # noqa: BLE001
        # Clearing is best effort; never let cache upkeep raise into callers.
        pass
140
+
141
+
142
def _drilldown_cache_key(site_code: int, rat: str, kpi: str) -> tuple:
    """Build the hashable cache key for one drill-down figure set.

    The key folds in the module-level version counters plus every widget
    input that influences the rendered figures, so any relevant change
    produces a cache miss.
    """
    date_range = analysis_range.value
    if date_range and len(date_range) == 2:
        range_key = (
            str(date_range[0]) if date_range[0] else None,
            str(date_range[1]) if date_range[1] else None,
        )
    else:
        range_key = (None, None)

    compare_key = tuple(sorted(str(k) for k in (kpi_compare_select.value or [])))

    return (
        int(_daily_version),
        int(_rules_version),
        int(_healthcheck_version),
        int(site_code),
        str(rat or ""),
        str(kpi or ""),
        compare_key,
        str(kpi_compare_norm.value or "None"),
        range_key,
        int(baseline_days.value),
        int(recent_days.value),
        float(rel_threshold_pct.value),
        int(min_consecutive_days.value),
    )
165
+
166
+
167
def _drilldown_cache_get(key: tuple):
    """Return the cached figure tuple for *key* (or None), refreshing LRU order on a hit."""
    value = _drilldown_fig_cache.get(key)
    if value is None:
        return None
    # Mark as most recently used so eviction drops the oldest entries first.
    _drilldown_fig_cache.move_to_end(key)
    return value
172
+
173
+
174
def _drilldown_cache_set(key: tuple, value) -> None:
    """Store *value* as the most recent cache entry, evicting the oldest past the cap."""
    cache = _drilldown_fig_cache
    cache[key] = value
    cache.move_to_end(key)
    limit = int(_DRILLDOWN_CACHE_MAX)
    while len(cache) > limit:
        cache.popitem(last=False)
179
+
180
+
181
def _table_row_as_dict(table: pn.widgets.Tabulator, row_idx: int) -> dict | None:
    """Return row *row_idx* of the table's visible data as a plain dict.

    Prefers the filtered/sorted ``current_view`` when it is a DataFrame and
    falls back to the raw ``value``. Returns None for out-of-range indices,
    missing/empty data, or any unexpected failure.
    """
    try:
        view = getattr(table, "current_view", None)
        frame = view if isinstance(view, pd.DataFrame) else table.value
        if not isinstance(frame, pd.DataFrame) or frame.empty:
            return None
        if not 0 <= row_idx < len(frame):
            return None
        return dict(frame.iloc[int(row_idx)].to_dict())
    except Exception:  # noqa: BLE001
        return None
192
+
193
+
194
def _apply_drilldown_selection(*, site_code, rat=None, kpi=None) -> None:
    """Point the drill-down widgets at a (site, RAT, KPI) combination.

    Used by the double-click navigation: sets the RAT first (so KPI options
    refresh for that RAT), then the site, then the KPI, and finally ensures
    the KPI is part of the comparison selection before scheduling a view
    refresh. Invalid or unknown values are silently ignored.

    Fixes vs. previous revision: removed a dead ``if ...: pass`` membership
    guard on ``site_select.options`` (it had no effect) and simplified a
    redundant ``[x] + cur if cur else [x]`` conditional — ``[x] + cur`` is
    already correct when ``cur`` is empty.
    """
    try:
        if site_code is None:
            return
        try:
            site_code_int = int(site_code)
        except Exception:  # noqa: BLE001
            # Non-numeric site codes cannot be selected.
            return

        # Switch RAT first so the KPI option list matches the target RAT.
        if rat and rat in list(rat_select.options or []):
            _set_widget_value(rat_select, str(rat))

        _update_kpi_options()

        _set_widget_value(site_select, site_code_int)

        if kpi and str(kpi) in list(kpi_select.options or []):
            _set_widget_value(kpi_select, str(kpi))

        # Make sure the drilled-into KPI leads the multi-KPI comparison.
        if kpi and str(kpi) in list(kpi_compare_select.options or []):
            cur = list(kpi_compare_select.value or [])
            if str(kpi) not in cur:
                _set_widget_value(kpi_compare_select, [str(kpi)] + cur)

        _schedule_drilldown_update(_update_site_view)
    except Exception:  # noqa: BLE001
        # Navigation is best effort; never break the UI on a bad row.
        return
225
+
226
+
227
def _handle_double_click(table_key: str, table: pn.widgets.Tabulator, event) -> None:
    """Detect a double click on a Tabulator row and drill into the clicked site.

    Tabulator only reports single clicks, so a double click is synthesized
    here: two clicks on the same row within ``_DOUBLE_CLICK_S`` seconds.
    ``table_key`` selects the dispatch behavior ("top" → top-anomalies table,
    "multirat" → multi-RAT summary table); per-table click state lives in
    ``_last_click_state``.
    """
    try:
        # Event is expected to carry the clicked row index; bail out quietly
        # if it doesn't (e.g. header click).
        row = getattr(event, "row", None)
        if row is None:
            return
        row = int(row)
    except Exception:  # noqa: BLE001
        return

    now = float(time.monotonic())
    last_t, last_row = _last_click_state.get(table_key, (0.0, None))
    if last_row == row and (now - float(last_t)) <= float(_DOUBLE_CLICK_S):
        # Second click on the same row within the window: this is a double
        # click. Reset the state so a third click does not re-trigger.
        _last_click_state[table_key] = (0.0, None)
        data = _table_row_as_dict(table, row)
        if not data:
            return

        if table_key == "top":
            # Top-anomalies rows carry site, RAT and KPI directly.
            _apply_drilldown_selection(
                site_code=data.get("site_code"),
                rat=data.get("RAT"),
                kpi=data.get("KPI"),
            )
            try:
                status_pane.alert_type = "primary"
                status_pane.object = f"Drill-down: site {int(data.get('site_code'))} | {data.get('RAT')} | {data.get('KPI')}"
            except Exception:  # noqa: BLE001
                pass
            return

        if table_key == "multirat":
            # Multi-RAT rows have no single RAT; pick the worst one by
            # scoring persistent (weight 2) + degraded (weight 1) counts
            # from the per-RAT columns of the clicked row.
            site_code = data.get("site_code")
            best_rat = rat_select.value
            try:
                best_score = -1
                for r in list(rat_select.options or []):
                    p = pd.to_numeric(data.get(f"persistent_{r}", 0), errors="coerce")
                    d = pd.to_numeric(data.get(f"degraded_{r}", 0), errors="coerce")
                    p = int(p) if pd.notna(p) else 0
                    d = int(d) if pd.notna(d) else 0
                    score = p * 2 + d
                    if score > best_score:
                        best_score = score
                        best_rat = r
            except Exception:  # noqa: BLE001
                # Fall back to the currently selected RAT on any parse issue.
                best_rat = rat_select.value

            _apply_drilldown_selection(site_code=site_code, rat=best_rat)
            try:
                status_pane.alert_type = "primary"
                status_pane.object = f"Drill-down: site {int(site_code)} | {best_rat}"
            except Exception:  # noqa: BLE001
                pass
            return
    else:
        # First click (or different row / too slow): remember it so the next
        # click can complete the double-click gesture.
        _last_click_state[table_key] = (now, row)
283
+
284
+
285
  file_2g = pn.widgets.FileInput(name="2G KPI report", accept=".csv,.zip")
286
  file_3g = pn.widgets.FileInput(name="3G KPI report", accept=".csv,.zip")
287
  file_lte = pn.widgets.FileInput(name="LTE KPI report", accept=".csv,.zip")
288
 
289
+ complaint_sites_file = pn.widgets.FileInput(
290
+ name="Complaint sites list (optional)", accept=".csv,.txt,.xlsx"
291
+ )
292
+ only_complaint_sites = pn.widgets.Checkbox(name="Only complaint sites", value=False)
293
+
294
  analysis_range = pn.widgets.DateRangePicker(name="Analysis date range (optional)")
295
  baseline_days = pn.widgets.IntInput(name="Baseline window (days)", value=30)
296
  recent_days = pn.widgets.IntInput(name="Recent window (days)", value=7)
 
346
  height=260, sizing_mode="stretch_width", layout="fit_data_table"
347
  )
348
 
349
+
350
def _set_tabulator_pagination(table: pn.widgets.Tabulator, page_size: int = 50) -> None:
    """Enable client-side ("local") pagination on a Tabulator, best effort.

    Newer Panel releases expose ``pagination``/``page_size`` as widget
    parameters; older ones only take the raw Tabulator ``configuration``
    dict, which is used as a fallback when the direct assignment fails.
    """
    try:
        table.pagination = "local"
        table.page_size = int(page_size)
        return
    except Exception:  # noqa: BLE001
        pass

    try:
        updated = dict(table.configuration or {})
        updated["pagination"] = "local"
        updated["paginationSize"] = int(page_size)
        table.configuration = updated
    except Exception:  # noqa: BLE001
        # Pagination is a nicety; never fail widget construction over it.
        pass
364
+
365
+
366
  try:
367
  rules_table.editable = True
368
  except Exception: # noqa: BLE001
 
396
  name="RAT", options=["2G", "3G", "LTE"], value="LTE"
397
  )
398
  kpi_select = pn.widgets.Select(name="KPI", options=[])
399
+ kpi_compare_select = pn.widgets.MultiChoice(name="Compare KPIs", options=[], value=[])
400
+ kpi_compare_norm = pn.widgets.Select(
401
+ name="Normalization", options=["None", "Min-Max", "Z-score"], value="None"
402
+ )
403
+ drilldown_export_button = pn.widgets.FileDownload(
404
+ label="Download drill-down",
405
+ filename="KPI_Drilldown.xlsx",
406
+ button_type="primary",
407
+ )
408
 
409
  site_kpi_table = pn.widgets.Tabulator(
410
  height=260, sizing_mode="stretch_width", layout="fit_data_table"
411
  )
412
+
413
+ _set_tabulator_pagination(site_summary_table, page_size=50)
414
+ _set_tabulator_pagination(multirat_summary_table, page_size=50)
415
+ _set_tabulator_pagination(top_anomalies_table, page_size=50)
416
+ _set_tabulator_pagination(site_kpi_table, page_size=50)
417
  trend_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
418
  heatmap_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
419
  hist_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
 
501
  kpi_select.options = kpis
502
  if kpis and kpi_select.value not in kpis:
503
  kpi_select.value = kpis[0]
504
+
505
+ kpi_compare_select.options = kpis
506
+ cur = list(kpi_compare_select.value or [])
507
+ cur2 = [str(x) for x in cur if str(x) in kpis]
508
+ if cur2 != cur:
509
+ kpi_compare_select.value = cur2
510
  finally:
511
  _updating_drilldown = False
512
 
 
609
  hist_plot_pane.object = None
610
  return
611
 
612
+ cache_key = _drilldown_cache_key(int(code), str(rat), str(kpi))
613
+ cached = _drilldown_cache_get(cache_key)
614
+ if cached is not None:
615
+ trend_plot_pane.object, heatmap_plot_pane.object, hist_plot_pane.object = cached
616
+ return
617
+
618
+ selected_kpis = [str(x) for x in (kpi_compare_select.value or []) if str(x)]
619
+ if not selected_kpis:
620
+ selected_kpis = [str(kpi)] if kpi else []
621
+ else:
622
+ if kpi and str(kpi) not in selected_kpis:
623
+ selected_kpis = [str(kpi)] + selected_kpis
624
+
625
+ selected_kpis = [x for x in selected_kpis if x in s.columns]
626
+ if not selected_kpis:
627
+ trend_plot_pane.object = None
628
+ else:
629
+ plot_df = s[["date_only"] + selected_kpis].copy()
630
+ for col in selected_kpis:
631
+ plot_df[col] = pd.to_numeric(plot_df[col], errors="coerce")
632
+
633
+ norm_mode = str(kpi_compare_norm.value or "None")
634
+ if norm_mode != "None":
635
+ for col in selected_kpis:
636
+ vals = pd.to_numeric(plot_df[col], errors="coerce")
637
+ if norm_mode == "Min-Max":
638
+ vmin = float(vals.min()) if vals.notna().any() else 0.0
639
+ vmax = float(vals.max()) if vals.notna().any() else 0.0
640
+ denom = vmax - vmin
641
+ plot_df[col] = (vals - vmin) / denom if denom else 0.0
642
+ elif norm_mode == "Z-score":
643
+ mean = float(vals.mean()) if vals.notna().any() else 0.0
644
+ std = float(vals.std()) if vals.notna().any() else 0.0
645
+ plot_df[col] = (vals - mean) / std if std else 0.0
646
+
647
+ df_long = plot_df.melt(
648
+ id_vars=["date_only"],
649
+ value_vars=selected_kpis,
650
+ var_name="KPI",
651
+ value_name="value",
652
+ )
653
+ title = f"{rat} - site {int(code)}"
654
+ if norm_mode != "None" and len(selected_kpis) > 1:
655
+ title = f"{title} (compare, {norm_mode})"
656
+ fig = px.line(df_long, x="date_only", y="value", color="KPI", markers=True)
657
+ fig.update_layout(template="plotly_white", title=title)
658
+ trend_plot_pane.object = fig
659
 
660
  rules_df = (
661
  rules_table.value
 
695
  )
696
  hist_plot_pane.object = _build_baseline_recent_hist(d, int(code), str(kpi))
697
 
698
+ try:
699
+ drilldown_export_button.filename = f"KPI_Drilldown_{rat}_site_{int(code)}.xlsx"
700
+ except Exception: # noqa: BLE001
701
+ pass
702
+
703
+ _drilldown_cache_set(
704
+ cache_key,
705
+ (trend_plot_pane.object, heatmap_plot_pane.object, hist_plot_pane.object),
706
+ )
707
+
708
 
709
  def _apply_city_filter(df: pd.DataFrame) -> pd.DataFrame:
710
  if df is None or df.empty:
 
715
  return df[df["City"].astype(str).str.contains(q, case=False, na=False)].copy()
716
 
717
 
718
+ def _extract_site_codes_from_any(df: pd.DataFrame) -> set[int]:
719
+ if df is None or df.empty:
720
+ return set()
721
+
722
+ candidates = [
723
+ "site_code",
724
+ "site",
725
+ "site id",
726
+ "site_id",
727
+ "code",
728
+ "code_site",
729
+ "id",
730
+ ]
731
+ col = None
732
+ cols_lower = {str(c).strip().lower(): c for c in df.columns}
733
+ for c in candidates:
734
+ if c in cols_lower:
735
+ col = cols_lower[c]
736
+ break
737
+ if col is None:
738
+ col = df.columns[0]
739
+
740
+ raw = df[col].astype(str).str.strip()
741
+ nums = pd.to_numeric(raw, errors="coerce")
742
+ if nums.isna().all():
743
+ extracted = raw.str.extract(r"(\d{3,})")[0]
744
+ nums = pd.to_numeric(extracted, errors="coerce")
745
+
746
+ out = nums.dropna().astype(int).tolist()
747
+ return set(out)
748
+
749
+
750
def _load_complaint_sites_from_bytes(content: bytes, filename: str) -> set[int]:
    """Parse an uploaded complaint-sites file into a set of site codes.

    The format is chosen by extension: Excel (``.xlsx``/``.xls``), plain
    text (``.txt``, one entry per line), otherwise CSV — first with
    delimiter sniffing, then with pandas defaults. Any parse failure
    yields an empty set.
    """
    name = str(filename or "").lower()

    if name.endswith((".xlsx", ".xls")):
        try:
            frame = pd.read_excel(io.BytesIO(content))
        except Exception:  # noqa: BLE001
            return set()
        return _extract_site_codes_from_any(frame)

    if name.endswith(".txt"):
        try:
            lines = content.decode("utf-8", errors="ignore").splitlines()
        except Exception:  # noqa: BLE001
            return set()
        return _extract_site_codes_from_any(pd.DataFrame({"site_code": lines}))

    # Default: CSV. Try delimiter sniffing first, then plain read_csv.
    try:
        frame = pd.read_csv(io.BytesIO(content), sep=None, engine="python")
    except Exception:  # noqa: BLE001
        try:
            frame = pd.read_csv(io.BytesIO(content))
        except Exception:  # noqa: BLE001
            return set()
    return _extract_site_codes_from_any(frame)
775
+
776
+
777
def _refresh_complaint_sites(event=None) -> None:
    """Reload the complaint-site set, reapply row flags, and bust the cache.

    An uploaded file takes precedence; otherwise the first readable default
    file under ``<ROOT>/data`` wins. When nothing loads, the set is emptied.
    """
    global complaint_sites

    if complaint_sites_file.value:
        complaint_sites = _load_complaint_sites_from_bytes(
            complaint_sites_file.value, complaint_sites_file.filename or ""
        )
    else:
        candidates = [
            os.path.join(ROOT_DIR, "data", "complaint_sites.csv"),
            os.path.join(ROOT_DIR, "data", "complaint_sites.xlsx"),
            os.path.join(ROOT_DIR, "data", "sites_plaintes.csv"),
            os.path.join(ROOT_DIR, "data", "sites_plaintes.xlsx"),
        ]
        found: set[int] | None = None
        for path in candidates:
            if not os.path.exists(path):
                continue
            try:
                if path.lower().endswith((".xlsx", ".xls")):
                    frame = pd.read_excel(path)
                else:
                    frame = pd.read_csv(path, sep=None, engine="python")
            except Exception:  # noqa: BLE001
                # Unreadable default file: try the next candidate.
                continue
            found = _extract_site_codes_from_any(frame)
            break
        complaint_sites = found if found is not None else set()

    _apply_complaint_flags()

    # Flags feed into the drill-down views, so cached figures are stale now.
    _invalidate_drilldown_cache(healthcheck_changed=True)
811
+
812
+
813
def _apply_complaint_flags() -> None:
    """Stamp an ``is_complaint_site`` column onto the raw result frames.

    When the complaint set is empty the column is forced to False everywhere;
    otherwise it marks rows whose ``site_code`` appears in the set. Filtered
    views are refreshed afterwards so the flag is visible immediately.
    """
    global current_multirat_raw, current_top_anomalies_raw

    for frame in (current_multirat_raw, current_top_anomalies_raw):
        if frame is None or frame.empty:
            continue
        if complaint_sites:
            frame["is_complaint_site"] = (
                frame["site_code"].astype(int).isin(complaint_sites)
            )
        else:
            frame["is_complaint_site"] = False

    _refresh_filtered_results()
837
+
838
+
839
  def _infer_rule_row(rules_df: pd.DataFrame, rat: str, kpi: str) -> dict:
840
  if rules_df is None or rules_df.empty:
841
  return {}
 
1107
 
1108
  if current_multirat_raw is not None and not current_multirat_raw.empty:
1109
  m = _apply_city_filter(current_multirat_raw)
1110
+ if (
1111
+ bool(only_complaint_sites.value)
1112
+ and "is_complaint_site" in m.columns
1113
+ and not m.empty
1114
+ ):
1115
+ m = m[m["is_complaint_site"] == True] # noqa: E712
1116
  score_col = (
1117
  "criticality_score_weighted"
1118
  if "criticality_score_weighted" in m.columns
 
1132
 
1133
  if current_top_anomalies_raw is not None and not current_top_anomalies_raw.empty:
1134
  t = _apply_city_filter(current_top_anomalies_raw)
1135
+ if (
1136
+ bool(only_complaint_sites.value)
1137
+ and "is_complaint_site" in t.columns
1138
+ and not t.empty
1139
+ ):
1140
+ t = t[t["is_complaint_site"] == True] # noqa: E712
1141
  if top_rat_filter.value:
1142
  t = t[t["RAT"].isin(list(top_rat_filter.value))]
1143
  if top_status_filter.value and "status" in t.columns:
 
1204
  cfg["min_criticality"] = int(min_criticality.value)
1205
  cfg["min_anomaly_score"] = int(min_anomaly_score.value)
1206
  cfg["city_filter"] = str(city_filter.value or "")
1207
+ cfg["only_complaint_sites"] = bool(only_complaint_sites.value)
1208
  cfg["top_rat_filter"] = list(top_rat_filter.value) if top_rat_filter.value else []
1209
  cfg["top_status_filter"] = (
1210
  list(top_status_filter.value) if top_status_filter.value else []
 
1216
  "site_code": int(site_select.value) if site_select.value is not None else None,
1217
  "rat": str(rat_select.value or ""),
1218
  "kpi": str(kpi_select.value or ""),
1219
+ "compare_kpis": (
1220
+ list(kpi_compare_select.value) if kpi_compare_select.value else []
1221
+ ),
1222
+ "compare_norm": str(kpi_compare_norm.value or "None"),
1223
  }
1224
  return cfg
1225
 
 
1258
  except Exception: # noqa: BLE001
1259
  pass
1260
 
1261
+ try:
1262
+ if (
1263
+ "only_complaint_sites" in cfg
1264
+ and cfg["only_complaint_sites"] is not None
1265
+ ):
1266
+ only_complaint_sites.value = bool(cfg["only_complaint_sites"])
1267
+ except Exception: # noqa: BLE001
1268
+ pass
1269
+
1270
  try:
1271
  city_filter.value = str(cfg.get("city_filter", "") or "")
1272
  except Exception: # noqa: BLE001
 
1324
  kpi_select.value = kpi
1325
  except Exception: # noqa: BLE001
1326
  pass
1327
+ try:
1328
+ norm = str(drill.get("compare_norm", "None") or "None")
1329
+ if norm in list(kpi_compare_norm.options):
1330
+ kpi_compare_norm.value = norm
1331
+ except Exception: # noqa: BLE001
1332
+ pass
1333
+ try:
1334
+ ck = drill.get("compare_kpis", [])
1335
+ if isinstance(ck, list):
1336
+ opts = list(kpi_compare_select.options or [])
1337
+ kpi_compare_select.value = [str(x) for x in ck if str(x) in opts]
1338
+ except Exception: # noqa: BLE001
1339
+ pass
1340
  finally:
1341
  _applying_profile = False
1342
 
 
1432
  status_pane.object = f"Preset applied: {preset_select.value}"
1433
  current_export_bytes = None
1434
 
1435
+ _invalidate_drilldown_cache(rules_changed=True)
1436
+
1437
 
1438
  def _save_current_rules_as_preset(event=None) -> None:
1439
  try:
 
1469
  status_pane.alert_type = "success"
1470
  status_pane.object = f"Preset deleted: {name}"
1471
  current_export_bytes = None
1472
+
1473
+ _invalidate_drilldown_cache(data_changed=True, rules_changed=True)
1474
  except Exception as exc: # noqa: BLE001
1475
  status_pane.alert_type = "danger"
1476
  status_pane.object = f"Error deleting preset: {exc}"
 
1696
 
1697
  _refresh_filtered_results()
1698
 
1699
+ current_export_bytes = None
1700
 
1701
  _update_site_view()
1702
 
 
1756
 
1757
  _refresh_presets()
1758
  _refresh_profiles()
1759
+ _refresh_complaint_sites()
1760
 
1761
 
1762
  def _on_rat_change(event=None) -> None:
 
1774
  rat_select.param.watch(_on_rat_change, "value")
1775
  site_select.param.watch(_on_drilldown_change, "value")
1776
  kpi_select.param.watch(_on_drilldown_change, "value")
1777
+ kpi_compare_select.param.watch(_on_drilldown_change, "value")
1778
+ kpi_compare_norm.param.watch(_on_drilldown_change, "value")
1779
+
1780
+
1781
def _on_rules_table_change(event=None) -> None:
    """React to manual rule edits: drop stale export bytes and cached figures."""
    global current_export_bytes

    # Programmatic table updates (profile/preset application, dataset loads)
    # manage their own invalidation — only user edits should trigger this.
    if _applying_profile or _loading_datasets:
        return

    current_export_bytes = None
    _invalidate_drilldown_cache(rules_changed=True)
1787
+
1788
+
1789
+ rules_table.param.watch(_on_rules_table_change, "value")
1790
+
1791
+ try:
1792
+ top_anomalies_table.on_click(
1793
+ lambda e: _handle_double_click("top", top_anomalies_table, e)
1794
+ )
1795
+ except Exception: # noqa: BLE001
1796
+ pass
1797
+
1798
+ try:
1799
+ multirat_summary_table.on_click(
1800
+ lambda e: _handle_double_click("multirat", multirat_summary_table, e)
1801
+ )
1802
+ except Exception: # noqa: BLE001
1803
+ pass
1804
 
1805
  min_criticality.param.watch(_refresh_filtered_results, "value")
1806
  min_anomaly_score.param.watch(_refresh_filtered_results, "value")
1807
  city_filter.param.watch(_refresh_filtered_results, "value")
1808
+ only_complaint_sites.param.watch(_refresh_filtered_results, "value")
1809
  top_rat_filter.param.watch(_refresh_filtered_results, "value")
1810
  top_status_filter.param.watch(_refresh_filtered_results, "value")
1811
 
1812
+ complaint_sites_file.param.watch(_refresh_complaint_sites, "value")
1813
+
1814
  export_button.callback = _export_callback
1815
 
1816
 
1817
def _build_drilldown_export_bytes() -> bytes:
    """Build the drill-down Excel workbook for the current site/RAT selection.

    Produces three sheets via ``write_dfs_to_excel``:

    - ``Summary``: per-KPI baseline/recent medians, SLA, and recent bad-day
      counts/streaks for the selected (and compared) KPIs;
    - ``Daily``: the site's daily values for those KPIs;
    - ``KPI_Status``: the matching rows of the current status table.

    Returns ``b""`` when no site/RAT is selected or no data is available.
    """
    rat = rat_select.value
    code = site_select.value
    if rat is None or code is None:
        return b""
    daily = current_daily_by_rat.get(rat)
    if daily is None or daily.empty:
        return b""
    d = _filtered_daily(daily)
    if d is None or d.empty:
        return b""

    # All daily rows for the selected site, in chronological order.
    s = d[d["site_code"] == int(code)].copy().sort_values("date_only")
    if s.empty:
        return b""

    # KPI set to export: the comparison selection, with the primary KPI
    # forced to the front when it is not already part of it.
    selected_kpis = [str(x) for x in (kpi_compare_select.value or []) if str(x)]
    if not selected_kpis:
        selected_kpis = [str(kpi_select.value)] if kpi_select.value else []
    else:
        if kpi_select.value and str(kpi_select.value) not in selected_kpis:
            selected_kpis = [str(kpi_select.value)] + selected_kpis

    # Keep only KPIs that actually exist as columns in this RAT's data.
    selected_kpis = [k for k in selected_kpis if k in s.columns]
    base_cols = ["date_only"]
    daily_cols = base_cols + selected_kpis
    daily_out = s[daily_cols].copy() if selected_kpis else s[["date_only"]].copy()

    rules_df = (
        rules_table.value
        if isinstance(rules_table.value, pd.DataFrame)
        else pd.DataFrame()
    )
    windows = _compute_site_windows(d)
    if windows is None:
        # No usable baseline/recent split: export an empty summary sheet.
        summary_out = pd.DataFrame()
    else:
        baseline_start, baseline_end, recent_start, recent_end = windows
        rows = []
        for k in selected_kpis:
            # Rule metadata (direction + optional SLA) for this RAT/KPI.
            rule = _infer_rule_row(rules_df, str(rat), str(k))
            direction = str(rule.get("direction", "higher_is_better"))
            sla_raw = rule.get("sla", None)
            try:
                sla_val = float(sla_raw) if pd.notna(sla_raw) else None
            except Exception:  # noqa: BLE001
                sla_val = None

            # Numeric series for this KPI; non-numeric days are dropped.
            sk = s[["date_only", k]].copy()
            sk[k] = pd.to_numeric(sk[k], errors="coerce")
            sk = sk.dropna(subset=[k])

            baseline_mask = (sk["date_only"] >= baseline_start) & (
                sk["date_only"] <= baseline_end
            )
            recent_mask = (sk["date_only"] >= recent_start) & (
                sk["date_only"] <= recent_end
            )

            baseline_med = (
                float(sk.loc[baseline_mask, k].median())
                if baseline_mask.any()
                else None
            )
            recent_med = (
                float(sk.loc[recent_mask, k].median()) if recent_mask.any() else None
            )

            # Per-day bad/good flags over the recent window, in date order,
            # using the engine's is_bad() against the baseline median.
            bad_flags = []
            recent_vals = sk.loc[recent_mask, ["date_only", k]].sort_values("date_only")
            for _, r in recent_vals.iterrows():
                v = r.get(k)
                bad_flags.append(
                    bool(
                        is_bad(
                            float(v) if pd.notna(v) else None,
                            baseline_med,
                            direction,
                            float(rel_threshold_pct.value),
                            sla_val,
                        )
                    )
                )

            rows.append(
                {
                    "RAT": str(rat),
                    "site_code": int(code),
                    "KPI": str(k),
                    "direction": direction,
                    "sla": sla_val,
                    "baseline_median": baseline_med,
                    "recent_median": recent_med,
                    "bad_days_recent": int(sum(bad_flags)),
                    "max_streak_recent": int(max_consecutive_days(bad_flags)),
                }
            )
        summary_out = pd.DataFrame(rows)

    # Status rows restricted to the selected site and RAT.
    status_df = (
        current_status_df
        if isinstance(current_status_df, pd.DataFrame)
        else pd.DataFrame()
    )
    if not status_df.empty:
        status_out = status_df[
            (status_df["site_code"] == int(code)) & (status_df["RAT"] == str(rat))
        ].copy()
    else:
        status_out = pd.DataFrame()

    return write_dfs_to_excel(
        [summary_out, daily_out, status_out],
        ["Summary", "Daily", "KPI_Status"],
        index=False,
    )
1933
+
1934
+
1935
+ def _drilldown_export_callback() -> io.BytesIO:
1936
+ try:
1937
+ b = _build_drilldown_export_bytes()
1938
+ except Exception: # noqa: BLE001
1939
+ b = b""
1940
+ return io.BytesIO(b or b"")
1941
+
1942
+
1943
+ drilldown_export_button.callback = _drilldown_export_callback
1944
+
1945
+
1946
  # Page layout components (used by the multipage portal)
1947
  sidebar = pn.Column(
1948
  file_2g,
1949
  file_3g,
1950
  file_lte,
1951
+ complaint_sites_file,
1952
  "---",
1953
  analysis_range,
1954
  baseline_days,
 
1960
  min_criticality,
1961
  min_anomaly_score,
1962
  city_filter,
1963
+ only_complaint_sites,
1964
  top_rat_filter,
1965
  top_status_filter,
1966
  "---",
 
1996
  top_anomalies_table,
1997
  pn.layout.Divider(),
1998
  pn.pane.Markdown("## Drill-down"),
1999
+ pn.Row(site_select, rat_select),
2000
+ pn.Row(kpi_select, kpi_compare_select, kpi_compare_norm, drilldown_export_button),
2001
  pn.Row(
2002
  pn.Column(site_kpi_table, sizing_mode="stretch_width"),
2003
  pn.Column(trend_plot_pane, sizing_mode="stretch_both"),