DavMelchi commited on
Commit
32ee5a2
·
1 Parent(s): 7a1468d

Add map filtering controls with status, RAT, and search capabilities including auto-fit bounds, city/site search, dominant status classification, and interactive center override with fit-to-points functionality

Browse files
panel_app/kpi_health_check_drilldown_plots.py CHANGED
@@ -2,6 +2,7 @@ import pandas as pd
2
  import plotly.graph_objects as go
3
  from plotly.subplots import make_subplots
4
 
 
5
  def build_drilldown_plot(
6
  df: pd.DataFrame,
7
  kpis: list[str],
@@ -25,7 +26,7 @@ def build_drilldown_plot(
25
  # Sort by date
26
  # Sort by date
27
  plot_df = df.sort_values("date_only").copy()
28
-
29
  title_text = f"{rat} - Site {site_code}"
30
  # If single KPI, be explicit in title
31
  if len(valid_kpis) == 1:
@@ -34,18 +35,19 @@ def build_drilldown_plot(
34
  # Subplot for Timeline (Streak) - Row 2
35
  # Row 1: Main Trend
36
  fig = make_subplots(
37
- rows=2, cols=1,
38
- shared_xaxes=True,
 
39
  vertical_spacing=0.05,
40
  row_heights=[0.85, 0.15],
41
- subplot_titles=(title_text, "Status Check")
42
  )
43
 
44
  for kpi in valid_kpis:
45
  # Data preparation
46
  x_data = plot_df["date_only"]
47
  y_data = pd.to_numeric(plot_df[kpi], errors="coerce")
48
-
49
  # Add Trace
50
  fig.add_trace(
51
  go.Scatter(
@@ -55,7 +57,8 @@ def build_drilldown_plot(
55
  name=kpi,
56
  legendgroup=kpi, # Allows grouping logic if needed
57
  ),
58
- row=1, col=1
 
59
  )
60
 
61
  # Add SLA line if available
@@ -65,15 +68,23 @@ def build_drilldown_plot(
65
  # Note: This implies rules_df is filtered for the correct RAT
66
  rule = rules_df[rules_df["KPI"] == kpi]
67
  if not rule.empty:
 
 
 
 
 
 
 
68
  sla_val = pd.to_numeric(rule.iloc[0].get("sla"), errors="coerce")
69
  if pd.notna(sla_val):
70
  fig.add_hline(
71
- y=sla_val,
72
- line_dash="dot",
73
- line_color="red",
74
  annotation_text=f"SLA {kpi}",
75
  annotation_position="bottom right",
76
- row=1, col=1
 
77
  )
78
  except Exception:
79
  pass
@@ -90,16 +101,17 @@ def build_drilldown_plot(
90
  mode="markers",
91
  opacity=0,
92
  showlegend=False,
93
- hoverinfo="skip"
94
  ),
95
- row=2, col=1
 
96
  )
97
-
98
  fig.update_layout(
99
  template="plotly_white",
100
  height=500,
101
  margin=dict(l=50, r=50, t=50, b=50),
102
- legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1)
103
  )
104
-
105
  return fig
 
2
  import plotly.graph_objects as go
3
  from plotly.subplots import make_subplots
4
 
5
+
6
  def build_drilldown_plot(
7
  df: pd.DataFrame,
8
  kpis: list[str],
 
26
  # Sort by date
27
  # Sort by date
28
  plot_df = df.sort_values("date_only").copy()
29
+
30
  title_text = f"{rat} - Site {site_code}"
31
  # If single KPI, be explicit in title
32
  if len(valid_kpis) == 1:
 
35
  # Subplot for Timeline (Streak) - Row 2
36
  # Row 1: Main Trend
37
  fig = make_subplots(
38
+ rows=2,
39
+ cols=1,
40
+ shared_xaxes=True,
41
  vertical_spacing=0.05,
42
  row_heights=[0.85, 0.15],
43
+ subplot_titles=(title_text, "Status Check"),
44
  )
45
 
46
  for kpi in valid_kpis:
47
  # Data preparation
48
  x_data = plot_df["date_only"]
49
  y_data = pd.to_numeric(plot_df[kpi], errors="coerce")
50
+
51
  # Add Trace
52
  fig.add_trace(
53
  go.Scatter(
 
57
  name=kpi,
58
  legendgroup=kpi, # Allows grouping logic if needed
59
  ),
60
+ row=1,
61
+ col=1,
62
  )
63
 
64
  # Add SLA line if available
 
68
  # Note: This implies rules_df is filtered for the correct RAT
69
  rule = rules_df[rules_df["KPI"] == kpi]
70
  if not rule.empty:
71
+ pol = (
72
+ str(rule.iloc[0].get("policy", "enforce") or "enforce")
73
+ .strip()
74
+ .lower()
75
+ )
76
+ if pol == "notify":
77
+ continue
78
  sla_val = pd.to_numeric(rule.iloc[0].get("sla"), errors="coerce")
79
  if pd.notna(sla_val):
80
  fig.add_hline(
81
+ y=sla_val,
82
+ line_dash="dot",
83
+ line_color="red",
84
  annotation_text=f"SLA {kpi}",
85
  annotation_position="bottom right",
86
+ row=1,
87
+ col=1,
88
  )
89
  except Exception:
90
  pass
 
101
  mode="markers",
102
  opacity=0,
103
  showlegend=False,
104
+ hoverinfo="skip",
105
  ),
106
+ row=2,
107
+ col=1,
108
  )
109
+
110
  fig.update_layout(
111
  template="plotly_white",
112
  height=500,
113
  margin=dict(l=50, r=50, t=50, b=50),
114
+ legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
115
  )
116
+
117
  return fig
panel_app/kpi_health_check_panel.py CHANGED
@@ -46,7 +46,11 @@ from process_kpi.kpi_health_check.profiles import (
46
  load_profile,
47
  save_profile,
48
  )
49
- from process_kpi.kpi_health_check.rules import infer_kpi_direction, infer_kpi_sla
 
 
 
 
50
 
51
  pn.extension("plotly", "tabulator")
52
 
@@ -472,6 +476,33 @@ delta_table = pn.widgets.Tabulator(
472
  map_show_site_codes = pn.widgets.Checkbox(name="Show site codes", value=True)
473
  map_max_labels = pn.widgets.IntInput(name="Max labels", value=200, step=50)
474
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
475
  site_select = pn.widgets.AutocompleteInput(
476
  name="Select a site (Type to search)",
477
  options={},
@@ -536,7 +567,9 @@ function format(cell, formatterParams, onRendered){
536
  var bg = '#455a64';
537
  if(s === 'PERSISTENT_DEGRADED') bg = '#b71c1c';
538
  else if(s === 'DEGRADED') bg = '#e53935';
 
539
  else if(s === 'RESOLVED') bg = '#2e7d32';
 
540
  else if(s === 'NO_DATA') bg = '#616161';
541
  else if(s === 'OK') bg = '#1565c0';
542
 
@@ -552,9 +585,15 @@ function format(cell, formatterParams, onRendered){
552
  } else if(s === 'DEGRADED'){
553
  rowEl.style.borderLeft = '6px solid #e53935';
554
  rowEl.style.backgroundColor = '#fff5f5';
 
 
 
555
  } else if(s === 'RESOLVED'){
556
  rowEl.style.borderLeft = '6px solid #2e7d32';
557
  rowEl.style.backgroundColor = '#f1f8e9';
 
 
 
558
  } else if(s === 'NO_DATA'){
559
  rowEl.style.borderLeft = '6px solid #616161';
560
  rowEl.style.backgroundColor = '#f5f5f5';
@@ -597,6 +636,10 @@ hist_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG
597
  map_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
598
  map_message = pn.pane.Alert("", alert_type="info", visible=False)
599
 
 
 
 
 
600
  corr_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
601
  corr_message = pn.pane.Alert("", alert_type="info", visible=False)
602
 
@@ -624,6 +667,45 @@ def _coords_by_site() -> pd.DataFrame:
624
  return out
625
 
626
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  def _map_df() -> pd.DataFrame:
628
  base = (
629
  current_multirat_df
@@ -653,6 +735,77 @@ def _map_df() -> pd.DataFrame:
653
  out["City"] = out["City_coord"]
654
  if "City" in out.columns and "City_coord" in out.columns:
655
  out["City"] = out["City"].where(out["City"].notna(), out["City_coord"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
656
  return out
657
 
658
 
@@ -687,11 +840,31 @@ def _build_map_fig(df_map: pd.DataFrame) -> go.Figure | None:
687
  size = (tmp["_score"].clip(lower=0) + 1.0).pow(0.5) * 6.0
688
  tmp["_size"] = size.clip(lower=6, upper=28)
689
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
690
  hover_cols = [
691
  c
692
  for c in [
693
  "site_code",
694
  "City",
 
695
  score_col,
696
  "impacted_rats",
697
  "persistent_kpis_total",
@@ -699,16 +872,19 @@ def _build_map_fig(df_map: pd.DataFrame) -> go.Figure | None:
699
  ]
700
  if c and c in tmp.columns
701
  ]
 
702
  fig = px.scatter_mapbox(
703
  tmp,
704
  lat="Latitude",
705
  lon="Longitude",
706
- color="_score" if score_col is not None else None,
707
  size="_size",
708
  size_max=28,
709
  zoom=4,
710
  hover_data=hover_cols,
711
  custom_data=["site_code"],
 
 
712
  )
713
 
714
  try:
@@ -740,8 +916,49 @@ def _build_map_fig(df_map: pd.DataFrame) -> go.Figure | None:
740
  margin=dict(l=10, r=10, t=10, b=10),
741
  height=700,
742
  )
743
- if score_col is not None:
744
- fig.update_layout(coloraxis_colorbar=dict(title="Score"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
745
  return fig
746
 
747
 
@@ -755,6 +972,14 @@ def _refresh_map_view(event=None) -> None:
755
  map_message.visible = True
756
  return
757
 
 
 
 
 
 
 
 
 
758
  fig = _build_map_fig(df_map)
759
  if fig is None:
760
  map_pane.object = None
@@ -769,6 +994,125 @@ def _refresh_map_view(event=None) -> None:
769
  map_pane.object = fig
770
 
771
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
772
  def _on_map_click(event) -> None:
773
  try:
774
  cd = event.new
@@ -1457,9 +1801,11 @@ def _update_site_view(event=None) -> None:
1457
  and "KPI" in site_df.columns
1458
  ):
1459
  sev = {
1460
- "PERSISTENT_DEGRADED": 3,
1461
- "DEGRADED": 2,
 
1462
  "RESOLVED": 1,
 
1463
  "OK": 0,
1464
  "NO_DATA": -1,
1465
  }
@@ -1739,12 +2085,15 @@ def _build_site_heatmap(
1739
  continue
1740
  rule = _infer_rule_row(rules_df, rat, kpi)
1741
  direction = str(rule.get("direction", "higher_is_better"))
 
1742
  sla_raw = rule.get("sla", None)
1743
  try:
1744
  sla_val = float(sla_raw) if pd.notna(sla_raw) else None
1745
  except Exception: # noqa: BLE001
1746
  sla_val = None
1747
 
 
 
1748
  s = site_daily[["date_only", kpi]].dropna(subset=[kpi])
1749
  baseline_mask = (s["date_only"] >= baseline_start) & (
1750
  s["date_only"] <= baseline_end
@@ -1766,7 +2115,7 @@ def _build_site_heatmap(
1766
  baseline_val,
1767
  direction,
1768
  float(rel_threshold_pct.value),
1769
- sla_val,
1770
  )
1771
  row_z.append(1 if bad else 0)
1772
  row_h.append(
@@ -2449,7 +2798,7 @@ def _apply_preset(event=None) -> None:
2449
  return
2450
 
2451
  key = ["RAT", "KPI"]
2452
- upd_cols = [c for c in ["direction", "sla"] if c in preset_df.columns]
2453
  preset_df2 = preset_df[key + upd_cols].copy()
2454
 
2455
  merged = pd.merge(cur, preset_df2, on=key, how="left", suffixes=("", "_preset"))
@@ -2550,6 +2899,17 @@ def load_datasets(event=None) -> None:
2550
  delta_table.value = pd.DataFrame()
2551
  map_pane.object = None
2552
  map_message.visible = False
 
 
 
 
 
 
 
 
 
 
 
2553
  site_kpi_table.value = pd.DataFrame()
2554
  trend_plot_pane.object = None
2555
  heatmap_plot_pane.object = None
@@ -2597,13 +2957,14 @@ def load_datasets(event=None) -> None:
2597
  )
2598
 
2599
  for kpi in kpi_cols:
2600
- direction = infer_kpi_direction(kpi)
2601
  rules_rows.append(
2602
  {
2603
  "RAT": rat,
2604
  "KPI": kpi,
2605
  "direction": direction,
2606
- "sla": infer_kpi_sla(kpi, direction),
 
2607
  }
2608
  )
2609
 
@@ -3026,6 +3387,11 @@ snapshot_download.callback = _snapshot_download_callback
3026
 
3027
  map_pane.param.watch(_on_map_click, "click_data")
3028
 
 
 
 
 
 
3029
  _refresh_presets()
3030
  _refresh_profiles()
3031
  _refresh_complaint_sites()
@@ -3073,6 +3439,11 @@ corr_window_select.param.watch(_on_drilldown_change, "value")
3073
  map_show_site_codes.param.watch(lambda e: _refresh_map_view(), "value")
3074
  map_max_labels.param.watch(lambda e: _refresh_map_view(), "value")
3075
 
 
 
 
 
 
3076
  analysis_range.param.watch(_on_drilldown_params_change, "value")
3077
  baseline_days.param.watch(_on_drilldown_params_change, "value")
3078
  recent_days.param.watch(_on_drilldown_params_change, "value")
@@ -3380,7 +3751,17 @@ _tab_delta = pn.Column(
3380
  _tab_map = pn.Column(
3381
  pn.pane.Markdown("## Map"),
3382
  map_message,
3383
- pn.Row(map_show_site_codes, map_max_labels),
 
 
 
 
 
 
 
 
 
 
3384
  pn.Column(map_pane, sizing_mode="stretch_both", min_height=700),
3385
  sizing_mode="stretch_both",
3386
  )
 
46
  load_profile,
47
  save_profile,
48
  )
49
+ from process_kpi.kpi_health_check.rules import (
50
+ infer_kpi_direction,
51
+ infer_kpi_policy,
52
+ infer_kpi_sla,
53
+ )
54
 
55
  pn.extension("plotly", "tabulator")
56
 
 
476
  map_show_site_codes = pn.widgets.Checkbox(name="Show site codes", value=True)
477
  map_max_labels = pn.widgets.IntInput(name="Max labels", value=200, step=50)
478
 
479
+ map_top_n = pn.widgets.IntInput(name="Top N sites (0=All)", value=0, step=100)
480
+ map_impacted_rat = pn.widgets.Select(
481
+ name="RAT impacted", options=["Any", "2G", "3G", "LTE"], value="Any"
482
+ )
483
+ map_status_filter = pn.widgets.Select(
484
+ name="Status",
485
+ options=[
486
+ "Any",
487
+ "PERSISTENT_DEGRADED",
488
+ "DEGRADED",
489
+ "NOTIFY",
490
+ "RESOLVED",
491
+ "NOTIFY_RESOLVED",
492
+ "OK",
493
+ "NO_DATA",
494
+ ],
495
+ value="Any",
496
+ )
497
+ map_auto_fit = pn.widgets.Checkbox(name="Auto fit", value=True)
498
+ map_fit_button = pn.widgets.Button(name="Fit to points", button_type="default")
499
+
500
+ map_search_input = pn.widgets.TextInput(name="Search (site_code or City)", value="")
501
+ map_search_go = pn.widgets.Button(name="Go", button_type="primary")
502
+ map_search_clear = pn.widgets.Button(name="Clear", button_type="default")
503
+ map_search_results = pn.widgets.Select(name="Matches", options={}, value=None)
504
+ map_search_results.visible = False
505
+
506
  site_select = pn.widgets.AutocompleteInput(
507
  name="Select a site (Type to search)",
508
  options={},
 
567
  var bg = '#455a64';
568
  if(s === 'PERSISTENT_DEGRADED') bg = '#b71c1c';
569
  else if(s === 'DEGRADED') bg = '#e53935';
570
+ else if(s === 'NOTIFY') bg = '#f9a825';
571
  else if(s === 'RESOLVED') bg = '#2e7d32';
572
+ else if(s === 'NOTIFY_RESOLVED') bg = '#2e7d32';
573
  else if(s === 'NO_DATA') bg = '#616161';
574
  else if(s === 'OK') bg = '#1565c0';
575
 
 
585
  } else if(s === 'DEGRADED'){
586
  rowEl.style.borderLeft = '6px solid #e53935';
587
  rowEl.style.backgroundColor = '#fff5f5';
588
+ } else if(s === 'NOTIFY'){
589
+ rowEl.style.borderLeft = '6px solid #f9a825';
590
+ rowEl.style.backgroundColor = '#fff8e1';
591
  } else if(s === 'RESOLVED'){
592
  rowEl.style.borderLeft = '6px solid #2e7d32';
593
  rowEl.style.backgroundColor = '#f1f8e9';
594
+ } else if(s === 'NOTIFY_RESOLVED'){
595
+ rowEl.style.borderLeft = '6px solid #2e7d32';
596
+ rowEl.style.backgroundColor = '#f1f8e9';
597
  } else if(s === 'NO_DATA'){
598
  rowEl.style.borderLeft = '6px solid #616161';
599
  rowEl.style.backgroundColor = '#f5f5f5';
 
636
  map_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
637
  map_message = pn.pane.Alert("", alert_type="info", visible=False)
638
 
639
+ _map_search_city: str = ""
640
+ _map_center_override: dict | None = None
641
+ _map_force_fit: bool = False
642
+
643
  corr_plot_pane = pn.pane.Plotly(sizing_mode="stretch_both", config=PLOTLY_CONFIG)
644
  corr_message = pn.pane.Alert("", alert_type="info", visible=False)
645
 
 
667
  return out
668
 
669
 
670
+ def _dominant_status_by_site() -> pd.DataFrame:
671
+ s = (
672
+ current_status_df
673
+ if isinstance(current_status_df, pd.DataFrame)
674
+ else pd.DataFrame()
675
+ )
676
+ if (
677
+ s is None
678
+ or s.empty
679
+ or "site_code" not in s.columns
680
+ or "status" not in s.columns
681
+ ):
682
+ return pd.DataFrame(columns=["site_code", "dominant_status"])
683
+
684
+ tmp = s[["site_code", "status"]].copy()
685
+ tmp["site_code"] = pd.to_numeric(tmp["site_code"], errors="coerce")
686
+ tmp = tmp.dropna(subset=["site_code"]).copy()
687
+ if tmp.empty:
688
+ return pd.DataFrame(columns=["site_code", "dominant_status"])
689
+ tmp["site_code"] = tmp["site_code"].astype(int)
690
+ tmp["status"] = tmp["status"].astype(str).str.strip().str.upper()
691
+
692
+ prio = {
693
+ "PERSISTENT_DEGRADED": 6,
694
+ "DEGRADED": 5,
695
+ "NOTIFY": 4,
696
+ "RESOLVED": 3,
697
+ "NOTIFY_RESOLVED": 2,
698
+ "OK": 1,
699
+ "NO_DATA": 0,
700
+ }
701
+ tmp["_prio"] = tmp["status"].map(prio).fillna(2).astype(int)
702
+ idx = tmp.groupby("site_code")["_prio"].idxmax()
703
+ out = tmp.loc[idx, ["site_code", "status"]].rename(
704
+ columns={"status": "dominant_status"}
705
+ )
706
+ return out
707
+
708
+
709
  def _map_df() -> pd.DataFrame:
710
  base = (
711
  current_multirat_df
 
735
  out["City"] = out["City_coord"]
736
  if "City" in out.columns and "City_coord" in out.columns:
737
  out["City"] = out["City"].where(out["City"].notna(), out["City_coord"])
738
+
739
+ dom = _dominant_status_by_site()
740
+ if dom is not None and not dom.empty and "site_code" in dom.columns:
741
+ out = pd.merge(out, dom, on="site_code", how="left")
742
+ if "dominant_status" not in out.columns:
743
+ out["dominant_status"] = "NO_DATA"
744
+ out["dominant_status"] = (
745
+ out["dominant_status"]
746
+ .astype(str)
747
+ .str.strip()
748
+ .str.upper()
749
+ .replace({"": "NO_DATA"})
750
+ )
751
+ out["dominant_status"] = out["dominant_status"].where(
752
+ out["dominant_status"].notna(), "NO_DATA"
753
+ )
754
+ return out
755
+
756
+
757
+ def _apply_map_filters(df_map: pd.DataFrame) -> pd.DataFrame:
758
+ if df_map is None or df_map.empty:
759
+ return pd.DataFrame()
760
+ out = df_map.copy()
761
+
762
+ if _map_search_city:
763
+ if "City" in out.columns:
764
+ q = str(_map_search_city).strip().lower()
765
+ out = out[
766
+ out["City"].astype(str).str.lower().str.contains(q, na=False)
767
+ ].copy()
768
+ else:
769
+ out = out.iloc[0:0].copy()
770
+
771
+ rat_imp = str(map_impacted_rat.value or "Any")
772
+ if rat_imp and rat_imp != "Any":
773
+ pcol = f"persistent_{rat_imp}"
774
+ dcol = f"degraded_{rat_imp}"
775
+ if pcol in out.columns or dcol in out.columns:
776
+ p = (
777
+ pd.to_numeric(out[pcol], errors="coerce").fillna(0)
778
+ if pcol in out.columns
779
+ else 0
780
+ )
781
+ d = (
782
+ pd.to_numeric(out[dcol], errors="coerce").fillna(0)
783
+ if dcol in out.columns
784
+ else 0
785
+ )
786
+ out = out[(p.astype(float) > 0) | (d.astype(float) > 0)].copy()
787
+ else:
788
+ out = out.iloc[0:0].copy()
789
+
790
+ st = str(map_status_filter.value or "Any")
791
+ if st and st != "Any" and "dominant_status" in out.columns:
792
+ out = out[out["dominant_status"].astype(str).str.upper() == st].copy()
793
+
794
+ score_col = (
795
+ "criticality_score_weighted"
796
+ if "criticality_score_weighted" in out.columns
797
+ else "criticality_score"
798
+ )
799
+ if score_col in out.columns:
800
+ out["_score"] = (
801
+ pd.to_numeric(out[score_col], errors="coerce").fillna(0).astype(float)
802
+ )
803
+ else:
804
+ out["_score"] = 0.0
805
+
806
+ topn = _coerce_int(map_top_n.value)
807
+ if topn is not None and int(topn) > 0:
808
+ out = out.sort_values(by=["_score"], ascending=False).head(int(topn)).copy()
809
  return out
810
 
811
 
 
840
  size = (tmp["_score"].clip(lower=0) + 1.0).pow(0.5) * 6.0
841
  tmp["_size"] = size.clip(lower=6, upper=28)
842
 
843
+ dom_order = [
844
+ "PERSISTENT_DEGRADED",
845
+ "DEGRADED",
846
+ "NOTIFY",
847
+ "RESOLVED",
848
+ "NOTIFY_RESOLVED",
849
+ "OK",
850
+ "NO_DATA",
851
+ ]
852
+ color_map = {
853
+ "PERSISTENT_DEGRADED": "#8b0000",
854
+ "DEGRADED": "#ff6f00",
855
+ "NOTIFY": "#f9a825",
856
+ "RESOLVED": "#2e7d32",
857
+ "NOTIFY_RESOLVED": "#2e7d32",
858
+ "OK": "#1565c0",
859
+ "NO_DATA": "#616161",
860
+ }
861
+
862
  hover_cols = [
863
  c
864
  for c in [
865
  "site_code",
866
  "City",
867
+ "dominant_status",
868
  score_col,
869
  "impacted_rats",
870
  "persistent_kpis_total",
 
872
  ]
873
  if c and c in tmp.columns
874
  ]
875
+
876
  fig = px.scatter_mapbox(
877
  tmp,
878
  lat="Latitude",
879
  lon="Longitude",
880
+ color="dominant_status" if "dominant_status" in tmp.columns else None,
881
  size="_size",
882
  size_max=28,
883
  zoom=4,
884
  hover_data=hover_cols,
885
  custom_data=["site_code"],
886
+ category_orders={"dominant_status": dom_order},
887
+ color_discrete_map=color_map,
888
  )
889
 
890
  try:
 
916
  margin=dict(l=10, r=10, t=10, b=10),
917
  height=700,
918
  )
919
+
920
+ global _map_center_override, _map_force_fit
921
+ do_fit = bool(map_auto_fit.value) or bool(_map_force_fit)
922
+ if (
923
+ _map_center_override
924
+ and "lat" in _map_center_override
925
+ and "lon" in _map_center_override
926
+ ):
927
+ try:
928
+ fig.update_layout(
929
+ mapbox_center=dict(
930
+ lat=float(_map_center_override["lat"]),
931
+ lon=float(_map_center_override["lon"]),
932
+ ),
933
+ mapbox_zoom=float(_map_center_override.get("zoom", 9.0)),
934
+ )
935
+ do_fit = False
936
+ except Exception: # noqa: BLE001
937
+ pass
938
+
939
+ if do_fit:
940
+ try:
941
+ lat_min = float(tmp["Latitude"].min())
942
+ lat_max = float(tmp["Latitude"].max())
943
+ lon_min = float(tmp["Longitude"].min())
944
+ lon_max = float(tmp["Longitude"].max())
945
+
946
+ lat_pad = max(0.01, abs(lat_max - lat_min) * 0.1)
947
+ lon_pad = max(0.01, abs(lon_max - lon_min) * 0.1)
948
+ fig.update_layout(
949
+ mapbox=dict(
950
+ bounds=dict(
951
+ west=lon_min - lon_pad,
952
+ east=lon_max + lon_pad,
953
+ south=lat_min - lat_pad,
954
+ north=lat_max + lat_pad,
955
+ )
956
+ )
957
+ )
958
+ except Exception: # noqa: BLE001
959
+ pass
960
+
961
+ _map_force_fit = False
962
  return fig
963
 
964
 
 
972
  map_message.visible = True
973
  return
974
 
975
+ df_map = _apply_map_filters(df_map)
976
+ if df_map is None or df_map.empty:
977
+ map_pane.object = None
978
+ map_message.alert_type = "warning"
979
+ map_message.object = "No sites to display after Map filters/search."
980
+ map_message.visible = True
981
+ return
982
+
983
  fig = _build_map_fig(df_map)
984
  if fig is None:
985
  map_pane.object = None
 
994
  map_pane.object = fig
995
 
996
 
997
+ def _on_map_fit_click(event=None) -> None:
998
+ global _map_force_fit, _map_center_override
999
+ _map_center_override = None
1000
+ _map_force_fit = True
1001
+ _refresh_map_view()
1002
+
1003
+
1004
+ def _on_map_search(event=None) -> None:
1005
+ global _map_search_city, _map_center_override
1006
+ q = str(map_search_input.value or "").strip()
1007
+
1008
+ _map_search_city = ""
1009
+ _map_center_override = None
1010
+ map_search_results.options = {}
1011
+ map_search_results.value = None
1012
+ map_search_results.visible = False
1013
+
1014
+ if not q:
1015
+ _refresh_map_view()
1016
+ return
1017
+
1018
+ code = _coerce_int(q)
1019
+ if code is not None:
1020
+ try:
1021
+ df_map = _map_df()
1022
+ if isinstance(df_map, pd.DataFrame) and not df_map.empty:
1023
+ m = df_map[df_map["site_code"] == int(code)]
1024
+ if not m.empty and "Latitude" in m.columns and "Longitude" in m.columns:
1025
+ lat = pd.to_numeric(m.iloc[0].get("Latitude"), errors="coerce")
1026
+ lon = pd.to_numeric(m.iloc[0].get("Longitude"), errors="coerce")
1027
+ if pd.notna(lat) and pd.notna(lon):
1028
+ _map_center_override = {
1029
+ "lat": float(lat),
1030
+ "lon": float(lon),
1031
+ "zoom": 10.5,
1032
+ }
1033
+ except Exception: # noqa: BLE001
1034
+ _map_center_override = None
1035
+
1036
+ _apply_drilldown_selection(site_code=int(code))
1037
+ _refresh_map_view()
1038
+ return
1039
+
1040
+ _map_search_city = q
1041
+ try:
1042
+ df_map = _map_df()
1043
+ df_map = _apply_map_filters(df_map)
1044
+ if (
1045
+ isinstance(df_map, pd.DataFrame)
1046
+ and not df_map.empty
1047
+ and "City" in df_map.columns
1048
+ ):
1049
+ m = df_map[
1050
+ df_map["City"].astype(str).str.lower().str.contains(q.lower(), na=False)
1051
+ ].copy()
1052
+ if not m.empty:
1053
+ score_col = (
1054
+ "criticality_score_weighted"
1055
+ if "criticality_score_weighted" in m.columns
1056
+ else "criticality_score"
1057
+ )
1058
+ if score_col in m.columns:
1059
+ m["_score"] = pd.to_numeric(m[score_col], errors="coerce").fillna(0)
1060
+ m = m.sort_values(by=["_score"], ascending=False)
1061
+ m = m.head(20)
1062
+ opts = {}
1063
+ for _, r in m.iterrows():
1064
+ sc = _coerce_int(r.get("site_code"))
1065
+ if sc is None:
1066
+ continue
1067
+ city = str(r.get("City") or "").strip()
1068
+ label = f"{int(sc)} - {city}" if city else f"{int(sc)}"
1069
+ opts[label] = int(sc)
1070
+ map_search_results.options = opts
1071
+ map_search_results.visible = bool(opts)
1072
+ except Exception: # noqa: BLE001
1073
+ map_search_results.options = {}
1074
+ map_search_results.visible = False
1075
+
1076
+ _refresh_map_view()
1077
+
1078
+
1079
+ def _on_map_search_pick(event=None) -> None:
1080
+ global _map_center_override
1081
+ code = _coerce_int(getattr(event, "new", None))
1082
+ if code is None:
1083
+ return
1084
+
1085
+ try:
1086
+ df_map = _map_df()
1087
+ if isinstance(df_map, pd.DataFrame) and not df_map.empty:
1088
+ m = df_map[df_map["site_code"] == int(code)]
1089
+ if not m.empty and "Latitude" in m.columns and "Longitude" in m.columns:
1090
+ lat = pd.to_numeric(m.iloc[0].get("Latitude"), errors="coerce")
1091
+ lon = pd.to_numeric(m.iloc[0].get("Longitude"), errors="coerce")
1092
+ if pd.notna(lat) and pd.notna(lon):
1093
+ _map_center_override = {
1094
+ "lat": float(lat),
1095
+ "lon": float(lon),
1096
+ "zoom": 10.5,
1097
+ }
1098
+ except Exception: # noqa: BLE001
1099
+ _map_center_override = None
1100
+
1101
+ _apply_drilldown_selection(site_code=int(code))
1102
+ _refresh_map_view()
1103
+
1104
+
1105
+ def _on_map_clear_search(event=None) -> None:
1106
+ global _map_search_city, _map_center_override
1107
+ _map_search_city = ""
1108
+ _map_center_override = None
1109
+ map_search_input.value = ""
1110
+ map_search_results.options = {}
1111
+ map_search_results.value = None
1112
+ map_search_results.visible = False
1113
+ _refresh_map_view()
1114
+
1115
+
1116
  def _on_map_click(event) -> None:
1117
  try:
1118
  cd = event.new
 
1801
  and "KPI" in site_df.columns
1802
  ):
1803
  sev = {
1804
+ "PERSISTENT_DEGRADED": 4,
1805
+ "DEGRADED": 3,
1806
+ "NOTIFY": 2,
1807
  "RESOLVED": 1,
1808
+ "NOTIFY_RESOLVED": 0,
1809
  "OK": 0,
1810
  "NO_DATA": -1,
1811
  }
 
2085
  continue
2086
  rule = _infer_rule_row(rules_df, rat, kpi)
2087
  direction = str(rule.get("direction", "higher_is_better"))
2088
+ policy = str(rule.get("policy", "enforce") or "enforce").strip().lower()
2089
  sla_raw = rule.get("sla", None)
2090
  try:
2091
  sla_val = float(sla_raw) if pd.notna(sla_raw) else None
2092
  except Exception: # noqa: BLE001
2093
  sla_val = None
2094
 
2095
+ sla_eval = None if policy == "notify" else sla_val
2096
+
2097
  s = site_daily[["date_only", kpi]].dropna(subset=[kpi])
2098
  baseline_mask = (s["date_only"] >= baseline_start) & (
2099
  s["date_only"] <= baseline_end
 
2115
  baseline_val,
2116
  direction,
2117
  float(rel_threshold_pct.value),
2118
+ sla_eval,
2119
  )
2120
  row_z.append(1 if bad else 0)
2121
  row_h.append(
 
2798
  return
2799
 
2800
  key = ["RAT", "KPI"]
2801
+ upd_cols = [c for c in ["direction", "sla", "policy"] if c in preset_df.columns]
2802
  preset_df2 = preset_df[key + upd_cols].copy()
2803
 
2804
  merged = pd.merge(cur, preset_df2, on=key, how="left", suffixes=("", "_preset"))
 
2899
  delta_table.value = pd.DataFrame()
2900
  map_pane.object = None
2901
  map_message.visible = False
2902
+ try:
2903
+ global _map_search_city, _map_center_override, _map_force_fit
2904
+ _map_search_city = ""
2905
+ _map_center_override = None
2906
+ _map_force_fit = False
2907
+ map_search_input.value = ""
2908
+ map_search_results.options = {}
2909
+ map_search_results.value = None
2910
+ map_search_results.visible = False
2911
+ except Exception: # noqa: BLE001
2912
+ pass
2913
  site_kpi_table.value = pd.DataFrame()
2914
  trend_plot_pane.object = None
2915
  heatmap_plot_pane.object = None
 
2957
  )
2958
 
2959
  for kpi in kpi_cols:
2960
+ direction = infer_kpi_direction(kpi, rat)
2961
  rules_rows.append(
2962
  {
2963
  "RAT": rat,
2964
  "KPI": kpi,
2965
  "direction": direction,
2966
+ "sla": infer_kpi_sla(kpi, direction, rat),
2967
+ "policy": infer_kpi_policy(kpi, rat),
2968
  }
2969
  )
2970
 
 
3387
 
3388
  map_pane.param.watch(_on_map_click, "click_data")
3389
 
3390
+ map_fit_button.on_click(_on_map_fit_click)
3391
+ map_search_go.on_click(_on_map_search)
3392
+ map_search_clear.on_click(_on_map_clear_search)
3393
+ map_search_results.param.watch(_on_map_search_pick, "value")
3394
+
3395
  _refresh_presets()
3396
  _refresh_profiles()
3397
  _refresh_complaint_sites()
 
3439
  map_show_site_codes.param.watch(lambda e: _refresh_map_view(), "value")
3440
  map_max_labels.param.watch(lambda e: _refresh_map_view(), "value")
3441
 
3442
+ map_top_n.param.watch(lambda e: _refresh_map_view(), "value")
3443
+ map_impacted_rat.param.watch(lambda e: _refresh_map_view(), "value")
3444
+ map_status_filter.param.watch(lambda e: _refresh_map_view(), "value")
3445
+ map_auto_fit.param.watch(lambda e: _refresh_map_view(), "value")
3446
+
3447
  analysis_range.param.watch(_on_drilldown_params_change, "value")
3448
  baseline_days.param.watch(_on_drilldown_params_change, "value")
3449
  recent_days.param.watch(_on_drilldown_params_change, "value")
 
3751
  _tab_map = pn.Column(
3752
  pn.pane.Markdown("## Map"),
3753
  map_message,
3754
+ pn.Row(
3755
+ map_search_input, map_search_go, map_search_clear, map_fit_button, map_auto_fit
3756
+ ),
3757
+ pn.Row(
3758
+ map_top_n,
3759
+ map_impacted_rat,
3760
+ map_status_filter,
3761
+ map_show_site_codes,
3762
+ map_max_labels,
3763
+ ),
3764
+ pn.Row(map_search_results),
3765
  pn.Column(map_pane, sizing_mode="stretch_both", min_height=700),
3766
  sizing_mode="stretch_both",
3767
  )
process_kpi/kpi_health_check/engine.py CHANGED
@@ -30,8 +30,8 @@ def is_bad(
30
 
31
  thr = float(rel_threshold_pct) / 100.0
32
  if direction == "higher_is_better":
33
- return bad or (value < baseline * (1.0 - thr))
34
- return bad or (value > baseline * (1.0 + thr))
35
 
36
 
37
  def max_consecutive_days(dates: list[date]) -> int:
@@ -83,12 +83,15 @@ def evaluate_health_check(
83
  for kpi in kpis:
84
  rule = rat_rules[rat_rules["KPI"] == kpi].iloc[0]
85
  direction = str(rule.get("direction", "higher_is_better"))
 
86
  sla = rule.get("sla", np.nan)
87
  try:
88
  sla_val = float(sla) if pd.notna(sla) else None
89
  except Exception:
90
  sla_val = None
91
 
 
 
92
  s = g_site[["date_only", kpi]].dropna(subset=[kpi])
93
  if s.empty:
94
  rows.append(
@@ -125,7 +128,7 @@ def evaluate_health_check(
125
  float(baseline) if pd.notna(baseline) else None,
126
  direction,
127
  rel_threshold_pct,
128
- sla_val,
129
  ):
130
  bad_dates.append(d)
131
 
@@ -137,7 +140,7 @@ def evaluate_health_check(
137
  float(baseline) if pd.notna(baseline) else None,
138
  direction,
139
  rel_threshold_pct,
140
- sla_val,
141
  )
142
 
143
  is_bad_current = is_bad_recent
@@ -149,19 +152,27 @@ def evaluate_health_check(
149
  float(baseline) if pd.notna(baseline) else None,
150
  direction,
151
  rel_threshold_pct,
152
- sla_val,
153
  )
154
 
155
  had_bad_recent = (len(bad_dates) > 0) or bool(is_bad_recent)
156
 
157
- if is_bad_current and persistent:
158
- status = "PERSISTENT_DEGRADED"
159
- elif is_bad_current:
160
- status = "DEGRADED"
161
- elif had_bad_recent:
162
- status = "RESOLVED"
 
163
  else:
164
- status = "OK"
 
 
 
 
 
 
 
165
 
166
  rows.append(
167
  {
@@ -171,6 +182,7 @@ def evaluate_health_check(
171
  "KPI": kpi,
172
  "direction": direction,
173
  "sla": sla_val,
 
174
  "baseline_median": baseline,
175
  "recent_median": recent,
176
  "bad_days_recent": len(bad_dates),
 
30
 
31
  thr = float(rel_threshold_pct) / 100.0
32
  if direction == "higher_is_better":
33
+ return bad or (value < baseline - abs(baseline) * thr)
34
+ return bad or (value > baseline + abs(baseline) * thr)
35
 
36
 
37
  def max_consecutive_days(dates: list[date]) -> int:
 
83
  for kpi in kpis:
84
  rule = rat_rules[rat_rules["KPI"] == kpi].iloc[0]
85
  direction = str(rule.get("direction", "higher_is_better"))
86
+ policy = str(rule.get("policy", "enforce") or "enforce").strip().lower()
87
  sla = rule.get("sla", np.nan)
88
  try:
89
  sla_val = float(sla) if pd.notna(sla) else None
90
  except Exception:
91
  sla_val = None
92
 
93
+ sla_eval = None if policy == "notify" else sla_val
94
+
95
  s = g_site[["date_only", kpi]].dropna(subset=[kpi])
96
  if s.empty:
97
  rows.append(
 
128
  float(baseline) if pd.notna(baseline) else None,
129
  direction,
130
  rel_threshold_pct,
131
+ sla_eval,
132
  ):
133
  bad_dates.append(d)
134
 
 
140
  float(baseline) if pd.notna(baseline) else None,
141
  direction,
142
  rel_threshold_pct,
143
+ sla_eval,
144
  )
145
 
146
  is_bad_current = is_bad_recent
 
152
  float(baseline) if pd.notna(baseline) else None,
153
  direction,
154
  rel_threshold_pct,
155
+ sla_eval,
156
  )
157
 
158
  had_bad_recent = (len(bad_dates) > 0) or bool(is_bad_recent)
159
 
160
+ if policy == "notify":
161
+ if is_bad_current:
162
+ status = "NOTIFY"
163
+ elif had_bad_recent:
164
+ status = "NOTIFY_RESOLVED"
165
+ else:
166
+ status = "OK"
167
  else:
168
+ if is_bad_current and persistent:
169
+ status = "PERSISTENT_DEGRADED"
170
+ elif is_bad_current:
171
+ status = "DEGRADED"
172
+ elif had_bad_recent:
173
+ status = "RESOLVED"
174
+ else:
175
+ status = "OK"
176
 
177
  rows.append(
178
  {
 
182
  "KPI": kpi,
183
  "direction": direction,
184
  "sla": sla_val,
185
+ "policy": policy,
186
  "baseline_median": baseline,
187
  "recent_median": recent,
188
  "bad_days_recent": len(bad_dates),
process_kpi/kpi_health_check/presets.py CHANGED
@@ -56,7 +56,7 @@ def save_preset(name: str, rules_df: pd.DataFrame) -> str:
56
  if df.empty:
57
  raise ValueError("Rules dataframe is empty")
58
 
59
- keep = [c for c in ["RAT", "KPI", "direction", "sla"] if c in df.columns]
60
  df = df[keep].copy()
61
 
62
  obj = {
 
56
  if df.empty:
57
  raise ValueError("Rules dataframe is empty")
58
 
59
+ keep = [c for c in ["RAT", "KPI", "direction", "sla", "policy"] if c in df.columns]
60
  df = df[keep].copy()
61
 
62
  obj = {
process_kpi/kpi_health_check/rules.py CHANGED
@@ -1,5 +1,78 @@
1
- def infer_kpi_direction(kpi: str) -> str:
2
- k = str(kpi).lower()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  lower_is_better = [
4
  "drop",
5
  "dcr",
@@ -18,8 +91,15 @@ def infer_kpi_direction(kpi: str) -> str:
18
  return "higher_is_better"
19
 
20
 
21
- def infer_kpi_sla(kpi: str, direction: str) -> float | None:
22
- k = str(kpi).lower()
 
 
 
 
 
 
 
23
  if direction == "higher_is_better" and any(
24
  x in k for x in ["availability", "cssr", "success", " sr"]
25
  ):
@@ -29,3 +109,14 @@ def infer_kpi_sla(kpi: str, direction: str) -> float | None:
29
  ):
30
  return 2.0
31
  return None
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import re
4
+ from functools import lru_cache
5
+
6
+
7
+ def _norm(value: str) -> str:
8
+ s = str(value or "").strip().lower()
9
+ s = re.sub(r"[^0-9a-z]+", " ", s)
10
+ s = re.sub(r"\s+", " ", s).strip()
11
+ return s
12
+
13
+
14
+ def _project_root() -> str:
15
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
16
+
17
+
18
def _load_curated_rows() -> list[dict]:
    """Load the curated rule rows from the bundled preset file.

    Returns the "rules" list from
    data/kpi_health_check_presets/Test1.json, or an empty list when the
    file is missing, unreadable, or malformed (best-effort loader).
    """
    preset_path = os.path.join(
        _project_root(), "data", "kpi_health_check_presets", "Test1.json"
    )
    if not os.path.exists(preset_path):
        return []
    try:
        with open(preset_path, "r", encoding="utf-8") as fh:
            raw = fh.read()
        # The export may contain bare NaN tokens; rewrite them to null so
        # they load as None rather than float("nan").
        parsed = json.loads(re.sub(r"\bNaN\b", "null", raw))
    except Exception:  # noqa: BLE001
        return []
    if not isinstance(parsed, dict):
        return []
    rules = parsed.get("rules", [])
    return rules if isinstance(rules, list) else []
33
+
34
+
35
@lru_cache(maxsize=1)
def _curated_rules_map() -> dict[tuple[str, str], dict]:
    """Build a lookup of curated rules keyed by normalized (RAT, KPI).

    Each value is a dict with "direction", "sla", and "policy" entries,
    any of which may be None when absent or invalid. Cached so the
    preset file is parsed at most once per process.
    """
    rules: dict[tuple[str, str], dict] = {}
    for row in _load_curated_rows():
        if not isinstance(row, dict):
            continue
        key = (_norm(row.get("RAT")), _norm(row.get("KPI")))
        if not key[0] or not key[1]:
            # Both RAT and KPI are required to form a lookup key.
            continue

        direction = str(row.get("direction") or "").strip() or None

        # Only the two known policy names are accepted; anything else
        # falls back to None so callers apply their own default.
        policy: str | None = str(row.get("policy") or "").strip().lower()
        if policy not in {"enforce", "notify"}:
            policy = None

        sla_raw = row.get("sla", None)
        sla: float | None
        if sla_raw is None:
            sla = None
        else:
            try:
                sla = float(sla_raw)
            except Exception:  # noqa: BLE001
                sla = None

        rules[key] = {"direction": direction, "sla": sla, "policy": policy}
    return rules
61
+
62
+
63
def _curated_rule(kpi: str, rat: str | None = None) -> dict | None:
    """Look up the curated rule for (rat, kpi), or None when unknown.

    Both names are normalized before the lookup; a missing KPI or RAT
    short-circuits to None since the curated map is keyed on both.
    """
    if kpi and rat:
        return _curated_rules_map().get((_norm(rat), _norm(kpi)))
    return None
68
+
69
+
70
+ def infer_kpi_direction(kpi: str, rat: str | None = None) -> str:
71
+ curated = _curated_rule(kpi, rat)
72
+ if curated and curated.get("direction"):
73
+ return str(curated["direction"])
74
+
75
+ k = _norm(kpi)
76
  lower_is_better = [
77
  "drop",
78
  "dcr",
 
91
  return "higher_is_better"
92
 
93
 
94
+ def infer_kpi_sla(kpi: str, direction: str, rat: str | None = None) -> float | None:
95
+ curated = _curated_rule(kpi, rat)
96
+ if curated and curated.get("sla") is not None:
97
+ try:
98
+ return float(curated["sla"])
99
+ except Exception: # noqa: BLE001
100
+ pass
101
+
102
+ k = _norm(kpi)
103
  if direction == "higher_is_better" and any(
104
  x in k for x in ["availability", "cssr", "success", " sr"]
105
  ):
 
109
  ):
110
  return 2.0
111
  return None
112
+
113
+
114
def infer_kpi_policy(kpi: str, rat: str | None = None) -> str:
    """Return the alerting policy ("enforce" or "notify") for a KPI.

    A curated rule for the (rat, kpi) pair takes precedence; otherwise
    KPIs whose normalized name mentions "distance" are notify-only and
    every other KPI is enforced.
    """
    curated = _curated_rule(kpi, rat)
    policy = curated.get("policy") if curated else None
    if policy:
        return str(policy)
    return "notify" if "distance" in _norm(kpi) else "enforce"