feat: Introduce KPI health check feature with panel application, drilldown plots, and rule presets.
Browse files
panel_app/kpi_health_check_drilldown_plots.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import plotly.graph_objects as go
|
| 3 |
+
from plotly.subplots import make_subplots
|
| 4 |
+
|
| 5 |
+
def build_drilldown_plot(
    df: pd.DataFrame,
    kpis: list[str],
    rules_df: pd.DataFrame | None = None,
    highlight_bad_days: bool = True,
    show_sla: bool = True,
    site_code: str | int = "",
    rat: str = "",
) -> go.Figure | None:
    """Build the drill-down trend figure for one site/RAT.

    Renders one line+marker trace per KPI on the main subplot (row 1),
    optionally overlaying a dotted SLA threshold line per KPI taken from
    ``rules_df``, plus a slim "Status Check" subplot (row 2) that shares
    the x-axis so date labels always render at the bottom of the figure.

    Args:
        df: Daily KPI data; must contain a ``date_only`` column plus one
            column per KPI name.
        kpis: KPI column names to plot; names missing from ``df`` are
            silently dropped.
        rules_df: Optional rules table with ``KPI`` and ``sla`` columns
            (assumed pre-filtered for the relevant RAT — confirm at caller).
        highlight_bad_days: Reserved for future use — currently ignored.
        show_sla: When True and a numeric SLA exists for a KPI, draw a
            horizontal SLA line on the main subplot.
        site_code: Site identifier, used in the figure title only.
        rat: Radio access technology label, used in the figure title only.

    Returns:
        The assembled Plotly figure, or ``None`` when there is nothing to
        plot (missing/empty data or no valid KPI columns).
    """
    if df is None or df.empty or not kpis:
        return None

    # Keep only requested KPIs that actually exist as columns.
    valid_kpis = [k for k in kpis if k in df.columns]
    if not valid_kpis:
        return None

    # Sort chronologically; copy so the caller's frame is never mutated.
    plot_df = df.sort_values("date_only").copy()

    title_text = f"{rat} - Site {site_code}"
    # If a single KPI is plotted, name it explicitly in the title.
    if len(valid_kpis) == 1:
        title_text = f"{rat} - Site {site_code} - {valid_kpis[0]}"

    # Row 1: main trend. Row 2: slim timeline (streak/status) subplot.
    fig = make_subplots(
        rows=2, cols=1,
        shared_xaxes=True,
        vertical_spacing=0.05,
        row_heights=[0.85, 0.15],
        subplot_titles=(title_text, "Status Check")
    )

    for kpi in valid_kpis:
        # Data preparation: coerce to numeric so bad cells become NaN gaps.
        x_data = plot_df["date_only"]
        y_data = pd.to_numeric(plot_df[kpi], errors="coerce")

        fig.add_trace(
            go.Scatter(
                x=x_data,
                y=y_data,
                mode="lines+markers",
                name=kpi,
                legendgroup=kpi,  # Allows grouping logic if needed
            ),
            row=1, col=1
        )

        # Overlay the SLA threshold when a numeric value is configured.
        if show_sla and rules_df is not None and not rules_df.empty:
            try:
                # NOTE(review): lookup matches on KPI only — rules_df must
                # already be filtered for the correct RAT.
                rule = rules_df[rules_df["KPI"] == kpi]
                if not rule.empty:
                    sla_val = pd.to_numeric(rule.iloc[0].get("sla"), errors="coerce")
                    if pd.notna(sla_val):
                        fig.add_hline(
                            y=sla_val,
                            line_dash="dot",
                            line_color="red",
                            annotation_text=f"SLA {kpi}",
                            annotation_position="bottom right",
                            row=1, col=1
                        )
            except Exception:
                # Best effort: a malformed rules table must not break the plot.
                pass

    # Invisible trace in row 2 so the shared x-axis still renders date
    # labels at the bottom of the figure, even before explicit 'is_bad'
    # status markers are computed for this subplot.
    if not plot_df.empty:
        fig.add_trace(
            go.Scatter(
                x=plot_df["date_only"],
                y=[0] * len(plot_df),
                mode="markers",
                opacity=0,
                showlegend=False,
                hoverinfo="skip"
            ),
            row=2, col=1
        )

    fig.update_layout(
        template="plotly_white",
        height=500,
        margin=dict(l=50, r=50, t=50, b=50),
        legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1)
    )

    return fig
|
panel_app/kpi_health_check_panel.py
CHANGED
|
@@ -43,6 +43,9 @@ from process_kpi.kpi_health_check.profiles import (
|
|
| 43 |
save_profile,
|
| 44 |
)
|
| 45 |
from process_kpi.kpi_health_check.rules import infer_kpi_direction, infer_kpi_sla
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
pn.extension("plotly", "tabulator")
|
| 48 |
|
|
@@ -161,6 +164,9 @@ def _drilldown_cache_key(site_code: int, rat: str, kpi: str) -> tuple:
|
|
| 161 |
int(recent_days.value),
|
| 162 |
float(rel_threshold_pct.value),
|
| 163 |
int(min_consecutive_days.value),
|
|
|
|
|
|
|
|
|
|
| 164 |
)
|
| 165 |
|
| 166 |
|
|
@@ -400,6 +406,16 @@ kpi_compare_select = pn.widgets.MultiChoice(name="Compare KPIs", options=[], val
|
|
| 400 |
kpi_compare_norm = pn.widgets.Select(
|
| 401 |
name="Normalization", options=["None", "Min-Max", "Z-score"], value="None"
|
| 402 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 403 |
drilldown_export_button = pn.widgets.FileDownload(
|
| 404 |
label="Download drill-down",
|
| 405 |
filename="KPI_Drilldown.xlsx",
|
|
@@ -407,7 +423,7 @@ drilldown_export_button = pn.widgets.FileDownload(
|
|
| 407 |
)
|
| 408 |
|
| 409 |
site_kpi_table = pn.widgets.Tabulator(
|
| 410 |
-
height=
|
| 411 |
)
|
| 412 |
|
| 413 |
_set_tabulator_pagination(site_summary_table, page_size=50)
|
|
@@ -496,11 +512,29 @@ def _update_kpi_options() -> None:
|
|
| 496 |
if c not in {"site_code", "date_only", "Longitude", "Latitude", "City", "RAT"}
|
| 497 |
]
|
| 498 |
kpis = sorted([str(c) for c in kpis])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 499 |
_updating_drilldown = True
|
| 500 |
try:
|
| 501 |
-
|
| 502 |
-
if
|
| 503 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 504 |
|
| 505 |
kpi_compare_select.options = kpis
|
| 506 |
cur = list(kpi_compare_select.value or [])
|
|
@@ -563,15 +597,40 @@ def _update_site_view(event=None) -> None:
|
|
| 563 |
)
|
| 564 |
except Exception: # noqa: BLE001
|
| 565 |
available_sites = set()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 566 |
if available_sites:
|
| 567 |
-
try:
|
| 568 |
-
code_int = int(code)
|
| 569 |
-
except Exception: # noqa: BLE001
|
| 570 |
-
code_int = None
|
| 571 |
if code_int is None or code_int not in available_sites:
|
| 572 |
new_code = next(iter(sorted(available_sites)))
|
|
|
|
|
|
|
| 573 |
_set_widget_value(site_select, new_code)
|
| 574 |
code = new_code
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 575 |
|
| 576 |
status_df = (
|
| 577 |
current_status_df
|
|
@@ -582,7 +641,7 @@ def _update_site_view(event=None) -> None:
|
|
| 582 |
site_df = pd.DataFrame()
|
| 583 |
else:
|
| 584 |
site_df = status_df[
|
| 585 |
-
(status_df["site_code"] == int(
|
| 586 |
].copy()
|
| 587 |
site_kpi_table.value = site_df
|
| 588 |
|
|
@@ -615,47 +674,50 @@ def _update_site_view(event=None) -> None:
|
|
| 615 |
trend_plot_pane.object, heatmap_plot_pane.object, hist_plot_pane.object = cached
|
| 616 |
return
|
| 617 |
|
| 618 |
-
|
| 619 |
-
|
| 620 |
-
|
| 621 |
-
|
| 622 |
-
|
| 623 |
-
|
| 624 |
-
|
| 625 |
-
|
| 626 |
-
|
| 627 |
-
|
| 628 |
-
|
| 629 |
-
|
| 630 |
-
|
| 631 |
-
|
| 632 |
-
|
| 633 |
-
|
| 634 |
-
|
| 635 |
-
|
| 636 |
-
vals = pd.to_numeric(plot_df[col], errors="coerce")
|
| 637 |
-
if norm_mode == "Min-Max":
|
| 638 |
-
vmin = float(vals.min()) if vals.notna().any() else 0.0
|
| 639 |
-
vmax = float(vals.max()) if vals.notna().any() else 0.0
|
| 640 |
-
denom = vmax - vmin
|
| 641 |
-
plot_df[col] = (vals - vmin) / denom if denom else 0.0
|
| 642 |
-
elif norm_mode == "Z-score":
|
| 643 |
-
mean = float(vals.mean()) if vals.notna().any() else 0.0
|
| 644 |
-
std = float(vals.std()) if vals.notna().any() else 0.0
|
| 645 |
-
plot_df[col] = (vals - mean) / std if std else 0.0
|
| 646 |
-
|
| 647 |
-
df_long = plot_df.melt(
|
| 648 |
-
id_vars=["date_only"],
|
| 649 |
-
value_vars=selected_kpis,
|
| 650 |
-
var_name="KPI",
|
| 651 |
-
value_name="value",
|
| 652 |
)
|
| 653 |
-
|
| 654 |
-
|
| 655 |
-
|
| 656 |
-
|
| 657 |
-
|
| 658 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 659 |
|
| 660 |
rules_df = (
|
| 661 |
rules_table.value
|
|
@@ -1772,6 +1834,8 @@ def _on_drilldown_change(event=None) -> None:
|
|
| 1772 |
|
| 1773 |
|
| 1774 |
rat_select.param.watch(_on_rat_change, "value")
|
|
|
|
|
|
|
| 1775 |
site_select.param.watch(_on_drilldown_change, "value")
|
| 1776 |
kpi_select.param.watch(_on_drilldown_change, "value")
|
| 1777 |
kpi_compare_select.param.watch(_on_drilldown_change, "value")
|
|
@@ -1997,15 +2061,12 @@ main = pn.Column(
|
|
| 1997 |
pn.layout.Divider(),
|
| 1998 |
pn.pane.Markdown("## Drill-down"),
|
| 1999 |
pn.Row(site_select, rat_select),
|
|
|
|
| 2000 |
pn.Row(kpi_select, kpi_compare_select, kpi_compare_norm, drilldown_export_button),
|
| 2001 |
-
pn.
|
| 2002 |
-
|
| 2003 |
-
|
| 2004 |
-
),
|
| 2005 |
-
pn.Row(
|
| 2006 |
-
pn.Column(heatmap_plot_pane, sizing_mode="stretch_both"),
|
| 2007 |
-
pn.Column(hist_plot_pane, sizing_mode="stretch_both"),
|
| 2008 |
-
),
|
| 2009 |
)
|
| 2010 |
|
| 2011 |
|
|
|
|
| 43 |
save_profile,
|
| 44 |
)
|
| 45 |
from process_kpi.kpi_health_check.rules import infer_kpi_direction, infer_kpi_sla
|
| 46 |
+
from process_kpi.kpi_health_check.kpi_groups import get_kpis_by_group, filter_kpis
|
| 47 |
+
from process_kpi.kpi_health_check.benchmarks import calculate_sla_metrics
|
| 48 |
+
from panel_app.kpi_health_check_drilldown_plots import build_drilldown_plot
|
| 49 |
|
| 50 |
pn.extension("plotly", "tabulator")
|
| 51 |
|
|
|
|
| 164 |
int(recent_days.value),
|
| 165 |
float(rel_threshold_pct.value),
|
| 166 |
int(min_consecutive_days.value),
|
| 167 |
+
# New cache keys
|
| 168 |
+
str(kpi_group_select.value),
|
| 169 |
+
str(kpi_group_mode.value),
|
| 170 |
)
|
| 171 |
|
| 172 |
|
|
|
|
| 406 |
kpi_compare_norm = pn.widgets.Select(
|
| 407 |
name="Normalization", options=["None", "Min-Max", "Z-score"], value="None"
|
| 408 |
)
|
| 409 |
+
|
| 410 |
+
# NEW WIDGETS
|
| 411 |
+
kpi_group_select = pn.widgets.Select(
|
| 412 |
+
name="KPI Group", options=["All (selected KPIs)"], value="All (selected KPIs)"
|
| 413 |
+
)
|
| 414 |
+
kpi_group_mode = pn.widgets.Select(
|
| 415 |
+
name="Group Mode",
|
| 416 |
+
options=["Filter KPI list only (recommended)", "Add top 12 KPIs to compare"],
|
| 417 |
+
value="Filter KPI list only (recommended)"
|
| 418 |
+
)
|
| 419 |
drilldown_export_button = pn.widgets.FileDownload(
|
| 420 |
label="Download drill-down",
|
| 421 |
filename="KPI_Drilldown.xlsx",
|
|
|
|
| 423 |
)
|
| 424 |
|
| 425 |
site_kpi_table = pn.widgets.Tabulator(
|
| 426 |
+
height=520, sizing_mode="stretch_width", layout="fit_data_table"
|
| 427 |
)
|
| 428 |
|
| 429 |
_set_tabulator_pagination(site_summary_table, page_size=50)
|
|
|
|
| 512 |
if c not in {"site_code", "date_only", "Longitude", "Latitude", "City", "RAT"}
|
| 513 |
]
|
| 514 |
kpis = sorted([str(c) for c in kpis])
|
| 515 |
+
|
| 516 |
+
# Apply Grouping if needed
|
| 517 |
+
groups = get_kpis_by_group(kpis)
|
| 518 |
+
group_options = ["All (selected KPIs)"] + sorted([g for g in groups.keys() if g != "Other"])
|
| 519 |
+
if "Other" in groups:
|
| 520 |
+
group_options.append("Other")
|
| 521 |
+
|
| 522 |
_updating_drilldown = True
|
| 523 |
try:
|
| 524 |
+
kpi_group_select.options = group_options
|
| 525 |
+
if kpi_group_select.value not in group_options:
|
| 526 |
+
kpi_group_select.value = group_options[0]
|
| 527 |
+
|
| 528 |
+
# Filter KPIs based on group
|
| 529 |
+
filtered_kpis = filter_kpis(
|
| 530 |
+
kpis,
|
| 531 |
+
kpi_group_select.value,
|
| 532 |
+
mode=kpi_group_mode.value
|
| 533 |
+
)
|
| 534 |
+
|
| 535 |
+
kpi_select.options = filtered_kpis
|
| 536 |
+
if filtered_kpis and kpi_select.value not in filtered_kpis:
|
| 537 |
+
kpi_select.value = filtered_kpis[0]
|
| 538 |
|
| 539 |
kpi_compare_select.options = kpis
|
| 540 |
cur = list(kpi_compare_select.value or [])
|
|
|
|
| 597 |
)
|
| 598 |
except Exception: # noqa: BLE001
|
| 599 |
available_sites = set()
|
| 600 |
+
|
| 601 |
+
|
| 602 |
+
# Robustly resolve code_int from site_select.value
|
| 603 |
+
# AutocompleteInput might return the Label (str) or Value (int) depending on usage
|
| 604 |
+
code_int = None
|
| 605 |
+
if code is not None:
|
| 606 |
+
# 1. Try if code is already a known value (int)
|
| 607 |
+
if hasattr(site_select, "options") and isinstance(site_select.options, dict):
|
| 608 |
+
# Check if it matches a Key (Label)
|
| 609 |
+
if code in site_select.options:
|
| 610 |
+
code_int = site_select.options[code]
|
| 611 |
+
# Check if it is a Value in the dict
|
| 612 |
+
elif code in site_select.options.values():
|
| 613 |
+
code_int = code
|
| 614 |
+
|
| 615 |
+
# 2. If not checking opts or not found, try direct cast
|
| 616 |
+
if code_int is None:
|
| 617 |
+
try:
|
| 618 |
+
code_int = int(code)
|
| 619 |
+
except Exception:
|
| 620 |
+
code_int = None
|
| 621 |
+
|
| 622 |
if available_sites:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 623 |
if code_int is None or code_int not in available_sites:
|
| 624 |
new_code = next(iter(sorted(available_sites)))
|
| 625 |
+
# We must set the WIDGET to a value it accepts.
|
| 626 |
+
# If options are {Label: ID}, and we set ID, it usually works.
|
| 627 |
_set_widget_value(site_select, new_code)
|
| 628 |
code = new_code
|
| 629 |
+
code_int = new_code
|
| 630 |
+
else:
|
| 631 |
+
# If valid, ensure we stick with the int representation for downstream logic
|
| 632 |
+
# but don't force widget update if not needed prevents loops
|
| 633 |
+
pass
|
| 634 |
|
| 635 |
status_df = (
|
| 636 |
current_status_df
|
|
|
|
| 641 |
site_df = pd.DataFrame()
|
| 642 |
else:
|
| 643 |
site_df = status_df[
|
| 644 |
+
(status_df["site_code"] == int(code_int if code_int is not None else 0)) & (status_df["RAT"] == rat)
|
| 645 |
].copy()
|
| 646 |
site_kpi_table.value = site_df
|
| 647 |
|
|
|
|
| 674 |
trend_plot_pane.object, heatmap_plot_pane.object, hist_plot_pane.object = cached
|
| 675 |
return
|
| 676 |
|
| 677 |
+
# Determine KPIs to plot based on group mode
|
| 678 |
+
kpis_to_plot = []
|
| 679 |
+
|
| 680 |
+
# 1. Start with explicitly selected 'Compare KPIs'
|
| 681 |
+
selected = [str(x) for x in (kpi_compare_select.value or []) if str(x)]
|
| 682 |
+
|
| 683 |
+
# 2. Add the primary selected KPI if not present
|
| 684 |
+
if kpi and str(kpi) not in selected:
|
| 685 |
+
selected = [str(kpi)] + selected
|
| 686 |
+
|
| 687 |
+
# 3. Handle Group Mode "Add top 12 KPIs"
|
| 688 |
+
# If mode is "Add top...", we fetch from group and append
|
| 689 |
+
if "Add top" in str(kpi_group_mode.value):
|
| 690 |
+
from_group = filter_kpis(
|
| 691 |
+
d.columns.tolist(),
|
| 692 |
+
kpi_group_select.value,
|
| 693 |
+
mode="Top-N",
|
| 694 |
+
top_n=12
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 695 |
)
|
| 696 |
+
# Merge unique
|
| 697 |
+
for gk in from_group:
|
| 698 |
+
if gk not in selected:
|
| 699 |
+
selected.append(gk)
|
| 700 |
+
|
| 701 |
+
# Safeguard: Limit to 15 max to prevent browser crash
|
| 702 |
+
kpis_to_plot = selected[:15]
|
| 703 |
+
|
| 704 |
+
# Build Plot using new module
|
| 705 |
+
# We need the rules for this RAT/KPIs to show SLA
|
| 706 |
+
relevant_rules = pd.DataFrame()
|
| 707 |
+
if isinstance(rules_table.value, pd.DataFrame) and not rules_table.value.empty:
|
| 708 |
+
r = rules_table.value
|
| 709 |
+
relevant_rules = r[r["RAT"] == rat]
|
| 710 |
+
|
| 711 |
+
fig = build_drilldown_plot(
|
| 712 |
+
df=d[d["site_code"] == int(code)],
|
| 713 |
+
kpis=kpis_to_plot,
|
| 714 |
+
rules_df=relevant_rules,
|
| 715 |
+
highlight_bad_days=True,
|
| 716 |
+
show_sla=True,
|
| 717 |
+
site_code=code,
|
| 718 |
+
rat=rat
|
| 719 |
+
)
|
| 720 |
+
trend_plot_pane.object = fig
|
| 721 |
|
| 722 |
rules_df = (
|
| 723 |
rules_table.value
|
|
|
|
| 1834 |
|
| 1835 |
|
| 1836 |
rat_select.param.watch(_on_rat_change, "value")
|
| 1837 |
+
kpi_group_select.param.watch(_on_rat_change, "value") # Updating group also needs to re-filter KPI options
|
| 1838 |
+
kpi_group_mode.param.watch(_on_drilldown_change, "value")
|
| 1839 |
site_select.param.watch(_on_drilldown_change, "value")
|
| 1840 |
kpi_select.param.watch(_on_drilldown_change, "value")
|
| 1841 |
kpi_compare_select.param.watch(_on_drilldown_change, "value")
|
|
|
|
| 2061 |
pn.layout.Divider(),
|
| 2062 |
pn.pane.Markdown("## Drill-down"),
|
| 2063 |
pn.Row(site_select, rat_select),
|
| 2064 |
+
pn.Row(kpi_group_select, kpi_group_mode),
|
| 2065 |
pn.Row(kpi_select, kpi_compare_select, kpi_compare_norm, drilldown_export_button),
|
| 2066 |
+
pn.Column(site_kpi_table, sizing_mode="stretch_width"),
|
| 2067 |
+
pn.Column(trend_plot_pane, sizing_mode="stretch_both", min_height=500),
|
| 2068 |
+
pn.Column(heatmap_plot_pane, sizing_mode="stretch_both", min_height=400),
|
| 2069 |
+
pn.Column(hist_plot_pane, sizing_mode="stretch_both", min_height=400),
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2070 |
)
|
| 2071 |
|
| 2072 |
|
process_kpi/kpi_health_check/benchmarks.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
def calculate_sla_metrics(
|
| 5 |
+
df: pd.DataFrame,
|
| 6 |
+
kpi: str,
|
| 7 |
+
rules_df: pd.DataFrame | None = None
|
| 8 |
+
) -> dict:
|
| 9 |
+
"""
|
| 10 |
+
Calculates simple metrics for the given KPI trace:
|
| 11 |
+
- SLA value (if exists)
|
| 12 |
+
- Median (recent window)
|
| 13 |
+
|
| 14 |
+
Returns a dict with: 'sla': float|None, 'median': float|None
|
| 15 |
+
"""
|
| 16 |
+
res = {"sla": None, "median": None}
|
| 17 |
+
|
| 18 |
+
if df is None or df.empty or kpi not in df.columns:
|
| 19 |
+
return res
|
| 20 |
+
|
| 21 |
+
# 1. Get SLA from rules
|
| 22 |
+
if rules_df is not None and not rules_df.empty:
|
| 23 |
+
# Assuming rules_df has 'KPI' and 'sla' columns
|
| 24 |
+
# We also need to match RAT? usually passed or handled outside.
|
| 25 |
+
# Here we do a simplistic lookup.
|
| 26 |
+
try:
|
| 27 |
+
row = rules_df[rules_df["KPI"] == kpi]
|
| 28 |
+
if not row.empty:
|
| 29 |
+
val = row.iloc[0].get("sla")
|
| 30 |
+
res["sla"] = float(val) if pd.notna(val) else None
|
| 31 |
+
except Exception:
|
| 32 |
+
pass
|
| 33 |
+
|
| 34 |
+
# 2. Calculate Median (entire passed df, usually it's the recent window)
|
| 35 |
+
try:
|
| 36 |
+
vals = pd.to_numeric(df[kpi], errors="coerce").dropna()
|
| 37 |
+
if not vals.empty:
|
| 38 |
+
res["median"] = float(vals.median())
|
| 39 |
+
except Exception:
|
| 40 |
+
pass
|
| 41 |
+
|
| 42 |
+
return res
|
process_kpi/kpi_health_check/kpi_groups.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import pandas as pd
|
| 3 |
+
|
| 4 |
+
# Regex patterns for KPI classification.
# Order matters: the first group whose pattern matches wins.
PATTERNS = {
    # Refined patterns based on user data
    "Success Rate": r"(?i)(cssr|success|attach|setup|establ|answer|complete|connected|ho.*succ|\berab\b|\brrc\b.*(?:\bsr\b|rate|succ)|\basr\b|\bsr\b)",
    "Fails/Drop/Block": r"(?i)(drop|dcr|fail|block|reject|deny|loss|lost|discard|congestion|accessibility.*fail|retention.*fail)",
    "Throughput": r"(?i)(throughput|thp|thrput|debit|dl.*rate|ul.*rate|bitrate)",
    "Traffic": r"(?i)(traffic|volume|erl|payload|gbytes|gb|load|usage|utilization|users|subscribers|ue|conn.*ue)",
    "Availability": r"(?i)(availability|avail|unavailability|unavail|dispo|disponibil|uptime)",
    "Latency": r"(?i)(latency|delay|\brt\b|rtt)",
}

def classify_kpi(kpi_name: str) -> str:
    """Classify a KPI name into one of the PATTERNS groups.

    The first pattern (in PATTERNS insertion order) that matches wins;
    names that match nothing are classified as "Other".
    """
    name = str(kpi_name)
    return next(
        (group for group, pattern in PATTERNS.items() if re.search(pattern, name)),
        "Other",
    )
|
| 26 |
+
|
| 27 |
+
def get_kpis_by_group(all_kpis: list[str]) -> dict[str, list[str]]:
    """Bucket KPI names by their classified group.

    Returns a mapping of group name -> alphabetically sorted KPI names.
    Group order follows PATTERNS with "Other" last; groups that received
    no KPIs are omitted from the result.
    """
    buckets: dict[str, list[str]] = {name: [] for name in PATTERNS}
    buckets["Other"] = []

    for kpi_name in sorted(all_kpis):
        buckets[classify_kpi(kpi_name)].append(kpi_name)

    # Drop groups that ended up empty.
    return {group: members for group, members in buckets.items() if members}
|
| 40 |
+
|
| 41 |
+
def filter_kpis(
    all_kpis: list[str],
    group: str,
    mode: str = "Filter",
    top_n: int = 12,
    stats_df: pd.DataFrame | None = None
) -> list[str]:
    """Filter (and optionally truncate) KPIs for the selected group.

    Args:
        all_kpis: List of available KPI names.
        group: Selected group name; may carry UI suffixes (e.g.
            "Success Rate (>= SLA...)"), so known PATTERNS keys are
            matched by substring. "All (selected KPIs)" disables filtering.
        mode: 'Filter' returns every matching KPI; a mode starting with
            'Top' (e.g. 'Top-N') truncates the result to ``top_n`` names.
        top_n: Maximum number of KPIs returned in Top-N mode.
        stats_df: Optional stats frame ('site_code', 'KPI', 'is_bad', ...)
            for smart ranking — not used yet, see TODO below.

    Returns:
        Alphabetically sorted KPI names belonging to the group (possibly
        truncated to ``top_n``), or an empty list when nothing matches.
    """
    if not all_kpis:
        return []

    # 1. Filter by group. Match known group keys by substring so labels
    # decorated by the UI still resolve; anything else maps to "Other".
    if group and group != "All (selected KPIs)":
        target_group = "Other"
        for known in PATTERNS.keys():
            if known in group:
                target_group = known
                break
        if "Other" in group:
            target_group = "Other"

        candidates = [k for k in all_kpis if classify_kpi(k) == target_group]
    else:
        candidates = list(all_kpis)

    if not candidates:
        return []

    # 2. Rank alphabetically.
    # TODO: use stats_df for smart sorting (e.g. by "badness" or variance)
    # once available; for V1 a plain alpha sort is used.
    ranked = sorted(candidates)

    # 3. Honor Top-N mode. (Fix: the documented 'Top-N' mode and top_n
    # parameter were previously accepted but never applied.)
    if str(mode).startswith("Top"):
        return ranked[:top_n]
    return ranked
|