# NOTE: The four lines below are Hugging Face Space page chrome captured with the
# file (listing header, author, commit message, commit hash) — not Python code.
# Kept as comments so the file parses:
# SMW / app.py
# UCS2014's picture
# Update app.py
# cc2b0eb verified
# app.py — ST_GeoMech_SMW (STRICT headers, PCF units, no aliases)
import io, json, os, base64, math
from pathlib import Path
import streamlit as st
import pandas as pd
import numpy as np
import joblib
from datetime import datetime
# Matplotlib (static)
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import plotly.graph_objects as go
from sklearn.metrics import mean_squared_error # MAPE implemented manually
# =========================
# Constants / Config
# =========================
APP_NAME = "ST_GeoMech_SMW"
TAGLINE = "Real-Time Upper/Lower Mud Weight (MW) Limits For Safe Drilling"
# Hard fallback (never used if model/meta available)
FEATURES_DEFAULT = ["Q (gpm)","SPP (psi)","T (kft.lbf)","WOB (klbf)","ROP (ft/h)"]
TARGET_BO_DEFAULT = "BO_Actual"  # breakout (lower MW bound) actual column
TARGET_BD_DEFAULT = "BD_Actual"  # breakdown (upper MW bound) actual column
PRED_BO = "BO_Pred"  # column written with breakout predictions
PRED_BD = "BD_Pred"  # column written with breakdown predictions
# Units label (taken from meta; fallback to pcf)
X_UNITS = "MW (pcf)"
# Discover models/metas (supports /mnt/data uploads)
MODELS_DIR = Path("models")
ALT_DIR = Path("/mnt/data")
# Candidate filenames per artifact, searched in ALT_DIR then MODELS_DIR
CANDS = {
    "bo_model": ["bo_model.joblib"],
    "bd_model": ["bd_model.joblib"],
    "bo_meta": ["bo_meta.json"],
    "bd_meta": ["bd_meta.json"],
}
# Trace colors shared by all plots (pred = solid, actual = dotted, ref = 1:1 line)
COLORS = {"pred_bo":"#1f77b4","pred_bd":"#d62728","actual_bo":"#f2b702","actual_bd":"#2ca02c","ref":"#5a5a5a"}
# Plot sizing
CROSS_W, CROSS_H = 350, 350  # X-plot size (pixels)
TRACK_H, TRACK_W = 1000, 500  # depth-track size (pixels)
FONT_SZ = 13
BOLD_FONT = "Arial Black, Arial, sans-serif"
PLAIN_FONT = "Arial, sans-serif"
# ===== Axis Titles (easy to edit) ===========================================
# Track plots: set either to a string to override, or leave as None to use defaults.
X_AXIS_TITLE_OVERRIDE = "Breakout Limit (pcf)"  # e.g., "Mud Weight (pcf)"
Y_AXIS_TITLE_OVERRIDE = "Depth (ft)"  # e.g., "Depth (ft)"
# Cross-plot axis titles. You can use {units} and it will fill from meta.
CROSS_TITLES = {
    "bo": {"x": "Actual Breakout Limit (PCF)", "y": "Predicted Breakout Limit (PCF)"},
    "bd": {"x": "Actual Breakdown Limit (PCF)", "y": "Predicted Breakdown Limit (PCF)"},
}
def _track_x_title() -> str:
    """X-axis title for track plots: the override constant if set, else units from session."""
    if X_AXIS_TITLE_OVERRIDE:
        return X_AXIS_TITLE_OVERRIDE
    return st.session_state.get("X_UNITS", "MW (pcf)")
def _track_y_title(default_ylab: str) -> str:
    """Y-axis title for track plots: the override constant if set, else the caller's default."""
    if Y_AXIS_TITLE_OVERRIDE:
        return Y_AXIS_TITLE_OVERRIDE
    return default_ylab
def _cross_titles(kind: str) -> tuple[str, str]:
    """Resolve (xlabel, ylabel) for a cross-plot of the given kind ('bo' or 'bd').

    Titles come from CROSS_TITLES when present, otherwise a generic
    "Actual/Predicted KIND (units)" pair; any "{units}" placeholder in a
    configured title is filled from session state.
    """
    units = st.session_state.get("X_UNITS", "MW (pcf)")
    entry = CROSS_TITLES.get(kind, {})
    labels = []
    for axis, prefix in (("x", "Actual"), ("y", "Predicted")):
        raw = entry.get(axis) or f"{prefix} {kind.upper()} ({units})"
        labels.append(raw.format(units=units))
    return labels[0], labels[1]
# ===========================================================================
# =========================
# Page / CSS
# =========================
st.set_page_config(page_title=APP_NAME, page_icon="logo.png", layout="wide")
# Global CSS: branding image, colored message boxes, sticky expander headers
st.markdown("""
<style>
.brand-logo { width: 200px; height: auto; object-fit: contain; }
.centered-container { display:flex; flex-direction:column; align-items:center; text-align:center; }
.st-message-box { background:#f0f2f6; color:#333; padding:10px; border-radius:10px; border:1px solid #e6e9ef; }
.st-message-box.st-success { background:#d4edda; color:#155724; border-color:#c3e6cb; }
.st-message-box.st-warning { background:#fff3cd; color:#856404; border-color:#ffeeba; }
.st-message-box.st-error { background:#f8d7da; color:#721c24; border-color:#f5c6cb; }
.main .block-container { overflow: unset !important; }
div[data-testid="stVerticalBlock"] { overflow: unset !important; }
div[data-testid="stExpander"] > details > summary {
position: sticky; top: 0; z-index: 10; background: #fff; border-bottom: 1px solid #eee;
}
div[data-testid="stExpander"] div[data-baseweb="tab-list"] {
position: sticky; top: 42px; z-index: 9; background: #fff; padding-top: 6px;
}
</style>
""", unsafe_allow_html=True)
# Styler table styles used by df_centered_rounded to center headers and cells
TABLE_CENTER_CSS = [
    dict(selector="th", props=[("text-align","center")]),
    dict(selector="td", props=[("text-align","center")]),
]
# =========================
# Password gate
# =========================
def inline_logo(path="logo.png") -> str:
    """Return the logo file as a base64 PNG data-URI, or "" when missing/unreadable."""
    try:
        logo = Path(path)
        if not logo.exists():
            return ""
        encoded = base64.b64encode(logo.read_bytes()).decode("ascii")
        return f"data:image/png;base64,{encoded}"
    except Exception:
        return ""
def add_password_gate() -> None:
    """Block the whole app behind a shared access key.

    The key is read from st.secrets["APP_PASSWORD"], falling back to the
    APP_PASSWORD environment variable. Until the correct key is entered the
    script is halted with st.stop(); a successful unlock persists via the
    session-state flag `auth_ok` and triggers a rerun.
    """
    try:
        required = st.secrets.get("APP_PASSWORD", "")
    except Exception:
        # st.secrets raises when no secrets file is configured — use the environment
        required = os.environ.get("APP_PASSWORD", "")
    if not required:
        st.warning("Set APP_PASSWORD in Secrets (or environment) and restart.")
        st.stop()
    if st.session_state.get("auth_ok", False):
        return  # already unlocked this session
    # Branding shown on the locked sidebar
    st.sidebar.markdown(f"""
<div class="centered-container">
<img src="{inline_logo('logo.png')}" class="brand-logo">
<div style='font-weight:800;font-size:1.2rem; margin-top:10px;'>{APP_NAME}</div>
<div style='color:#667085;'>Smart Thinking • Secure Access</div>
</div>
""", unsafe_allow_html=True)
    pwd = st.sidebar.text_input("Access key", type="password", placeholder="••••••••")
    if st.sidebar.button("Unlock", type="primary"):
        if pwd == required:
            st.session_state.auth_ok = True; st.rerun()
        else:
            st.error("Incorrect key.")
    st.stop()  # reached only while still locked
add_password_gate()
# =========================
# Utilities
# =========================
def rmse(y_true, y_pred):
    """Root-mean-squared error between actual and predicted values.

    Implemented directly in numpy — sklearn's ``mean_squared_error`` was only
    used for this one call, and its ``squared=`` pathway is deprecated in
    scikit-learn >= 1.4, so the dependency is unnecessary here.
    Returns a plain float.
    """
    a = np.asarray(y_true, dtype=float)
    p = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((a - p) ** 2)))
def mape(y_true, y_pred, eps: float = 1e-8) -> float:
    """Mean Absolute Percentage Error, in percent.

    Rows whose |actual| is below ``eps`` are excluded from the mean to avoid
    division blow-ups; when every row is excluded the result is NaN.
    """
    actual = np.asarray(y_true, dtype=float)
    predicted = np.asarray(y_pred, dtype=float)
    magnitude = np.abs(actual)
    # near-zero actuals become NaN so nanmean skips them
    safe_denom = np.where(magnitude < eps, np.nan, magnitude)
    pct_err = 100.0 * np.abs(actual - predicted) / safe_denom
    mean_pct = np.nanmean(pct_err)
    return float(mean_pct) if np.isfinite(mean_pct) else float("nan")
def render_bo_bd_note():
    """Render the static explainer box defining the BO and BD mud-weight bounds."""
    st.markdown(
        """
<div class="st-message-box">
<b>What do BO and BD mean?</b><br>
<ul style="margin-top:6px;">
<li><b>BO (Breakout)</b> — the <i>lower</i> mud-weight bound to avoid compressive/shear failure of the wellbore (breakout). Drilling below BO increases breakout risk.</li>
<li><b>BD (Breakdown)</b> — the <i>upper</i> mud-weight bound to avoid tensile fracturing (hydraulic breakdown). Drilling above BD risks induced fractures and losses.</li>
</ul>
The safe mud-weight window is typically between <b>BO</b> and <b>BD</b>.
</div>
""",
        unsafe_allow_html=True,
    )
def pearson_r(y_true, y_pred):
    """Pearson correlation coefficient between two series.

    Returns NaN for fewer than two points or when either series is constant
    (where the correlation is undefined).
    """
    actual = np.asarray(y_true, dtype=float)
    predicted = np.asarray(y_pred, dtype=float)
    if actual.size < 2:
        return float("nan")
    if np.all(actual == actual[0]) or np.all(predicted == predicted[0]):
        return float("nan")
    return float(np.corrcoef(actual, predicted)[0, 1])
@st.cache_resource(show_spinner=False)
def load_model(path: str):
    """joblib-load a model artifact; cached per path for the server process lifetime."""
    return joblib.load(path)
@st.cache_data(show_spinner=False)
def parse_excel(data_bytes: bytes):
    """Parse workbook bytes into {sheet_name: DataFrame}; cached on the raw bytes."""
    bio = io.BytesIO(data_bytes); xl = pd.ExcelFile(bio)
    return {sh: xl.parse(sh) for sh in xl.sheet_names}
def read_book_bytes(b: bytes):
    """Parse uploaded workbook bytes into a sheet dict; empty dict for empty input."""
    if not b:
        return {}
    return parse_excel(b)
def _nice_tick0(xmin: float, step: float = 0.1) -> float:
return step * math.floor(xmin / step) if np.isfinite(xmin) else xmin
def df_centered_rounded(df: pd.DataFrame, hide_index=True, ndigits=2):
    """Render ``df`` via st.dataframe with centered cells and numeric columns
    formatted to ``ndigits`` decimal places. Renders as a side effect; returns None.
    """
    out = df.copy()
    numcols = out.select_dtypes(include=[np.number]).columns
    # Fixed-point format applied only to numeric columns; centering via CSS styles
    styler = (out.style
        .format({c: f"{{:.{ndigits}f}}" for c in numcols})
        .set_properties(**{"text-align":"center"})
        .set_table_styles(TABLE_CENTER_CSS))
    st.dataframe(styler, use_container_width=True, hide_index=hide_index)
def _make_X(df: pd.DataFrame, features: list[str]) -> pd.DataFrame:
X = df.reindex(columns=features, copy=False)
for c in X.columns: X[c] = pd.to_numeric(X[c], errors="coerce")
return X
def find_sheet(book, names):
    """Case-insensitive sheet lookup.

    Returns the workbook's original sheet name for the first candidate in
    ``names`` that matches (ignoring case), or None when nothing matches.
    """
    canonical = {sheet.lower(): sheet for sheet in book}
    for candidate in names:
        match = canonical.get(candidate.lower())
        if match is not None:
            return match
    return None
# =========================
# Excel export helpers
# =========================
def _excel_engine() -> str:
try: import xlsxwriter; return "xlsxwriter"
except Exception: return "openpyxl"
def _excel_safe_name(name: str) -> str:
bad = '[]:*?/\\'; return ''.join('_' if ch in bad else ch for ch in str(name))[:31]
def _round_numeric(df: pd.DataFrame, ndigits: int = 3) -> pd.DataFrame:
out = df.copy()
for c in out.columns:
if pd.api.types.is_float_dtype(out[c]) or pd.api.types.is_integer_dtype(out[c]):
out[c] = pd.to_numeric(out[c], errors="coerce").round(ndigits)
return out
def _summary_table(df: pd.DataFrame, cols: list[str]) -> pd.DataFrame:
    """Min/Max/Mean/Std summary (one row per field) for the columns of ``cols``
    that exist in ``df``; empty DataFrame when none are present."""
    present = [c for c in cols if c in df.columns]
    if not present:
        return pd.DataFrame()
    stats = df[present].agg(['min', 'max', 'mean', 'std']).T
    stats = stats.rename(columns={"min": "Min", "max": "Max", "mean": "Mean", "std": "Std"})
    stats = stats.reset_index(names="Field")
    return _round_numeric(stats, 3)
def _train_ranges_df(ranges: dict[str, tuple[float, float]]) -> pd.DataFrame:
    """Turn {feature: (min, max)} into a Feature/Min/Max table; empty frame for empty input."""
    if not ranges:
        return pd.DataFrame()
    table = pd.DataFrame(ranges).T.reset_index()
    table.columns = ["Feature", "Min", "Max"]
    return _round_numeric(table, 3)
def _excel_autofit(writer, sheet_name: str, df: pd.DataFrame, min_w: int = 8, max_w: int = 40):
    """Best-effort column auto-sizing plus header-row freeze for an exported sheet.

    Silently no-ops when xlsxwriter is not importable; set_column/freeze_panes
    are xlsxwriter worksheet methods. NOTE(review): this checks importability
    of xlsxwriter rather than the writer's actual engine — consistent only
    because _excel_engine prefers xlsxwriter whenever it is importable.
    """
    try: import xlsxwriter
    except Exception: return
    ws = writer.sheets[sheet_name]
    for i, col in enumerate(df.columns):
        # column width = longest of header/stringified values, +2 padding,
        # clamped to [min_w, max_w]
        series = df[col].astype(str)
        max_len = max([len(str(col))] + series.map(len).tolist())
        ws.set_column(i, i, max(min_w, min(max_len + 2, max_w)))
    ws.freeze_panes(1, 0)  # keep the header row visible while scrolling
def _add_sheet(sheets: dict, order: list, name: str, df: pd.DataFrame, ndigits: int):
    """Register a sheet for export: rounded copy into ``sheets`` and its name
    appended to ``order``. Skips None/empty frames."""
    if df is None or df.empty:
        return
    order.append(name)
    sheets[name] = _round_numeric(df, ndigits)
def _available_sections() -> list[str]:
    """Catalog of export sheet names applicable to the results currently in session.

    Always ends with "Info"; "Training_Ranges" appears only when training
    ranges have been captured.
    """
    res = st.session_state.get("results", {})
    phase_sheets = {
        "Train": ["Training", "Training_Metrics_BO", "Training_Metrics_BD", "Training_Summary"],
        "Test": ["Testing", "Testing_Metrics_BO", "Testing_Metrics_BD", "Testing_Summary"],
        "Validate": ["Validation", "Validation_Metrics_BO", "Validation_Metrics_BD", "Validation_Summary", "Validation_OOR"],
        "PredictOnly": ["Prediction", "Prediction_Summary"],
    }
    sections: list[str] = []
    for phase, names in phase_sheets.items():
        if phase in res:
            sections.extend(names)
    if st.session_state.get("train_ranges"):
        sections.append("Training_Ranges")
    sections.append("Info")
    return sections
def build_export_workbook(selected: list[str], ndigits: int = 3, do_autofit: bool = True):
    """Assemble the selected result sheets into an in-memory .xlsx workbook.

    Parameters
    ----------
    selected : sheet names to include (from _available_sections).
    ndigits : rounding applied to numeric cells.
    do_autofit : auto-size columns and freeze the header row
        (effective only under the xlsxwriter engine).

    Returns (workbook bytes, timestamped filename, ordered sheet names), or
    (None, None, []) when there are no results or nothing was selected.

    FIX: ``do_autofit`` was previously accepted but ignored (autofit always
    ran); it now actually gates the _excel_autofit call. Default True keeps
    prior behavior.
    """
    res = st.session_state.get("results", {})
    if not res:
        return None, None, []
    sheets, order = {}, []
    # --- Training ---
    if "Training" in selected and "Train" in res:
        _add_sheet(sheets, order, "Training", res["Train"], ndigits)
    if "Training_Metrics_BO" in selected and res.get("m_train_bo"):
        _add_sheet(sheets, order, "Training_Metrics_BO", pd.DataFrame([res["m_train_bo"]]), ndigits)
    if "Training_Metrics_BD" in selected and res.get("m_train_bd"):
        _add_sheet(sheets, order, "Training_Metrics_BD", pd.DataFrame([res["m_train_bd"]]), ndigits)
    if "Training_Summary" in selected and "Train" in res:
        # summary covers features plus whichever target/pred columns exist
        tr_cols = st.session_state["FEATURES"] + [c for c in [st.session_state["TARGET_BO"], st.session_state["TARGET_BD"], PRED_BO, PRED_BD] if c in res["Train"].columns]
        _add_sheet(sheets, order, "Training_Summary", _summary_table(res["Train"], tr_cols), ndigits)
    # --- Testing ---
    if "Testing" in selected and "Test" in res:
        _add_sheet(sheets, order, "Testing", res["Test"], ndigits)
    if "Testing_Metrics_BO" in selected and res.get("m_test_bo"):
        _add_sheet(sheets, order, "Testing_Metrics_BO", pd.DataFrame([res["m_test_bo"]]), ndigits)
    if "Testing_Metrics_BD" in selected and res.get("m_test_bd"):
        _add_sheet(sheets, order, "Testing_Metrics_BD", pd.DataFrame([res["m_test_bd"]]), ndigits)
    if "Testing_Summary" in selected and "Test" in res:
        te_cols = st.session_state["FEATURES"] + [c for c in [st.session_state["TARGET_BO"], st.session_state["TARGET_BD"], PRED_BO, PRED_BD] if c in res["Test"].columns]
        _add_sheet(sheets, order, "Testing_Summary", _summary_table(res["Test"], te_cols), ndigits)
    # --- Validation ---
    if "Validation" in selected and "Validate" in res:
        _add_sheet(sheets, order, "Validation", res["Validate"], ndigits)
    if "Validation_Metrics_BO" in selected and res.get("m_val_bo"):
        _add_sheet(sheets, order, "Validation_Metrics_BO", pd.DataFrame([res["m_val_bo"]]), ndigits)
    if "Validation_Metrics_BD" in selected and res.get("m_val_bd"):
        _add_sheet(sheets, order, "Validation_Metrics_BD", pd.DataFrame([res["m_val_bd"]]), ndigits)
    if "Validation_Summary" in selected and res.get("sv_val"):
        _add_sheet(sheets, order, "Validation_Summary", pd.DataFrame([res["sv_val"]]), ndigits)
    if "Validation_OOR" in selected and isinstance(res.get("oor_tbl"), pd.DataFrame) and not res["oor_tbl"].empty:
        _add_sheet(sheets, order, "Validation_OOR", res["oor_tbl"].reset_index(drop=True), ndigits)
    # --- Prediction ---
    if "Prediction" in selected and "PredictOnly" in res:
        _add_sheet(sheets, order, "Prediction", res["PredictOnly"], ndigits)
    if "Prediction_Summary" in selected and res.get("sv_pred"):
        _add_sheet(sheets, order, "Prediction_Summary", pd.DataFrame([res["sv_pred"]]), ndigits)
    # --- Extras ---
    if "Training_Ranges" in selected and st.session_state.get("train_ranges"):
        rr = _train_ranges_df(st.session_state["train_ranges"])
        _add_sheet(sheets, order, "Training_Ranges", rr, ndigits)
    if "Info" in selected:
        # provenance sheet: app identity, active columns, export timestamp
        info = pd.DataFrame([
            {"Key":"AppName","Value":APP_NAME},
            {"Key":"Tagline","Value":TAGLINE},
            {"Key":"Targets","Value":f'{st.session_state["TARGET_BO"]}, {st.session_state["TARGET_BD"]}'},
            {"Key":"PredColumns","Value":f'{PRED_BO}, {PRED_BD}'},
            {"Key":"Features","Value":", ".join(st.session_state["FEATURES"])},
            {"Key":"Units","Value":st.session_state.get("X_UNITS","MW (pcf)")},
            {"Key":"ExportedAt","Value":datetime.now().strftime("%Y-%m-%d %H:%M:%S")},
        ])
        _add_sheet(sheets, order, "Info", info, ndigits)
    if not order:
        return None, None, []
    bio = io.BytesIO()
    engine = _excel_engine()
    with pd.ExcelWriter(bio, engine=engine) as writer:
        for name in order:
            df = sheets[name]
            sheet = _excel_safe_name(name)
            df.to_excel(writer, sheet_name=sheet, index=False)
            if do_autofit:
                _excel_autofit(writer, sheet, df)
    bio.seek(0)
    fname = f"MW_Export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"
    return bio.getvalue(), fname, order
def render_export_button(phase_key: str):
    """Sheet multiselect plus download button for the Excel export.

    The button stays disabled until at least one sheet is selected.
    ``phase_key`` keeps the widget keys unique when this renders on
    multiple app phases. Does nothing when no results exist yet.
    """
    res = st.session_state.get("results", {})
    if not res: return
    st.divider(); st.markdown("### Export to Excel")
    options = _available_sections()
    selected = st.multiselect("Sheets to include", options=options, default=[], placeholder="Choose option(s)", key=f"sheets_{phase_key}")
    if not selected:
        # disabled placeholder button so the layout stays stable
        st.caption("Select one or more sheets above to enable the export.")
        st.download_button("⬇️ Export Excel", data=b"", file_name="MW_Export.xlsx",
            mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            disabled=True, key=f"download_{phase_key}")
        return
    # NOTE(review): the workbook is rebuilt on every rerun while sheets are selected
    data, fname, names = build_export_workbook(selected=selected, ndigits=3, do_autofit=True)
    if names: st.caption("Will include: " + ", ".join(names))
    st.download_button("⬇️ Export Excel", data=(data or b""), file_name=(fname or "MW_Export.xlsx"),
        mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        disabled=(data is None), key=f"download_{phase_key}")
# =========================
# Plots
# =========================
def cross_plot_static(actual, pred, xlabel, ylabel, color="#1f77b4"):
    """Static matplotlib actual-vs-predicted cross-plot with a dashed 1:1 line.

    Both axes share one square range padded 3% beyond the combined data
    extent. Returns the Figure (caller hands it to st.pyplot).
    """
    a = pd.Series(actual, dtype=float); p = pd.Series(pred, dtype=float)
    # common range across both series so the 1:1 line is meaningful
    lo = float(min(a.min(), p.min())); hi = float(max(a.max(), p.max()))
    pad = 0.03 * (hi - lo if hi > lo else 1.0); lo2, hi2 = lo - pad, hi + pad
    ticks = np.linspace(lo2, hi2, 5)
    dpi = 110
    # figure sized in pixels: CROSS_W x CROSS_H at the chosen dpi
    fig, ax = plt.subplots(figsize=(CROSS_W/dpi, CROSS_H/dpi), dpi=dpi)
    ax.scatter(a, p, s=14, c=color, alpha=0.9, linewidths=0)
    # perfect-prediction reference
    ax.plot([lo2, hi2], [lo2, hi2], linestyle="--", linewidth=1.2, color=COLORS["ref"])
    ax.set_xlim(lo2, hi2); ax.set_ylim(lo2, hi2)
    ax.set_xticks(ticks); ax.set_yticks(ticks); ax.set_aspect("equal", adjustable="box")
    fmt = FuncFormatter(lambda x, _: f"{x:.2f}")
    ax.xaxis.set_major_formatter(fmt); ax.yaxis.set_major_formatter(fmt)
    ax.set_xlabel(xlabel, fontweight="bold", fontsize=10, color="black")
    ax.set_ylabel(ylabel, fontweight="bold", fontsize=10, color="black")
    ax.tick_params(labelsize=6, colors="black")
    ax.grid(True, linestyle=":", alpha=0.3)
    for s in ax.spines.values(): s.set_linewidth(1.1); s.set_color("#444")
    fig.subplots_adjust(left=0.16, bottom=0.16, right=0.98, top=0.98); return fig
def _depth_series(df):
    """Return (y values, y-axis label, y range) for depth-track plots.

    Uses the first column whose name contains 'depth' (case-insensitive);
    otherwise falls back to a 1-based point index. The range is [max, min]
    so depth increases downward on the plot. The label goes through the
    Y-axis override hook.
    """
    depth_col = next((c for c in df.columns if 'depth' in str(c).lower()), None)
    if depth_col is None:
        y = pd.Series(np.arange(1, len(df) + 1))
        default_label = "Point Index"
    else:
        y = pd.to_numeric(df[depth_col], errors="coerce")
        default_label = depth_col
    reversed_range = [float(y.max()), float(y.min())]
    return y, _track_y_title(default_label), reversed_range
def _x_range_for_tracks(df, cols):
    """Compute a shared x-axis range over the given columns for track plots.

    Returns (xmin, xmax, tick0): data extent padded by 3% on each side, plus
    a "nice" starting tick rounded down to a tenth of the span.

    FIX: removed a duplicated ``pad`` computation (the same line appeared
    twice, with a stray "Option A" comment between).
    """
    x_series = pd.concat([pd.to_numeric(df[c], errors="coerce") for c in cols if c in df], ignore_index=True)
    x_lo, x_hi = float(x_series.min()), float(x_series.max())
    # 3% padding; degenerate (flat) data gets a unit-width pad basis
    pad = 0.03 * (x_hi - x_lo if x_hi > x_lo else 1.0)
    xmin, xmax = x_lo - pad, x_hi + pad
    tick0 = _nice_tick0(xmin, step=max((xmax - xmin) / 10.0, 0.1))
    return xmin, xmax, tick0
def track_plot_single(df, pred_col, actual_col=None, title_suffix=""):
    """Plotly depth-track of one predicted limit, optionally overlaid with its actual.

    X axis sits on top (well-log track convention); Y axis is depth reversed so
    depth increases downward. Returns the plotly Figure.
    """
    y, ylab, y_range = _depth_series(df)
    # x-range spans prediction and (if present) the actual curve
    cols = [pred_col] + ([actual_col] if actual_col and actual_col in df.columns else [])
    xmin, xmax, tick0 = _x_range_for_tracks(df, cols)
    fig = go.Figure()
    if pred_col in df.columns:
        fig.add_trace(go.Scatter(x=df[pred_col], y=y, mode="lines",
            line=dict(color=COLORS["pred_bo"] if pred_col==PRED_BO else COLORS["pred_bd"], width=1.8),
            name=pred_col, hovertemplate=f"{pred_col}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"))
    if actual_col and actual_col in df.columns:
        # actual curve drawn dotted so the prediction stays visually dominant
        fig.add_trace(go.Scatter(x=df[actual_col], y=y, mode="lines",
            line=dict(color=COLORS["actual_bo"] if actual_col==st.session_state["TARGET_BO"] else COLORS["actual_bd"],
                width=2.0, dash="dot"),
            name=f"{actual_col} (actual)", hovertemplate=f"{actual_col}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"))
    fig.update_layout(height=TRACK_H, width=TRACK_W, autosize=False, paper_bgcolor="#fff", plot_bgcolor="#fff",
        margin=dict(l=64, r=16, t=36, b=48), hovermode="closest",
        font=dict(size=FONT_SZ, color="#000"),
        legend=dict(x=0.98, y=0.05, xanchor="right", yanchor="bottom",
            bgcolor="rgba(255,255,255,0.75)", bordercolor="#ccc", borderwidth=1),
        legend_title_text="", title=title_suffix)
    fig.update_xaxes(
        title_text=_track_x_title(),
        title_font=dict(size=20, family=BOLD_FONT, color="#000"),
        tickfont=dict(size=15, family=PLAIN_FONT, color="#000"),
        side="top", range=[xmin, xmax], ticks="outside", tickformat=",.2f",
        tickmode="auto", tick0=tick0, showline=True, linewidth=1.2, linecolor="#444",
        mirror=True, showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
    )
    fig.update_yaxes(
        title_text=ylab,
        title_font=dict(size=20, family=BOLD_FONT, color="#000"),
        tickfont=dict(size=15, family=PLAIN_FONT, color="#000"),
        range=y_range, ticks="outside", showline=True, linewidth=1.2, linecolor="#444",
        mirror=True, showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
    )
    return fig
def track_plot_combined(df):
    """Plotly depth-track showing BO and BD (predicted and, when present, actual)
    on one axis — the safe mud-weight window lies between the two limits.
    Returns the plotly Figure.
    """
    y, ylab, y_range = _depth_series(df)
    # x-range spans every available limit column
    cols = [c for c in [PRED_BO, PRED_BD, st.session_state["TARGET_BO"], st.session_state["TARGET_BD"]] if c in df]
    xmin, xmax, tick0 = _x_range_for_tracks(df, cols)
    fig = go.Figure()
    if PRED_BO in df.columns:
        fig.add_trace(go.Scatter(x=df[PRED_BO], y=y, mode="lines",
            line=dict(color=COLORS["pred_bo"], width=1.8), name=PRED_BO,
            hovertemplate=f"{PRED_BO}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"))
    if st.session_state["TARGET_BO"] in df.columns:
        col = st.session_state["TARGET_BO"]
        fig.add_trace(go.Scatter(x=df[col], y=y, mode="lines",
            line=dict(color=COLORS["actual_bo"], width=2.0, dash="dot"), name=f"{col} (actual)",
            hovertemplate=f"{col}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"))
    if PRED_BD in df.columns:
        fig.add_trace(go.Scatter(x=df[PRED_BD], y=y, mode="lines",
            line=dict(color=COLORS["pred_bd"], width=1.8), name=PRED_BD,
            hovertemplate=f"{PRED_BD}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"))
    if st.session_state["TARGET_BD"] in df.columns:
        col = st.session_state["TARGET_BD"]
        fig.add_trace(go.Scatter(x=df[col], y=y, mode="lines",
            line=dict(color=COLORS["actual_bd"], width=2.0, dash="dot"), name=f"{col} (actual)",
            hovertemplate=f"{col}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"))
    fig.update_layout(height=TRACK_H, width=TRACK_W, autosize=False, paper_bgcolor="#fff", plot_bgcolor="#fff",
        margin=dict(l=64, r=16, t=36, b=48), hovermode="closest",
        font=dict(size=FONT_SZ, color="#000"),
        legend=dict(x=0.98, y=0.05, xanchor="right", yanchor="bottom",
            bgcolor="rgba(255,255,255,0.75)", bordercolor="#ccc", borderwidth=1),
        legend_title_text="", title="Combined (Breakout / Breakdown)")
    fig.update_xaxes(
        title_text=_track_x_title(),
        title_font=dict(size=20, family=BOLD_FONT, color="#000"),
        tickfont=dict(size=15, family=PLAIN_FONT, color="#000"),
        side="top", range=[xmin, xmax], ticks="outside", tickformat=",.2f",
        tickmode="auto", tick0=tick0, showline=True, linewidth=1.2, linecolor="#444",
        mirror=True, showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
    )
    fig.update_yaxes(
        title_text=ylab,
        title_font=dict(size=20, family=BOLD_FONT, color="#000"),
        tickfont=dict(size=15, family=PLAIN_FONT, color="#000"),
        range=y_range, ticks="outside", showline=True, linewidth=1.2, linecolor="#444",
        mirror=True, showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
    )
    return fig
def preview_tracks(df: pd.DataFrame, cols: list[str]):
    """Quick matplotlib preview: one mini depth-track per selected column.

    Columns absent from ``df`` are dropped; if none remain, a placeholder
    figure with a message is returned instead.
    """
    cols = [c for c in cols if c in df.columns]; n = len(cols)
    if n == 0:
        fig, ax = plt.subplots(figsize=(4, 2)); ax.text(0.5, 0.5, "No selected columns", ha="center", va="center"); ax.axis("off"); return fig
    # depth column (name containing 'depth') when available, else 1-based row index
    depth_col = next((c for c in df.columns if 'depth' in str(c).lower()), None)
    if depth_col is not None:
        idx = pd.to_numeric(df[depth_col], errors="coerce"); y_label = depth_col
    else:
        idx = pd.Series(np.arange(1, len(df) + 1)); y_label = "Point Index"
    cmap = plt.get_cmap("tab20"); col_colors = {col: cmap(i % cmap.N) for i, col in enumerate(cols)}
    fig, axes = plt.subplots(1, n, figsize=(2.3 * n, 7.0), sharey=True, dpi=100)
    if n == 1: axes = [axes]  # keep the zip below uniform for a single subplot
    for i, (ax, col) in enumerate(zip(axes, cols)):
        x = pd.to_numeric(df[col], errors="coerce"); ax.plot(x, idx, '-', lw=1.8, color=col_colors[col])
        ax.set_xlabel(col); ax.xaxis.set_label_position('top'); ax.xaxis.tick_top()
        # y-limits reversed so depth increases downward
        ax.set_ylim(float(idx.max()), float(idx.min())); ax.grid(True, linestyle=":", alpha=0.3)
        if i == 0: ax.set_ylabel(y_label)
        else: ax.tick_params(labelleft=False); ax.set_ylabel("")
    fig.tight_layout(); return fig
# =========================
# Load models + metas
# =========================
def _first_in_dirs(names):
    """Return the first existing, non-empty candidate file among the given names.

    /mnt/data (uploads) is searched before the repo's models/ directory so an
    uploaded artifact overrides the bundled copy. None when nothing is found.
    """
    search_dirs = (ALT_DIR, MODELS_DIR)
    for candidate in names:
        for directory in search_dirs:
            path = directory / candidate
            if path.exists() and path.stat().st_size > 0:
                return path
    return None
def _load_meta(p: Path) -> dict:
if not p or not p.exists(): return {}
try: return json.loads(p.read_text(encoding="utf-8"))
except Exception: return {}
# Resolve artifact paths (uploaded copies in /mnt/data take priority over models/)
bo_model_path = _first_in_dirs(CANDS["bo_model"])
bd_model_path = _first_in_dirs(CANDS["bd_model"])
bo_meta_path = _first_in_dirs(CANDS["bo_meta"])
bd_meta_path = _first_in_dirs(CANDS["bd_meta"])
# Both models are mandatory; metas are optional (fallbacks exist)
if not (bo_model_path and bd_model_path):
    st.error("Models not found. Place `bo_model.joblib` and `bd_model.joblib` in `models/` or upload to `/mnt/data/`."); st.stop()
meta_bo = _load_meta(bo_meta_path)
meta_bd = _load_meta(bd_meta_path)
# Try load models with clear error if env mismatch
def _try_load_or_explain(p: Path, name: str):
    """Load a joblib artifact, or show an actionable error and halt the app.

    Unpickling commonly fails on numpy/scikit-learn version skew, so the
    error message surfaces the versions recorded in the meta JSON (BO's meta
    first, falling back to BD's; "N/A" when neither records them).
    """
    try:
        return load_model(str(p))
    except Exception as e:
        want_np = (meta_bo.get("versions",{}) or meta_bd.get("versions",{})).get("numpy", "N/A")
        want_skl = (meta_bo.get("versions",{}) or meta_bd.get("versions",{})).get("scikit_learn", "N/A")
        st.error(
            f"Failed to load {name} at {p}.\n\n{e}\n\n"
            f"If this mentions `numpy._core` or versions, install:\n"
            f" • numpy {want_np}\n • scikit-learn {want_skl}"
        )
        st.stop()
def _unwrap(payload):
if isinstance(payload, dict) and "model" in payload:
return payload["model"], payload.get("model_info", {})
return payload, getattr(payload, "model_info", {})
payload_bo = _try_load_or_explain(bo_model_path, "BO model")
payload_bd = _try_load_or_explain(bd_model_path, "BD model")
model_bo, info_bo = _unwrap(payload_bo)
model_bd, info_bd = _unwrap(payload_bd)
# Feature lists: embedded model_info wins, then meta JSON, then the hard fallback
features_bo = list((info_bo.get("features") or meta_bo.get("features") or FEATURES_DEFAULT))
features_bd = list((info_bd.get("features") or meta_bd.get("features") or FEATURES_DEFAULT))
# Ordered, de-duplicated union (BO's order first) — used for strict header checks
features_union = list(dict.fromkeys(features_bo + [c for c in features_bd if c not in features_bo]))
TARGET_BO = str(meta_bo.get("target") or TARGET_BO_DEFAULT)
TARGET_BD = str(meta_bd.get("target") or TARGET_BD_DEFAULT)
X_UNITS = str(meta_bo.get("units") or meta_bd.get("units") or "MW (pcf)")
# Publish the resolved configuration to session state for the helpers above
st.session_state["FEATURES_BO"] = features_bo
st.session_state["FEATURES_BD"] = features_bd
st.session_state["FEATURES"] = features_union
st.session_state["TARGET_BO"] = TARGET_BO
st.session_state["TARGET_BD"] = TARGET_BD
st.session_state["X_UNITS"] = X_UNITS
# =========================
# Session state
# =========================
# One-time defaults; setdefault preserves existing values across Streamlit reruns
st.session_state.setdefault("app_step", "intro")  # intro | dev | validate | predict
st.session_state.setdefault("results", {})  # per-phase DataFrames and metric dicts
st.session_state.setdefault("train_ranges", None)  # {feature: (min, max)} from last run
st.session_state.setdefault("dev_file_name","")
st.session_state.setdefault("dev_file_bytes",b"")
st.session_state.setdefault("dev_file_loaded",False)
st.session_state.setdefault("dev_preview",False)
st.session_state.setdefault("show_preview_modal", False)
# =========================
# Sidebar branding
# =========================
# Persistent sidebar branding (logo + app name + tagline), shown on every page
st.sidebar.markdown(f"""
<div class="centered-container">
<img src="{inline_logo('logo.png')}" class="brand-logo">
<div style='font-weight:800;font-size:1.2rem;'>{APP_NAME}</div>
<div style='color:#667085;'>{TAGLINE}</div>
</div>
""", unsafe_allow_html=True)
def sticky_header(title, message):
    """Render a title + message banner that stays pinned to the top while scrolling."""
    st.markdown(f"""
<style>
.sticky-container {{
position: sticky; top: 0; background-color: white; z-index: 100;
padding-top: 10px; padding-bottom: 10px; border-bottom: 1px solid #eee;
}}
</style>
<div class="sticky-container">
<h3>{title}</h3>
<p>{message}</p>
</div>
""", unsafe_allow_html=True)
# =========================
# INTRO
# =========================
# Landing page: explains BO/BD and the workflow, then advances to Case Building
if st.session_state.app_step == "intro":
    st.header("Welcome!")
    st.markdown("This software estimates **Breakout** and **Breakdown** mud-weight limits from drilling data.")
    render_bo_bd_note()
    st.subheader("How It Works")
    st.markdown("1) **Upload data** and preview.\n2) **Run Model** to compute Train/Test metrics.\n3) Go to **Validation** (with actual BO/BD) or **Prediction** (no actuals).\n4) Use **Combined** tab to see both limits on one track.")
    if st.button("Start Showcase", type="primary"):
        st.session_state.app_step = "dev"; st.rerun()
# =========================
# CASE BUILDING
# =========================
def _strict_prepare(df: pd.DataFrame, stage: str, features: list[str]) -> pd.DataFrame:
    """Enforce the strict-header contract: every feature column must be present
    with its exact name; otherwise show a detailed error and halt the app.

    Returns a defensive copy so later prediction-column writes don't mutate
    the caller's frame. ``stage`` labels the error (e.g. "Training").

    FIX: removed the unused local ``extras`` (computed but never referenced).
    """
    df = df.copy()
    found = list(map(str, df.columns))
    missing = [c for c in features if c not in df.columns]
    if missing:
        st.error(
            f"{stage}: Missing required column(s): {missing}\n\n"
            f"Found columns: {found}\n\n"
            f"Expected **exact** feature headers: {features}\n\n"
            f"Targets expected (if present in this phase): {TARGET_BO}, {TARGET_BD}"
        )
        st.stop()
    return df
if st.session_state.app_step == "dev":
st.sidebar.header("Case Building")
up = st.sidebar.file_uploader("Upload Your Data File", type=["xlsx","xls"])
if up is not None:
st.session_state.dev_file_bytes = up.getvalue()
st.session_state.dev_file_name = up.name
st.session_state.dev_file_loaded = True
st.session_state.dev_preview = False
if st.session_state.dev_file_loaded:
book = read_book_bytes(st.session_state.dev_file_bytes)
if book:
df0 = next(iter(book.values()))
st.sidebar.caption(f"**Data loaded:** {st.session_state.dev_file_name}{df0.shape[0]} rows × {df0.shape[1]} cols")
if st.sidebar.button("Preview data", use_container_width=True, disabled=not st.session_state.dev_file_loaded):
st.session_state.show_preview_modal = True; st.session_state.dev_preview = True
run = st.sidebar.button("Run Model", type="primary", use_container_width=True)
if st.sidebar.button("Proceed to Validation ▶", use_container_width=True): st.session_state.app_step="validate"; st.rerun()
if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True): st.session_state.app_step="predict"; st.rerun()
if st.session_state.dev_file_loaded and st.session_state.dev_preview:
sticky_header("Case Building", "Previewed ✓ — now click **Run Model**.")
elif st.session_state.dev_file_loaded:
sticky_header("Case Building", "📄 **Preview** then click **Run Model**.")
else:
sticky_header("Case Building", "**Upload your data** and run the model.")
if run and st.session_state.dev_file_bytes:
book = read_book_bytes(st.session_state.dev_file_bytes)
sh_train = find_sheet(book, ["Train","Training","training2","train","training"])
sh_test = find_sheet(book, ["Test","Testing","testing2","test","testing"])
if sh_train is None or sh_test is None:
st.markdown('<div class="st-message-box st-error">Workbook must include Train/Training/training2 and Test/Test… sheets.</div>', unsafe_allow_html=True); st.stop()
tr_raw = book[sh_train].copy(); te_raw = book[sh_test].copy()
tr = _strict_prepare(tr_raw, "Training", st.session_state["FEATURES"])
te = _strict_prepare(te_raw, "Testing", st.session_state["FEATURES"])
Xtr_bo = _make_X(tr, st.session_state["FEATURES_BO"])
Xtr_bd = _make_X(tr, st.session_state["FEATURES_BD"])
Xte_bo = _make_X(te, st.session_state["FEATURES_BO"])
Xte_bd = _make_X(te, st.session_state["FEATURES_BD"])
tr[PRED_BO] = model_bo.predict(Xtr_bo)
tr[PRED_BD] = model_bd.predict(Xtr_bd)
te[PRED_BO] = model_bo.predict(Xte_bo)
te[PRED_BD] = model_bd.predict(Xte_bd)
st.session_state.results["Train"]=tr; st.session_state.results["Test"]=te
st.session_state.results["m_train_bo"]={"R": pearson_r(tr[TARGET_BO], tr[PRED_BO]), "RMSE": rmse(tr[TARGET_BO], tr[PRED_BO]), "MAPE": mape(tr[TARGET_BO], tr[PRED_BO])}
st.session_state.results["m_train_bd"]={"R": pearson_r(tr[TARGET_BD], tr[PRED_BD]), "RMSE": rmse(tr[TARGET_BD], tr[PRED_BD]), "MAPE": mape(tr[TARGET_BD], tr[PRED_BD])}
st.session_state.results["m_test_bo"] ={"R": pearson_r(te[TARGET_BO], te[PRED_BO]), "RMSE": rmse(te[TARGET_BO], te[PRED_BO]), "MAPE": mape(te[TARGET_BO], te[PRED_BO])}
st.session_state.results["m_test_bd"] ={"R": pearson_r(te[TARGET_BD], te[PRED_BD]), "RMSE": rmse(te[TARGET_BD], te[PRED_BD]), "MAPE": mape(te[TARGET_BD], te[PRED_BD])}
tr_min = tr[st.session_state["FEATURES"]].min().to_dict(); tr_max = tr[st.session_state["FEATURES"]].max().to_dict()
st.session_state.train_ranges = {f:(float(tr_min[f]), float(tr_max[f])) for f in st.session_state["FEATURES"]}
st.markdown('<div class="st-message-box st-success">Case has been built and results are displayed below.</div>', unsafe_allow_html=True)
def _metrics_block(lbl, m):
name = {"BO": "Breakout", "BD": "Breakdown"}.get(lbl, lbl)
c1, c2, c3 = st.columns(3)
c1.metric(f"R ({name})", f"{m['R']:.3f}")
c2.metric(f"RMSE ({name})", f"{m['RMSE']:.2f}")
c3.metric(f"MAPE (%) ({name})", f"{m['MAPE']:.2f}%")
def _dev_block(df, mbo, mbd):
    """Render the metric tiles plus Breakout / Breakdown / Combined plot tabs
    for one scored dataset (used for both the Training and Testing splits)."""
    _metrics_block("BO", mbo); _metrics_block("BD", mbd)
    st.markdown("<div style='text-align:left;font-size:0.8em;color:#6b7280;margin-top:-16px;margin-bottom:8px;'><strong>R</strong> = Pearson correlation • <strong>RMSE</strong> in MW (pcf) • <strong>MAPE</strong> in %</div>", unsafe_allow_html=True)
    tab_bo, tab_bd, tab_all = st.tabs(["Breakout", "Breakdown", "Combined"])
    # The BO and BD tabs are structurally identical; drive both from one table.
    panels = (
        (tab_bo, PRED_BO, TARGET_BO, "Breakout", "bo", COLORS["pred_bo"]),
        (tab_bd, PRED_BD, TARGET_BD, "Breakdown", "bd", COLORS["pred_bd"]),
    )
    for tab, pred_col, act_col, suffix, key, color in panels:
        with tab:
            left, right = st.columns([3,1], gap="large")
            with left:
                # Depth track: predicted vs actual limit (interactive Plotly).
                st.plotly_chart(
                    track_plot_single(df, pred_col, actual_col=act_col, title_suffix=suffix),
                    use_container_width=False, config={"displayModeBar": False, "scrollZoom": True},
                )
            with right:
                # Static Matplotlib cross-plot (actual vs predicted).
                xlab, ylab = _cross_titles(key)
                st.pyplot(
                    cross_plot_static(df[act_col], df[pred_col], xlab, ylab, color),
                    use_container_width=False,
                )
    with tab_all:
        st.plotly_chart(track_plot_combined(df), use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
# Render whichever of the Train/Test results already exist in session state
# (persists across reruns once the case has been built).
if "Train" in st.session_state.results or "Test" in st.session_state.results:
tab1, tab2 = st.tabs(["Training", "Testing"])
if "Train" in st.session_state.results:
with tab1: _dev_block(st.session_state.results["Train"], st.session_state.results["m_train_bo"], st.session_state.results["m_train_bd"])
if "Test" in st.session_state.results:
with tab2: _dev_block(st.session_state.results["Test"], st.session_state.results["m_test_bo"], st.session_state.results["m_test_bd"])
render_export_button(phase_key="dev")
# =========================
# VALIDATION (with actuals)
# =========================
# Sidebar controls for the validation phase (dataset must include BO/BD actuals).
if st.session_state.app_step == "validate":
st.sidebar.header("Validate the Models")
up = st.sidebar.file_uploader("Upload Validation Excel", type=["xlsx","xls"])
if up is not None:
book = read_book_bytes(up.getvalue())
if book:
# First sheet is used only for the "Data loaded" caption below.
df0 = next(iter(book.values()))
# Show the loaded file's name and shape. Fix: the original f-string had no
# separator between the filename and the row count, rendering e.g.
# "file.xlsx123 rows × 5 cols".
st.sidebar.caption(f"**Data loaded:** {up.name} — {df0.shape[0]} rows × {df0.shape[1]} cols")
# "Preview data" only raises a flag; the modal itself renders near the bottom
# of the script on the next pass.
if st.sidebar.button("Preview data", use_container_width=True, disabled=(up is None)):
st.session_state.show_preview_modal = True
go_btn = st.sidebar.button("Predict & Validate", type="primary", use_container_width=True)
# Phase navigation (st.rerun restarts the script in the chosen phase).
if st.sidebar.button("⬅ Back to Case Building", use_container_width=True): st.session_state.app_step="dev"; st.rerun()
if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True): st.session_state.app_step="predict"; st.rerun()
sticky_header("Validate the Models", "Upload a dataset with the same **feature** columns and **BO/BD** actuals.")
# On "Predict & Validate": reload the workbook, locate the validation sheet
# (falling back to the first sheet), strictly prepare it, and score with both models.
if go_btn and up is not None:
book = read_book_bytes(up.getvalue())
name = find_sheet(book, ["Validation","Validate","validation2","Val","val"]) or list(book.keys())[0]
df = _strict_prepare(book[name].copy(), "Validation", st.session_state["FEATURES"])
df[PRED_BO] = model_bo.predict(_make_X(df, st.session_state["FEATURES_BO"]))
df[PRED_BD] = model_bd.predict(_make_X(df, st.session_state["FEATURES_BD"]))
st.session_state.results["Validate"]=df
# Compare validation inputs against the training min–max ranges captured at
# case-building time, and flag rows with any out-of-range (OOR) feature.
ranges = st.session_state.train_ranges; oor_pct = 0.0; tbl = None
if ranges:
    # Build the boolean violation matrix ONCE and reuse it below (the original
    # recomputed this whole DataFrame a second time for the "Violations" column).
    viol = pd.DataFrame({f: (df[f] < ranges[f][0]) | (df[f] > ranges[f][1])
                         for f in st.session_state["FEATURES"]})
    any_viol = viol.any(axis=1)
    oor_pct = float(any_viol.mean() * 100.0)
    if any_viol.any():
        # Table of offending rows, feature values rounded for display.
        tbl = df.loc[any_viol, st.session_state["FEATURES"]].copy()
        for c in st.session_state["FEATURES"]:
            if pd.api.types.is_numeric_dtype(tbl[c]): tbl[c] = tbl[c].round(2)
        # Comma-separated list of which feature columns violated their range.
        tbl["Violations"] = viol.loc[any_viol].apply(
            lambda r: ", ".join([c for c, v in r.items() if v]), axis=1)
# Validation metrics (actual BO/BD columns are present in this phase) plus a
# compact summary block and the OOR table, all cached in session state.
st.session_state.results["m_val_bo"]={"R": pearson_r(df[TARGET_BO], df[PRED_BO]), "RMSE": rmse(df[TARGET_BO], df[PRED_BO]), "MAPE": mape(df[TARGET_BO], df[PRED_BO])}
st.session_state.results["m_val_bd"]={"R": pearson_r(df[TARGET_BD], df[PRED_BD]), "RMSE": rmse(df[TARGET_BD], df[PRED_BD]), "MAPE": mape(df[TARGET_BD], df[PRED_BD])}
st.session_state.results["sv_val"]={"n":len(df), "bo_min":float(df[PRED_BO].min()), "bo_max":float(df[PRED_BO].max()),
                                    "bd_min":float(df[PRED_BD].min()), "bd_max":float(df[PRED_BD].max()), "oor":oor_pct}
st.session_state.results["oor_tbl"]=tbl
# Render the cached validation results (survives reruns).
if "Validate" in st.session_state.results:
df = st.session_state.results["Validate"]
m_bo, m_bd = st.session_state.results["m_val_bo"], st.session_state.results["m_val_bd"]
# Metric tiles — one row per target.
c1,c2,c3 = st.columns(3)
c1.metric("R (Breakout)", f"{m_bo['R']:.3f}")
c2.metric("RMSE (Breakout)", f"{m_bo['RMSE']:.2f}")
c3.metric("MAPE (%) (Breakout)", f"{m_bo['MAPE']:.2f}%")
c1,c2,c3 = st.columns(3)
c1.metric("R (Breakdown)", f"{m_bd['R']:.3f}")
c2.metric("RMSE (Breakdown)", f"{m_bd['RMSE']:.2f}")
c3.metric("MAPE (%) (Breakdown)", f"{m_bd['MAPE']:.2f}%")
st.markdown("<div style='text-align:left;font-size:0.8em;color:#6b7280;margin-top:-16px;margin-bottom:8px;'>R = Pearson correlation • RMSE in MW (pcf) • MAPE in %</div>", unsafe_allow_html=True)
# Per-target tabs: interactive depth track (left) + static cross-plot (right).
t1, t2, t3 = st.tabs(["Breakout", "Breakdown", "Combined"])
with t1:
left, right = st.columns([3,1], gap="large")
with left:
st.plotly_chart(
track_plot_single(df, PRED_BO, actual_col=TARGET_BO, title_suffix="Breakout"),
use_container_width=False, config={"displayModeBar": False, "scrollZoom": True}
)
with right:
xlab, ylab = _cross_titles("bo")
st.pyplot(
cross_plot_static(df[TARGET_BO], df[PRED_BO], xlab, ylab, COLORS["pred_bo"]),
use_container_width=False
)
with t2:
left, right = st.columns([3,1], gap="large")
with left:
st.plotly_chart(
track_plot_single(df, PRED_BD, actual_col=TARGET_BD, title_suffix="Breakdown"),
use_container_width=False, config={"displayModeBar": False, "scrollZoom": True}
)
with right:
xlab, ylab = _cross_titles("bd")
st.pyplot(
cross_plot_static(df[TARGET_BD], df[PRED_BD], xlab, ylab, COLORS["pred_bd"]),
use_container_width=False
)
with t3:
st.plotly_chart(track_plot_combined(df), use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
render_export_button(phase_key="validate")
# Warn when any input rows fell outside the training min–max envelope and,
# if so, show the table of offending rows built at prediction time.
sv = st.session_state.results["sv_val"]
if sv["oor"] > 0: st.markdown('<div class="st-message-box st-warning">Some inputs fall outside **training min–max** ranges.</div>', unsafe_allow_html=True)
if st.session_state.results.get("oor_tbl") is not None:
st.write("*Out-of-range rows (vs. Training min–max):*"); df_centered_rounded(st.session_state.results["oor_tbl"])
# =========================
# PREDICTION (no actuals)
# =========================
# Sidebar controls for the prediction phase (features only, no BO/BD actuals).
if st.session_state.app_step == "predict":
st.sidebar.header("Prediction (No Actual BO/BD)")
up = st.sidebar.file_uploader("Upload Prediction Excel", type=["xlsx","xls"])
if up is not None:
book = read_book_bytes(up.getvalue())
if book:
# First sheet is used only for the "Data loaded" caption below.
df0 = next(iter(book.values()))
# Show the loaded file's name and shape. Fix: the original f-string had no
# separator between the filename and the row count (same defect as the
# validation-phase caption).
st.sidebar.caption(f"**Data loaded:** {up.name} — {df0.shape[0]} rows × {df0.shape[1]} cols")
# "Preview data" only raises a flag; the modal renders near the bottom of the script.
if st.sidebar.button("Preview data", use_container_width=True, disabled=(up is None)):
st.session_state.show_preview_modal = True
go_btn = st.sidebar.button("Predict", type="primary", use_container_width=True)
if st.sidebar.button("⬅ Back to Case Building", use_container_width=True): st.session_state.app_step="dev"; st.rerun()
sticky_header("Prediction", "Upload a dataset with **feature columns only** (no BO/BD actuals).")
# On "Predict": use the FIRST sheet (no sheet-name search here, unlike validation),
# prepare it strictly, and score with both models.
if go_btn and up is not None:
book = read_book_bytes(up.getvalue()); name = list(book.keys())[0]
df = _strict_prepare(book[name].copy(), "Prediction", st.session_state["FEATURES"])
df[PRED_BO] = model_bo.predict(_make_X(df, st.session_state["FEATURES_BO"]))
df[PRED_BD] = model_bd.predict(_make_X(df, st.session_state["FEATURES_BD"]))
st.session_state.results["PredictOnly"]=df
# % of rows with any feature outside the training min–max envelope.
ranges = st.session_state.train_ranges; oor_pct = 0.0
if ranges:
any_viol = pd.DataFrame({f:(df[f]<ranges[f][0])|(df[f]>ranges[f][1]) for f in st.session_state["FEATURES"]}).any(axis=1)
oor_pct = float(any_viol.mean()*100.0)
# Summary stats of the predictions (no actuals available in this phase, so
# there are no R/RMSE/MAPE metrics here). std uses ddof=0 (population std).
st.session_state.results["sv_pred"]={
"n":len(df),
"bo_min":float(df[PRED_BO].min()), "bo_max":float(df[PRED_BO].max()),
"bd_min":float(df[PRED_BD].min()), "bd_max":float(df[PRED_BD].max()),
"bo_mean":float(df[PRED_BO].mean()), "bo_std":float(df[PRED_BO].std(ddof=0)),
"bd_mean":float(df[PRED_BD].mean()), "bd_std":float(df[PRED_BD].std(ddof=0)),
"oor":oor_pct
}
# Render the cached prediction-only results: summary table on the left,
# per-target track plots on the right, combined track below.
if "PredictOnly" in st.session_state.results:
df = st.session_state.results["PredictOnly"]; sv = st.session_state.results["sv_pred"]
col_left, col_right = st.columns([2,3], gap="large")
with col_left:
table = pd.DataFrame({
"Metric": ["# points","BO min","BO max","BO mean","BO std","BD min","BD max","BD mean","BD std","OOR %"],
"Value": [sv["n"], round(sv["bo_min"],2), round(sv["bo_max"],2), round(sv["bo_mean"],2), round(sv["bo_std"],2),
round(sv["bd_min"],2), round(sv["bd_max"],2), round(sv["bd_mean"],2), round(sv["bd_std"],2), f'{sv["oor"]:.1f}%']
})
st.markdown('<div class="st-message-box st-success">Predictions ready ✓</div>', unsafe_allow_html=True)
df_centered_rounded(table, hide_index=True)
st.caption("**★ OOR** = % of rows whose input features fall outside the training min–max range.")
with col_right:
t1, t2 = st.tabs(["Breakout", "Breakdown"])
# actual_col=None: there are no actuals to overlay in this phase.
with t1: st.plotly_chart(track_plot_single(df, PRED_BO, actual_col=None, title_suffix="Breakout"),
use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
with t2: st.plotly_chart(track_plot_single(df, PRED_BD, actual_col=None, title_suffix="Breakdown"),
use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
st.plotly_chart(track_plot_combined(df), use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
render_export_button(phase_key="predict")
# =========================
# Preview modal
# =========================
# Preview modal — rendered once per run when the sidebar "Preview data" button
# set the flag; the flag is cleared at the end so it does not persist.
if st.session_state.show_preview_modal:
book_to_preview = {}
# Pick the data source for the current phase. NOTE(review): the validate/predict
# branch relies on the local `up` from that phase's sidebar code above.
if st.session_state.app_step == "dev":
book_to_preview = read_book_bytes(st.session_state.dev_file_bytes)
elif st.session_state.app_step in ["validate", "predict"] and 'up' in locals() and up is not None:
book_to_preview = read_book_bytes(up.getvalue())
with st.expander("Preview data", expanded=True):
if not book_to_preview:
st.markdown('<div class="st-message-box">No data loaded yet.</div>', unsafe_allow_html=True)
else:
# One tab per workbook sheet.
names = list(book_to_preview.keys()); tabs = st.tabs(names)
for t, name in zip(tabs, names):
with t:
df = book_to_preview[name]
# show a strict check summary, but do not stop
missing = [c for c in st.session_state["FEATURES"] if c not in df.columns]
st.write("**Missing vs required features:**", missing if missing else "None ✅")
t1, t2 = st.tabs(["Tracks", "Summary"])
with t1:
st.pyplot(preview_tracks(df, st.session_state["FEATURES"]), use_container_width=True)
with t2:
# Min/Max/Mean/Std summary for whichever feature columns exist.
feat_present = [c for c in st.session_state["FEATURES"] if c in df.columns]
if not feat_present:
st.info("No feature columns found to summarize.")
else:
tbl = (df[feat_present].agg(['min','max','mean','std'])
.T.rename(columns={"min":"Min","max":"Max","mean":"Mean","std":"Std"})
.reset_index(names="Feature"))
df_centered_rounded(tbl)
# One-shot: clear the flag so the modal closes on the next rerun.
st.session_state.show_preview_modal = False
# =========================
# Footer
# =========================
# Static footer, rendered as raw HTML.
st.markdown("""
<br><br><br>
<hr>
<div style='text-align:center;color:#6b7280;font-size:1.0em;'>
© 2025 Smart Thinking AI-Solutions Team. All rights reserved.<br>
Website: <a href="https://smartthinking.com.sa" target="_blank" rel="noopener noreferrer">smartthinking.com.sa</a>
</div>
""", unsafe_allow_html=True)