Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -6,7 +6,7 @@ import pandas as pd
|
|
| 6 |
import numpy as np
|
| 7 |
import joblib
|
| 8 |
|
| 9 |
-
# matplotlib only for
|
| 10 |
import matplotlib
|
| 11 |
matplotlib.use("Agg")
|
| 12 |
import matplotlib.pyplot as plt
|
|
@@ -14,75 +14,63 @@ import matplotlib.pyplot as plt
|
|
| 14 |
import plotly.graph_objects as go
|
| 15 |
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
|
| 16 |
|
| 17 |
-
|
| 18 |
# =========================
|
| 19 |
# Defaults / Constants
|
| 20 |
# =========================
|
| 21 |
FEATURES = ["Q, gpm", "SPP(psi)", "T (kft.lbf)", "WOB (klbf)", "ROP (ft/h)"]
|
| 22 |
TARGET = "UCS"
|
| 23 |
-
|
| 24 |
MODELS_DIR = Path("models")
|
| 25 |
DEFAULT_MODEL = MODELS_DIR / "ucs_rf.joblib"
|
| 26 |
MODEL_FALLBACKS = [MODELS_DIR / "model.joblib", MODELS_DIR / "model.pkl"]
|
| 27 |
|
| 28 |
COLORS = {"pred": "#1f77b4", "actual": "#f2b702", "ref": "#5a5a5a"}
|
| 29 |
|
| 30 |
-
#
|
| 31 |
-
CROSS_W, CROSS_H =
|
| 32 |
-
TRACK_W, TRACK_H = 220, 700
|
| 33 |
-
|
| 34 |
|
| 35 |
# =========================
|
| 36 |
# Page / Theme
|
| 37 |
# =========================
|
| 38 |
st.set_page_config(page_title="ST_GeoMech_UCS", page_icon="logo.png", layout="wide")
|
|
|
|
|
|
|
|
|
|
| 39 |
st.markdown(
|
| 40 |
"""
|
| 41 |
<style>
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
[data-testid="stBlock"]{ margin-top:0 !important; }
|
| 56 |
-
.help-foot { color:#6b7280; font-size:0.95rem; }
|
| 57 |
</style>
|
| 58 |
""",
|
| 59 |
unsafe_allow_html=True
|
| 60 |
)
|
| 61 |
|
| 62 |
-
|
| 63 |
# =========================
|
| 64 |
-
#
|
| 65 |
# =========================
|
| 66 |
def inline_logo(path="logo.png") -> str:
|
| 67 |
try:
|
| 68 |
p = Path(path)
|
| 69 |
-
if not p.exists():
|
| 70 |
-
return ""
|
| 71 |
return f"data:image/png;base64,{base64.b64encode(p.read_bytes()).decode('ascii')}"
|
| 72 |
except Exception:
|
| 73 |
return ""
|
| 74 |
|
| 75 |
-
|
| 76 |
-
# =========================
|
| 77 |
-
# Password gate (branded)
|
| 78 |
-
# =========================
|
| 79 |
def add_password_gate() -> bool:
|
| 80 |
-
"""
|
| 81 |
-
Ask for a password (APP_PASSWORD in Secrets or Env) before rendering the app.
|
| 82 |
-
If not configured, block with a clear admin message.
|
| 83 |
-
"""
|
| 84 |
-
# pull required password
|
| 85 |
-
required = ""
|
| 86 |
try:
|
| 87 |
required = st.secrets.get("APP_PASSWORD", "")
|
| 88 |
except Exception:
|
|
@@ -121,26 +109,30 @@ def add_password_gate() -> bool:
|
|
| 121 |
</div>
|
| 122 |
</div>
|
| 123 |
<div style="font-size:1.25rem;font-weight:700;margin:8px 0 4px 0;">Protected</div>
|
| 124 |
-
<div style="color:#6b7280;margin-bottom:14px;">
|
|
|
|
|
|
|
| 125 |
""",
|
| 126 |
unsafe_allow_html=True
|
| 127 |
)
|
|
|
|
| 128 |
pwd = st.text_input("Access key", type="password", placeholder="••••••••")
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
|
|
|
| 136 |
|
|
|
|
| 137 |
|
| 138 |
-
#
|
| 139 |
add_password_gate()
|
| 140 |
|
| 141 |
-
|
| 142 |
# =========================
|
| 143 |
-
#
|
| 144 |
# =========================
|
| 145 |
try:
|
| 146 |
dialog = st.dialog
|
|
@@ -177,27 +169,17 @@ def parse_excel(data_bytes: bytes):
|
|
| 177 |
|
| 178 |
def read_book_bytes(data_bytes: bytes):
|
| 179 |
if not data_bytes: return {}
|
| 180 |
-
try:
|
| 181 |
-
return parse_excel(data_bytes)
|
| 182 |
except Exception as e:
|
| 183 |
-
st.error(f"Failed to read Excel: {e}")
|
| 184 |
-
return {}
|
| 185 |
|
| 186 |
def find_sheet(book, names):
|
| 187 |
low2orig = {k.lower(): k for k in book.keys()}
|
| 188 |
for nm in names:
|
| 189 |
-
if nm.lower() in low2orig:
|
| 190 |
-
return low2orig[nm.lower()]
|
| 191 |
return None
|
| 192 |
|
| 193 |
-
|
| 194 |
-
# ---------- Plot helpers (interactive, fixed size, full outline) ----------
|
| 195 |
-
def _add_full_frame(fig):
|
| 196 |
-
fig.update_layout(shapes=[dict(
|
| 197 |
-
type="rect", xref="paper", yref="paper", x0=0, y0=0, x1=1, y1=1,
|
| 198 |
-
line=dict(color="#444", width=1), fillcolor="rgba(0,0,0,0)"
|
| 199 |
-
)])
|
| 200 |
-
|
| 201 |
def cross_plot_interactive(actual, pred):
|
| 202 |
a = pd.Series(actual).astype(float)
|
| 203 |
p = pd.Series(pred).astype(float)
|
|
@@ -221,30 +203,34 @@ def cross_plot_interactive(actual, pred):
|
|
| 221 |
|
| 222 |
fig.update_layout(
|
| 223 |
paper_bgcolor="#ffffff", plot_bgcolor="#ffffff",
|
| 224 |
-
margin=dict(l=
|
| 225 |
-
hovermode="closest", font=dict(size=
|
| 226 |
width=CROSS_W, height=CROSS_H
|
| 227 |
)
|
| 228 |
fig.update_xaxes(
|
| 229 |
-
title_text="<b>Actual UCS</b>",
|
| 230 |
-
|
| 231 |
-
|
|
|
|
|
|
|
| 232 |
)
|
| 233 |
fig.update_yaxes(
|
| 234 |
-
title_text="<b>Predicted UCS</b>",
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
|
|
|
| 238 |
)
|
| 239 |
-
_add_full_frame(fig)
|
| 240 |
return fig
|
| 241 |
|
| 242 |
def depth_or_index_track_interactive(df, include_actual=True):
|
| 243 |
depth_col = next((c for c in df.columns if 'depth' in str(c).lower()), None)
|
| 244 |
if depth_col is not None:
|
| 245 |
y = df[depth_col]; y_label = depth_col
|
|
|
|
| 246 |
else:
|
| 247 |
y = np.arange(1, len(df) + 1); y_label = "Point Index"
|
|
|
|
| 248 |
|
| 249 |
fig = go.Figure()
|
| 250 |
fig.add_trace(go.Scatter(
|
|
@@ -263,27 +249,28 @@ def depth_or_index_track_interactive(df, include_actual=True):
|
|
| 263 |
|
| 264 |
fig.update_layout(
|
| 265 |
paper_bgcolor="#ffffff", plot_bgcolor="#ffffff",
|
| 266 |
-
margin=dict(l=
|
| 267 |
-
hovermode="closest", font=dict(size=
|
| 268 |
-
legend=dict(
|
| 269 |
-
|
|
|
|
|
|
|
| 270 |
legend_title_text="",
|
| 271 |
width=TRACK_W, height=TRACK_H
|
| 272 |
)
|
| 273 |
fig.update_xaxes(
|
| 274 |
-
title_text="<b>UCS</b>", side="top",
|
| 275 |
-
ticks="outside", showline=True, linewidth=1.2, linecolor="#444", mirror=
|
| 276 |
-
showgrid=True, gridcolor="rgba(0,0,0,0.12)",
|
|
|
|
| 277 |
)
|
| 278 |
fig.update_yaxes(
|
| 279 |
-
title_text=f"<b>{y_label}</b>", autorange=
|
| 280 |
-
ticks="outside", showline=True, linewidth=1.2, linecolor="#444", mirror=
|
| 281 |
showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
|
| 282 |
)
|
| 283 |
-
_add_full_frame(fig)
|
| 284 |
return fig
|
| 285 |
|
| 286 |
-
|
| 287 |
# ---------- Preview modal helpers (matplotlib static) ----------
|
| 288 |
def make_index_tracks(df: pd.DataFrame, cols: list[str]):
|
| 289 |
cols = [c for c in cols if c in df.columns]
|
|
@@ -302,6 +289,8 @@ def make_index_tracks(df: pd.DataFrame, cols: list[str]):
|
|
| 302 |
ax.set_xlabel(col)
|
| 303 |
ax.xaxis.set_label_position('top'); ax.xaxis.tick_top(); ax.invert_yaxis()
|
| 304 |
ax.grid(True, linestyle=":", alpha=0.3)
|
|
|
|
|
|
|
| 305 |
axes[0].set_ylabel("Point Index")
|
| 306 |
return fig
|
| 307 |
|
|
@@ -325,6 +314,7 @@ def preview_modal_dev(book: dict[str, pd.DataFrame], feature_cols: list[str]):
|
|
| 325 |
if not tabs:
|
| 326 |
first_name = list(book.keys())[0]
|
| 327 |
tabs = [first_name]; data = [book[first_name]]
|
|
|
|
| 328 |
t_objs = st.tabs(tabs)
|
| 329 |
for t, df in zip(t_objs, data):
|
| 330 |
with t:
|
|
@@ -333,21 +323,19 @@ def preview_modal_dev(book: dict[str, pd.DataFrame], feature_cols: list[str]):
|
|
| 333 |
with t2: st.dataframe(stats_table(df, FEATURES), use_container_width=True)
|
| 334 |
|
| 335 |
@dialog("Preview data")
|
| 336 |
-
def
|
| 337 |
if not book:
|
| 338 |
st.info("No data loaded yet."); return
|
| 339 |
-
vname = find_sheet(book,
|
| 340 |
df = book[vname]
|
| 341 |
t1, t2 = st.tabs(["Tracks", "Summary"])
|
| 342 |
with t1: st.pyplot(make_index_tracks(df, feature_cols), use_container_width=True)
|
| 343 |
with t2: st.dataframe(stats_table(df, feature_cols), use_container_width=True)
|
| 344 |
|
| 345 |
-
|
| 346 |
# =========================
|
| 347 |
# Model presence
|
| 348 |
# =========================
|
| 349 |
MODEL_URL = _get_model_url()
|
| 350 |
-
|
| 351 |
def ensure_model_present() -> Path:
|
| 352 |
for p in [DEFAULT_MODEL, *MODEL_FALLBACKS]:
|
| 353 |
if p.exists() and p.stat().st_size > 0:
|
|
@@ -379,16 +367,29 @@ except Exception as e:
|
|
| 379 |
st.error(f"Failed to load model: {model_path}\n{e}")
|
| 380 |
st.stop()
|
| 381 |
|
| 382 |
-
#
|
| 383 |
meta_path = MODELS_DIR / "meta.json"
|
| 384 |
if meta_path.exists():
|
| 385 |
try:
|
| 386 |
meta = json.loads(meta_path.read_text(encoding="utf-8"))
|
| 387 |
-
FEATURES = meta.get("features", FEATURES)
|
| 388 |
-
TARGET = meta.get("target", TARGET)
|
| 389 |
except Exception:
|
| 390 |
pass
|
| 391 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 392 |
|
| 393 |
# =========================
|
| 394 |
# Session state
|
|
@@ -397,7 +398,7 @@ if "app_step" not in st.session_state: st.session_state.app_step = "intro"
|
|
| 397 |
if "results" not in st.session_state: st.session_state.results = {}
|
| 398 |
if "train_ranges" not in st.session_state: st.session_state.train_ranges = None
|
| 399 |
|
| 400 |
-
#
|
| 401 |
for k, v in {
|
| 402 |
"dev_ready": False,
|
| 403 |
"dev_file_loaded": False,
|
|
@@ -411,9 +412,8 @@ for k, v in {
|
|
| 411 |
}.items():
|
| 412 |
if k not in st.session_state: st.session_state[k] = v
|
| 413 |
|
| 414 |
-
|
| 415 |
# =========================
|
| 416 |
-
# Hero
|
| 417 |
# =========================
|
| 418 |
st.markdown(
|
| 419 |
f"""
|
|
@@ -428,7 +428,6 @@ st.markdown(
|
|
| 428 |
unsafe_allow_html=True,
|
| 429 |
)
|
| 430 |
|
| 431 |
-
|
| 432 |
# =========================
|
| 433 |
# INTRO
|
| 434 |
# =========================
|
|
@@ -437,24 +436,23 @@ if st.session_state.app_step == "intro":
|
|
| 437 |
st.markdown("This software is developed by *Smart Thinking AI-Solutions Team* to estimate UCS from drilling data.")
|
| 438 |
st.subheader("Expected Input Features (in Order)")
|
| 439 |
st.markdown(
|
| 440 |
-
"- Q, gpm — Flow rate (gallons per minute)\n"
|
| 441 |
-
"- SPP(psi) — Stand pipe pressure\n"
|
| 442 |
-
"- T (kft.lbf) — Torque (thousand foot-pounds)\n"
|
| 443 |
-
"- WOB (klbf) — Weight on bit\n"
|
| 444 |
"- ROP (ft/h) — Rate of penetration"
|
| 445 |
)
|
| 446 |
st.subheader("How It Works")
|
| 447 |
st.markdown(
|
| 448 |
"1. **Upload your data to build the case and preview the performance of our model.** \n"
|
| 449 |
"2. Click **Run Model** to compute metrics and plots. \n"
|
| 450 |
-
"3. Click **Proceed to Validation** to
|
| 451 |
-
"4. Click **Proceed to Prediction**
|
| 452 |
"5. Export results to Excel at any time."
|
| 453 |
)
|
| 454 |
-
if st.button("Start Showcase", type="primary"):
|
| 455 |
st.session_state.app_step = "dev"; st.rerun()
|
| 456 |
|
| 457 |
-
|
| 458 |
# =========================
|
| 459 |
# CASE BUILDING (Development)
|
| 460 |
# =========================
|
|
@@ -463,7 +461,6 @@ if st.session_state.app_step == "dev":
|
|
| 463 |
dev_label = "Upload Data (Excel)" if not st.session_state.dev_file_name else "Replace data (Excel)"
|
| 464 |
train_test_file = st.sidebar.file_uploader(dev_label, type=["xlsx","xls"], key="dev_upload")
|
| 465 |
|
| 466 |
-
# Detect new/changed file and PERSIST BYTES
|
| 467 |
if train_test_file is not None:
|
| 468 |
try:
|
| 469 |
file_bytes = train_test_file.getvalue(); size = len(file_bytes)
|
|
@@ -489,16 +486,18 @@ if st.session_state.app_step == "dev":
|
|
| 489 |
f"{st.session_state.dev_file_rows} rows × {st.session_state.dev_file_cols} cols"
|
| 490 |
)
|
| 491 |
|
| 492 |
-
|
| 493 |
-
if
|
| 494 |
st.session_state.dev_preview_request = True
|
|
|
|
| 495 |
run_btn = st.sidebar.button("Run Model", type="primary", use_container_width=True)
|
|
|
|
| 496 |
if st.sidebar.button("Proceed to Validation ▶", use_container_width=True):
|
| 497 |
-
st.session_state.app_step = "
|
| 498 |
if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True):
|
| 499 |
-
st.session_state.app_step = "
|
| 500 |
|
| 501 |
-
#
|
| 502 |
st.subheader("Case Building (Development)")
|
| 503 |
if st.session_state.dev_ready:
|
| 504 |
st.success("Case has been built and results are displayed below.")
|
|
@@ -509,25 +508,21 @@ if st.session_state.app_step == "dev":
|
|
| 509 |
else:
|
| 510 |
st.write("**Upload your data to build a case, then run the model to review development performance.**")
|
| 511 |
|
| 512 |
-
# open preview dialog if requested
|
| 513 |
if st.session_state.dev_preview_request and st.session_state.dev_file_bytes:
|
| 514 |
_book = read_book_bytes(st.session_state.dev_file_bytes)
|
| 515 |
st.session_state.dev_previewed = True
|
| 516 |
st.session_state.dev_preview_request = False
|
| 517 |
preview_modal_dev(_book, FEATURES)
|
| 518 |
|
| 519 |
-
# Run
|
| 520 |
if run_btn and st.session_state.dev_file_bytes:
|
| 521 |
with st.status("Processing…", expanded=False) as status:
|
| 522 |
book = read_book_bytes(st.session_state.dev_file_bytes)
|
| 523 |
-
if not book:
|
| 524 |
-
status.update(label="Failed to read workbook.", state="error"); st.stop()
|
| 525 |
status.update(label="Workbook read ✓")
|
| 526 |
sh_train = find_sheet(book, ["Train","Training","training2","train","training"])
|
| 527 |
sh_test = find_sheet(book, ["Test","Testing","testing2","test","testing"])
|
| 528 |
if sh_train is None or sh_test is None:
|
| 529 |
status.update(label="Workbook must include Train/Training/training2 and Test/Testing/testing2.", state="error"); st.stop()
|
| 530 |
-
|
| 531 |
df_tr = book[sh_train].copy(); df_te = book[sh_test].copy()
|
| 532 |
if not (ensure_cols(df_tr, FEATURES + [TARGET]) and ensure_cols(df_te, FEATURES + [TARGET])):
|
| 533 |
status.update(label="Missing required columns.", state="error"); st.stop()
|
|
@@ -552,53 +547,49 @@ if st.session_state.app_step == "dev":
|
|
| 552 |
st.session_state.train_ranges = {f:(float(tr_min[f]), float(tr_max[f])) for f in FEATURES}
|
| 553 |
|
| 554 |
st.session_state.dev_ready = True
|
| 555 |
-
status.update(label="Done ✓", state="complete")
|
| 556 |
-
st.rerun()
|
| 557 |
|
| 558 |
# Results
|
| 559 |
if ("Train" in st.session_state.results) or ("Test" in st.session_state.results):
|
| 560 |
tab1, tab2 = st.tabs(["Training", "Testing"])
|
| 561 |
|
| 562 |
-
def
|
| 563 |
c1,c2,c3 = st.columns(3)
|
| 564 |
c1.metric("R²", f"{metrics['R2']:.4f}")
|
| 565 |
c2.metric("RMSE", f"{metrics['RMSE']:.4f}")
|
| 566 |
c3.metric("MAE", f"{metrics['MAE']:.4f}")
|
| 567 |
|
| 568 |
-
#
|
| 569 |
-
|
| 570 |
-
with
|
| 571 |
-
|
| 572 |
-
|
| 573 |
-
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
config={"displayModeBar": False, "scrollZoom": True}
|
| 583 |
-
)
|
| 584 |
|
| 585 |
if "Train" in st.session_state.results:
|
| 586 |
with tab1:
|
| 587 |
-
|
| 588 |
if "Test" in st.session_state.results:
|
| 589 |
with tab2:
|
| 590 |
-
|
| 591 |
|
| 592 |
st.markdown("---")
|
| 593 |
-
|
| 594 |
-
sheets = {}
|
| 595 |
-
rows = []
|
| 596 |
if "Train" in st.session_state.results:
|
| 597 |
sheets["Train_with_pred"] = st.session_state.results["Train"]
|
| 598 |
rows.append({"Split":"Train", **{k:round(v,6) for k,v in st.session_state.results["metrics_train"].items()}})
|
| 599 |
if "Test" in st.session_state.results:
|
| 600 |
sheets["Test_with_pred"] = st.session_state.results["Test"]
|
| 601 |
rows.append({"Split":"Test", **{k:round(v,6) for k,v in st.session_state.results["metrics_test"].items()}})
|
|
|
|
| 602 |
summary_df = pd.DataFrame(rows) if rows else None
|
| 603 |
try:
|
| 604 |
buf = io.BytesIO()
|
|
@@ -616,12 +607,11 @@ if st.session_state.app_step == "dev":
|
|
| 616 |
except Exception as e:
|
| 617 |
st.warning(str(e))
|
| 618 |
|
| 619 |
-
|
| 620 |
# =========================
|
| 621 |
-
# VALIDATION (with
|
| 622 |
# =========================
|
| 623 |
-
if st.session_state.app_step == "
|
| 624 |
-
st.sidebar.header("Validate the
|
| 625 |
validation_file = st.sidebar.file_uploader("Upload Validation Excel", type=["xlsx","xls"], key="val_upload")
|
| 626 |
if validation_file is not None:
|
| 627 |
_book_tmp = read_book_bytes(validation_file.getvalue())
|
|
@@ -629,18 +619,19 @@ if st.session_state.app_step == "val":
|
|
| 629 |
first_df = next(iter(_book_tmp.values()))
|
| 630 |
st.sidebar.caption(f"**Data loaded:** {validation_file.name} • {first_df.shape[0]} rows × {first_df.shape[1]} cols")
|
| 631 |
|
| 632 |
-
|
|
|
|
| 633 |
_book = read_book_bytes(validation_file.getvalue())
|
| 634 |
-
|
| 635 |
|
| 636 |
predict_btn = st.sidebar.button("Predict", type="primary", use_container_width=True)
|
| 637 |
-
if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True):
|
| 638 |
-
st.session_state.app_step = "pred"; st.rerun()
|
| 639 |
if st.sidebar.button("⬅ Back to Case Building", use_container_width=True):
|
| 640 |
st.session_state.app_step = "dev"; st.rerun()
|
|
|
|
|
|
|
| 641 |
|
| 642 |
-
st.subheader("
|
| 643 |
-
st.write("Upload a dataset
|
| 644 |
|
| 645 |
if predict_btn and validation_file is not None:
|
| 646 |
with st.status("Predicting…", expanded=False) as status:
|
|
@@ -649,8 +640,7 @@ if st.session_state.app_step == "val":
|
|
| 649 |
status.update(label="Workbook read ✓")
|
| 650 |
vname = find_sheet(vbook, ["Validation","Validate","validation2","Val","val"]) or list(vbook.keys())[0]
|
| 651 |
df_val = vbook[vname].copy()
|
| 652 |
-
if not ensure_cols(df_val, FEATURES):
|
| 653 |
-
status.update(label="Missing required columns.", state="error"); st.stop()
|
| 654 |
status.update(label="Columns validated ✓")
|
| 655 |
df_val["UCS_Pred"] = model.predict(df_val[FEATURES])
|
| 656 |
st.session_state.results["Validate"] = df_val
|
|
@@ -664,13 +654,11 @@ if st.session_state.app_step == "val":
|
|
| 664 |
offenders["Violations"] = pd.DataFrame(viol).loc[any_viol].apply(lambda r: ", ".join([c for c,v in r.items() if v]), axis=1)
|
| 665 |
offenders.index = offenders.index + 1; oor_table = offenders
|
| 666 |
|
| 667 |
-
metrics_val =
|
| 668 |
-
|
| 669 |
-
|
| 670 |
-
|
| 671 |
-
|
| 672 |
-
"MAE": mean_absolute_error(df_val[TARGET], df_val["UCS_Pred"])
|
| 673 |
-
}
|
| 674 |
st.session_state.results["metrics_val"] = metrics_val
|
| 675 |
st.session_state.results["summary_val"] = {
|
| 676 |
"n_points": len(df_val),
|
|
@@ -682,58 +670,50 @@ if st.session_state.app_step == "val":
|
|
| 682 |
status.update(label="Predictions ready ✓", state="complete")
|
| 683 |
|
| 684 |
if "Validate" in st.session_state.results:
|
| 685 |
-
|
| 686 |
-
|
| 687 |
-
|
| 688 |
-
metrics_val
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 689 |
|
|
|
|
| 690 |
if sv["oor_pct"] > 0:
|
| 691 |
st.warning("Some validation inputs fall outside the **training min–max** ranges. Interpret predictions with caution.")
|
| 692 |
-
|
| 693 |
-
if metrics_val is not None:
|
| 694 |
-
c1, c2, c3 = st.columns(3)
|
| 695 |
-
c1.metric("R²", f"{metrics_val['R2']:.4f}")
|
| 696 |
-
c2.metric("RMSE", f"{metrics_val['RMSE']:.4f}")
|
| 697 |
-
c3.metric("MAE", f"{metrics_val['MAE']:.4f}")
|
| 698 |
-
else:
|
| 699 |
-
c1, c2, c3 = st.columns(3)
|
| 700 |
-
c1.metric("# points", f"{sv['n_points']}")
|
| 701 |
-
c2.metric("Pred min", f"{sv['pred_min']:.2f}")
|
| 702 |
-
c3.metric("Pred max", f"{sv['pred_max']:.2f}")
|
| 703 |
-
|
| 704 |
-
sp_l, main, sp_r = st.columns([1, 8, 1])
|
| 705 |
-
with main:
|
| 706 |
-
col1, col2 = st.columns(2)
|
| 707 |
-
with col1:
|
| 708 |
-
if TARGET in dfv.columns:
|
| 709 |
-
st.plotly_chart(
|
| 710 |
-
cross_plot_interactive(dfv[TARGET], dfv["UCS_Pred"]),
|
| 711 |
-
use_container_width=False,
|
| 712 |
-
config={"displayModeBar": False, "scrollZoom": True}
|
| 713 |
-
)
|
| 714 |
-
else:
|
| 715 |
-
st.info("Actual UCS values are not available in the validation data. Cross-plot cannot be generated.")
|
| 716 |
-
with col2:
|
| 717 |
-
st.plotly_chart(
|
| 718 |
-
depth_or_index_track_interactive(dfv, include_actual=(TARGET in dfv.columns)),
|
| 719 |
-
use_container_width=False,
|
| 720 |
-
config={"displayModeBar": False, "scrollZoom": True}
|
| 721 |
-
)
|
| 722 |
-
|
| 723 |
if oor_table is not None:
|
| 724 |
st.write("*Out-of-range rows (vs. Training min–max):*")
|
| 725 |
st.dataframe(oor_table, use_container_width=True)
|
| 726 |
|
| 727 |
st.markdown("---")
|
| 728 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 729 |
try:
|
| 730 |
buf = io.BytesIO()
|
| 731 |
with pd.ExcelWriter(buf, engine="openpyxl") as xw:
|
| 732 |
-
|
| 733 |
-
|
| 734 |
-
|
| 735 |
-
|
| 736 |
-
)
|
| 737 |
st.download_button(
|
| 738 |
"Export Validation Results to Excel",
|
| 739 |
data=buf.getvalue(),
|
|
@@ -743,12 +723,11 @@ if st.session_state.app_step == "val":
|
|
| 743 |
except Exception as e:
|
| 744 |
st.warning(str(e))
|
| 745 |
|
| 746 |
-
|
| 747 |
# =========================
|
| 748 |
-
# PREDICTION (no
|
| 749 |
# =========================
|
| 750 |
-
if st.session_state.app_step == "
|
| 751 |
-
st.sidebar.header("Prediction")
|
| 752 |
pred_file = st.sidebar.file_uploader("Upload Prediction Excel", type=["xlsx","xls"], key="pred_upload")
|
| 753 |
if pred_file is not None:
|
| 754 |
_book_tmp = read_book_bytes(pred_file.getvalue())
|
|
@@ -756,88 +735,75 @@ if st.session_state.app_step == "pred":
|
|
| 756 |
first_df = next(iter(_book_tmp.values()))
|
| 757 |
st.sidebar.caption(f"**Data loaded:** {pred_file.name} • {first_df.shape[0]} rows × {first_df.shape[1]} cols")
|
| 758 |
|
| 759 |
-
|
|
|
|
| 760 |
_book = read_book_bytes(pred_file.getvalue())
|
| 761 |
-
|
| 762 |
|
| 763 |
-
|
| 764 |
-
if st.sidebar.button("⬅ Back to
|
| 765 |
-
st.session_state.app_step = "
|
| 766 |
|
| 767 |
st.subheader("Prediction")
|
| 768 |
-
st.write("Upload a dataset
|
| 769 |
|
| 770 |
-
if
|
| 771 |
with st.status("Predicting…", expanded=False) as status:
|
| 772 |
pbook = read_book_bytes(pred_file.getvalue())
|
| 773 |
-
if not pbook: status.update(label="Could not read the
|
| 774 |
status.update(label="Workbook read ✓")
|
| 775 |
-
pname =
|
| 776 |
dfp = pbook[pname].copy()
|
| 777 |
-
if not ensure_cols(dfp, FEATURES):
|
| 778 |
-
status.update(label="Missing required columns.", state="error"); st.stop()
|
| 779 |
status.update(label="Columns validated ✓")
|
| 780 |
dfp["UCS_Pred"] = model.predict(dfp[FEATURES])
|
| 781 |
st.session_state.results["PredictOnly"] = dfp
|
| 782 |
|
| 783 |
-
ranges = st.session_state.train_ranges
|
| 784 |
-
oor_pct = None
|
| 785 |
if ranges:
|
| 786 |
-
|
| 787 |
-
any_viol = pd.DataFrame(viol).any(axis=1)
|
| 788 |
oor_pct = float(any_viol.mean()*100.0)
|
| 789 |
|
| 790 |
-
st.session_state.results["
|
| 791 |
"n_points": len(dfp),
|
| 792 |
"pred_min": float(dfp["UCS_Pred"].min()),
|
| 793 |
"pred_max": float(dfp["UCS_Pred"].max()),
|
| 794 |
"pred_mean": float(dfp["UCS_Pred"].mean()),
|
| 795 |
"pred_std": float(dfp["UCS_Pred"].std(ddof=0)),
|
| 796 |
-
"oor_pct": oor_pct
|
| 797 |
}
|
| 798 |
status.update(label="Predictions ready ✓", state="complete")
|
| 799 |
|
| 800 |
if "PredictOnly" in st.session_state.results:
|
| 801 |
dfp = st.session_state.results["PredictOnly"]
|
| 802 |
-
|
| 803 |
-
|
| 804 |
-
# summary table (
|
| 805 |
-
|
| 806 |
-
|
| 807 |
-
|
| 808 |
-
|
| 809 |
-
|
| 810 |
-
|
| 811 |
-
|
| 812 |
-
|
| 813 |
-
|
| 814 |
-
|
| 815 |
-
|
| 816 |
-
|
| 817 |
-
|
| 818 |
-
|
| 819 |
-
|
| 820 |
-
|
| 821 |
-
|
| 822 |
-
|
| 823 |
-
"<div class='help-foot'>* OOR% = percentage of rows with any input feature outside the "
|
| 824 |
-
"training min–max range (computed when Case Building has been run).</div>",
|
| 825 |
-
unsafe_allow_html=True
|
| 826 |
-
)
|
| 827 |
-
with col2:
|
| 828 |
-
st.plotly_chart(
|
| 829 |
-
depth_or_index_track_interactive(dfp, include_actual=False),
|
| 830 |
-
use_container_width=False,
|
| 831 |
-
config={"displayModeBar": False, "scrollZoom": True}
|
| 832 |
-
)
|
| 833 |
|
| 834 |
st.markdown("---")
|
| 835 |
-
# export
|
| 836 |
try:
|
| 837 |
buf = io.BytesIO()
|
| 838 |
with pd.ExcelWriter(buf, engine="openpyxl") as xw:
|
| 839 |
-
dfp.to_excel(xw, sheet_name="
|
| 840 |
-
pd.DataFrame([
|
| 841 |
st.download_button(
|
| 842 |
"Export Prediction Results to Excel",
|
| 843 |
data=buf.getvalue(),
|
|
@@ -847,7 +813,6 @@ if st.session_state.app_step == "pred":
|
|
| 847 |
except Exception as e:
|
| 848 |
st.warning(str(e))
|
| 849 |
|
| 850 |
-
|
| 851 |
# =========================
|
| 852 |
# Footer
|
| 853 |
# =========================
|
|
|
|
| 6 |
import numpy as np
|
| 7 |
import joblib
|
| 8 |
|
| 9 |
+
# matplotlib only for PREVIEW modal (static thumbnails)
|
| 10 |
import matplotlib
|
| 11 |
matplotlib.use("Agg")
|
| 12 |
import matplotlib.pyplot as plt
|
|
|
|
| 14 |
import plotly.graph_objects as go
|
| 15 |
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
|
| 16 |
|
|
|
|
| 17 |
# =========================
|
| 18 |
# Defaults / Constants
|
| 19 |
# =========================
|
| 20 |
FEATURES = ["Q, gpm", "SPP(psi)", "T (kft.lbf)", "WOB (klbf)", "ROP (ft/h)"]
|
| 21 |
TARGET = "UCS"
|
|
|
|
| 22 |
MODELS_DIR = Path("models")
|
| 23 |
DEFAULT_MODEL = MODELS_DIR / "ucs_rf.joblib"
|
| 24 |
MODEL_FALLBACKS = [MODELS_DIR / "model.joblib", MODELS_DIR / "model.pkl"]
|
| 25 |
|
| 26 |
COLORS = {"pred": "#1f77b4", "actual": "#f2b702", "ref": "#5a5a5a"}
|
| 27 |
|
| 28 |
+
# ——— Exact pixel sizes (match preview strip & square cross-plot) ———
|
| 29 |
+
CROSS_W, CROSS_H = 420, 420 # square cross-plot (px)
|
| 30 |
+
TRACK_W, TRACK_H = 220, 700 # match preview strip look (px)
|
| 31 |
+
FONT_SIZE = 13
|
| 32 |
|
| 33 |
# =========================
|
| 34 |
# Page / Theme
|
| 35 |
# =========================
|
| 36 |
st.set_page_config(page_title="ST_GeoMech_UCS", page_icon="logo.png", layout="wide")
|
| 37 |
+
|
| 38 |
+
# ---- CSS ----
|
| 39 |
+
st.markdown("<style>header, footer{visibility:hidden !important;}</style>", unsafe_allow_html=True)
|
| 40 |
st.markdown(
|
| 41 |
"""
|
| 42 |
<style>
|
| 43 |
+
.stApp { background: #FFFFFF; }
|
| 44 |
+
section[data-testid="stSidebar"] { background: #F6F9FC; }
|
| 45 |
+
.block-container { padding-top: .5rem; padding-bottom: .5rem; }
|
| 46 |
+
/* primary buttons */
|
| 47 |
+
.stButton>button{ background:#007bff; color:#fff; font-weight:bold; border-radius:8px; border:none; padding:10px 24px; }
|
| 48 |
+
.stButton>button:hover{ background:#0056b3; }
|
| 49 |
+
/* hero */
|
| 50 |
+
.st-hero { display:flex; align-items:center; gap:16px; padding-top: 4px; }
|
| 51 |
+
.st-hero .brand { width:110px; height:110px; object-fit:contain; }
|
| 52 |
+
.st-hero h1 { margin:0; line-height:1.05; }
|
| 53 |
+
.st-hero .tagline { margin:2px 0 0 2px; color:#6b7280; font-size:1.05rem; font-style:italic; }
|
| 54 |
+
/* tighter top margin for first block */
|
| 55 |
+
[data-testid="stBlock"]{ margin-top:0 !important; }
|
|
|
|
|
|
|
| 56 |
</style>
|
| 57 |
""",
|
| 58 |
unsafe_allow_html=True
|
| 59 |
)
|
| 60 |
|
|
|
|
| 61 |
# =========================
|
| 62 |
+
# Password gate (define first, then call)
|
| 63 |
# =========================
|
| 64 |
def inline_logo(path="logo.png") -> str:
|
| 65 |
try:
|
| 66 |
p = Path(path)
|
| 67 |
+
if not p.exists(): return ""
|
|
|
|
| 68 |
return f"data:image/png;base64,{base64.b64encode(p.read_bytes()).decode('ascii')}"
|
| 69 |
except Exception:
|
| 70 |
return ""
|
| 71 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
def add_password_gate() -> bool:
|
| 73 |
+
"""Branded access screen until correct APP_PASSWORD in Secrets/Env is entered."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
try:
|
| 75 |
required = st.secrets.get("APP_PASSWORD", "")
|
| 76 |
except Exception:
|
|
|
|
| 109 |
</div>
|
| 110 |
</div>
|
| 111 |
<div style="font-size:1.25rem;font-weight:700;margin:8px 0 4px 0;">Protected</div>
|
| 112 |
+
<div style="color:#6b7280;margin-bottom:14px;">
|
| 113 |
+
Please enter your access key to continue.
|
| 114 |
+
</div>
|
| 115 |
""",
|
| 116 |
unsafe_allow_html=True
|
| 117 |
)
|
| 118 |
+
|
| 119 |
pwd = st.text_input("Access key", type="password", placeholder="••••••••")
|
| 120 |
+
col1, _ = st.columns([1, 3])
|
| 121 |
+
with col1:
|
| 122 |
+
if st.button("Unlock", type="primary", use_container_width=True):
|
| 123 |
+
if pwd == required:
|
| 124 |
+
st.session_state.auth_ok = True
|
| 125 |
+
st.rerun()
|
| 126 |
+
else:
|
| 127 |
+
st.error("Incorrect key. Please try again.")
|
| 128 |
|
| 129 |
+
st.stop()
|
| 130 |
|
| 131 |
+
# call it now
|
| 132 |
add_password_gate()
|
| 133 |
|
|
|
|
| 134 |
# =========================
|
| 135 |
+
# Helpers
|
| 136 |
# =========================
|
| 137 |
try:
|
| 138 |
dialog = st.dialog
|
|
|
|
| 169 |
|
| 170 |
def read_book_bytes(data_bytes: bytes):
|
| 171 |
if not data_bytes: return {}
|
| 172 |
+
try: return parse_excel(data_bytes)
|
|
|
|
| 173 |
except Exception as e:
|
| 174 |
+
st.error(f"Failed to read Excel: {e}"); return {}
|
|
|
|
| 175 |
|
| 176 |
def find_sheet(book, names):
|
| 177 |
low2orig = {k.lower(): k for k in book.keys()}
|
| 178 |
for nm in names:
|
| 179 |
+
if nm.lower() in low2orig: return low2orig[nm.lower()]
|
|
|
|
| 180 |
return None
|
| 181 |
|
| 182 |
+
# ---------- Interactive plotting (fixed sizes, full outline, crisp fonts) ----------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 183 |
def cross_plot_interactive(actual, pred):
|
| 184 |
a = pd.Series(actual).astype(float)
|
| 185 |
p = pd.Series(pred).astype(float)
|
|
|
|
| 203 |
|
| 204 |
fig.update_layout(
|
| 205 |
paper_bgcolor="#ffffff", plot_bgcolor="#ffffff",
|
| 206 |
+
margin=dict(l=64, r=16, t=10, b=48),
|
| 207 |
+
hovermode="closest", font=dict(size=FONT_SIZE),
|
| 208 |
width=CROSS_W, height=CROSS_H
|
| 209 |
)
|
| 210 |
fig.update_xaxes(
|
| 211 |
+
title_text="<b>Actual UCS</b>",
|
| 212 |
+
range=[x0, x1], ticks="outside",
|
| 213 |
+
showline=True, linewidth=1.2, linecolor="#444", mirror="allticks",
|
| 214 |
+
showgrid=True, gridcolor="rgba(0,0,0,0.12)", tickformat=",.0f",
|
| 215 |
+
automargin=True
|
| 216 |
)
|
| 217 |
fig.update_yaxes(
|
| 218 |
+
title_text="<b>Predicted UCS</b>",
|
| 219 |
+
range=[x0, x1], ticks="outside",
|
| 220 |
+
showline=True, linewidth=1.2, linecolor="#444", mirror="allticks",
|
| 221 |
+
showgrid=True, gridcolor="rgba(0,0,0,0.12)", tickformat=",.0f",
|
| 222 |
+
scaleanchor="x", scaleratio=1, automargin=True
|
| 223 |
)
|
|
|
|
| 224 |
return fig
|
| 225 |
|
| 226 |
def depth_or_index_track_interactive(df, include_actual=True):
|
| 227 |
depth_col = next((c for c in df.columns if 'depth' in str(c).lower()), None)
|
| 228 |
if depth_col is not None:
|
| 229 |
y = df[depth_col]; y_label = depth_col
|
| 230 |
+
autorange = "reversed"
|
| 231 |
else:
|
| 232 |
y = np.arange(1, len(df) + 1); y_label = "Point Index"
|
| 233 |
+
autorange = "reversed"
|
| 234 |
|
| 235 |
fig = go.Figure()
|
| 236 |
fig.add_trace(go.Scatter(
|
|
|
|
| 249 |
|
| 250 |
fig.update_layout(
|
| 251 |
paper_bgcolor="#ffffff", plot_bgcolor="#ffffff",
|
| 252 |
+
margin=dict(l=72, r=18, t=36, b=48),
|
| 253 |
+
hovermode="closest", font=dict(size=FONT_SIZE),
|
| 254 |
+
legend=dict(
|
| 255 |
+
x=0.98, y=0.05, xanchor="right", yanchor="bottom",
|
| 256 |
+
bgcolor="rgba(255,255,255,0.75)", bordercolor="#cccccc", borderwidth=1
|
| 257 |
+
),
|
| 258 |
legend_title_text="",
|
| 259 |
width=TRACK_W, height=TRACK_H
|
| 260 |
)
|
| 261 |
fig.update_xaxes(
|
| 262 |
+
title_text="<b>UCS</b>", side="top",
|
| 263 |
+
ticks="outside", showline=True, linewidth=1.2, linecolor="#444", mirror="allticks",
|
| 264 |
+
showgrid=True, gridcolor="rgba(0,0,0,0.12)", tickformat=",.0f",
|
| 265 |
+
automargin=True
|
| 266 |
)
|
| 267 |
fig.update_yaxes(
|
| 268 |
+
title_text=f"<b>{y_label}</b>", autorange=autorange,
|
| 269 |
+
ticks="outside", showline=True, linewidth=1.2, linecolor="#444", mirror="allticks",
|
| 270 |
showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
|
| 271 |
)
|
|
|
|
| 272 |
return fig
|
| 273 |
|
|
|
|
| 274 |
# ---------- Preview modal helpers (matplotlib static) ----------
|
| 275 |
def make_index_tracks(df: pd.DataFrame, cols: list[str]):
|
| 276 |
cols = [c for c in cols if c in df.columns]
|
|
|
|
| 289 |
ax.set_xlabel(col)
|
| 290 |
ax.xaxis.set_label_position('top'); ax.xaxis.tick_top(); ax.invert_yaxis()
|
| 291 |
ax.grid(True, linestyle=":", alpha=0.3)
|
| 292 |
+
# draw full box
|
| 293 |
+
for spine in ax.spines.values(): spine.set_visible(True)
|
| 294 |
axes[0].set_ylabel("Point Index")
|
| 295 |
return fig
|
| 296 |
|
|
|
|
| 314 |
if not tabs:
|
| 315 |
first_name = list(book.keys())[0]
|
| 316 |
tabs = [first_name]; data = [book[first_name]]
|
| 317 |
+
st.write("Use the tabs to switch between Train/Test views (if available).")
|
| 318 |
t_objs = st.tabs(tabs)
|
| 319 |
for t, df in zip(t_objs, data):
|
| 320 |
with t:
|
|
|
|
| 323 |
with t2: st.dataframe(stats_table(df, FEATURES), use_container_width=True)
|
| 324 |
|
| 325 |
@dialog("Preview data")
def preview_modal_val(book: dict[str, pd.DataFrame], feature_cols: list[str]):
    """Modal dialog previewing a validation/prediction workbook: feature tracks + summary table."""
    # Nothing uploaded yet -> show a gentle notice and bail out.
    if not book:
        st.info("No data loaded yet.")
        return
    # Prefer a sheet whose name looks like a validation split; otherwise fall
    # back to the workbook's first sheet.
    candidates = ["Validation", "Validate", "validation2", "Val", "val"]
    sheet = find_sheet(book, candidates) or next(iter(book))
    frame = book[sheet]
    tracks_tab, summary_tab = st.tabs(["Tracks", "Summary"])
    with tracks_tab:
        st.pyplot(make_index_tracks(frame, feature_cols), use_container_width=True)
    with summary_tab:
        st.dataframe(stats_table(frame, feature_cols), use_container_width=True)
|
| 334 |
|
|
|
|
| 335 |
# =========================
|
| 336 |
# Model presence
|
| 337 |
# =========================
|
| 338 |
MODEL_URL = _get_model_url()
|
|
|
|
| 339 |
def ensure_model_present() -> Path:
|
| 340 |
for p in [DEFAULT_MODEL, *MODEL_FALLBACKS]:
|
| 341 |
if p.exists() and p.stat().st_size > 0:
|
|
|
|
| 367 |
st.error(f"Failed to load model: {model_path}\n{e}")
|
| 368 |
st.stop()
|
| 369 |
|
| 370 |
+
# Meta override/infer
|
| 371 |
meta_path = MODELS_DIR / "meta.json"
|
| 372 |
if meta_path.exists():
|
| 373 |
try:
|
| 374 |
meta = json.loads(meta_path.read_text(encoding="utf-8"))
|
| 375 |
+
FEATURES = meta.get("features", FEATURES); TARGET = meta.get("target", TARGET)
|
|
|
|
| 376 |
except Exception:
|
| 377 |
pass
|
| 378 |
+
else:
|
| 379 |
+
def infer_features_from_model(m):
    """Best-effort recovery of the model's training feature names.

    Probes scikit-learn's ``feature_names_in_`` on the estimator itself, then
    on the final step of a Pipeline-like object. Returns a list of strings,
    or None when no names can be recovered.
    """
    # Candidate objects to probe, in priority order: the estimator itself,
    # then the last step of a pipeline (if one exists).
    candidates = [m]
    try:
        if getattr(m, "steps", None):
            candidates.append(m.steps[-1][1])
    except Exception:
        pass  # malformed 'steps' attribute -> probe the estimator alone
    for obj in candidates:
        try:
            names = getattr(obj, "feature_names_in_", None)
            if names is not None and len(names):
                return [str(x) for x in names]
        except Exception:
            pass  # any probing failure is non-fatal; try the next candidate
    return None
|
| 391 |
+
infer = infer_features_from_model(model)
|
| 392 |
+
if infer: FEATURES = infer
|
| 393 |
|
| 394 |
# =========================
|
| 395 |
# Session state
|
|
|
|
| 398 |
if "results" not in st.session_state: st.session_state.results = {}
|
| 399 |
if "train_ranges" not in st.session_state: st.session_state.train_ranges = None
|
| 400 |
|
| 401 |
+
# Dev page file state
|
| 402 |
for k, v in {
|
| 403 |
"dev_ready": False,
|
| 404 |
"dev_file_loaded": False,
|
|
|
|
| 412 |
}.items():
|
| 413 |
if k not in st.session_state: st.session_state[k] = v
|
| 414 |
|
|
|
|
| 415 |
# =========================
|
| 416 |
+
# Hero
|
| 417 |
# =========================
|
| 418 |
st.markdown(
|
| 419 |
f"""
|
|
|
|
| 428 |
unsafe_allow_html=True,
|
| 429 |
)
|
| 430 |
|
|
|
|
| 431 |
# =========================
|
| 432 |
# INTRO
|
| 433 |
# =========================
|
|
|
|
| 436 |
st.markdown("This software is developed by *Smart Thinking AI-Solutions Team* to estimate UCS from drilling data.")
|
| 437 |
st.subheader("Expected Input Features (in Order)")
|
| 438 |
st.markdown(
|
| 439 |
+
"- Q, gpm — Flow rate (gallons per minute) \n"
|
| 440 |
+
"- SPP(psi) — Stand pipe pressure \n"
|
| 441 |
+
"- T (kft.lbf) — Torque (thousand foot-pounds) \n"
|
| 442 |
+
"- WOB (klbf) — Weight on bit \n"
|
| 443 |
"- ROP (ft/h) — Rate of penetration"
|
| 444 |
)
|
| 445 |
st.subheader("How It Works")
|
| 446 |
st.markdown(
|
| 447 |
"1. **Upload your data to build the case and preview the performance of our model.** \n"
|
| 448 |
"2. Click **Run Model** to compute metrics and plots. \n"
|
| 449 |
+
"3. Click **Proceed to Validation** to evaluate on a new dataset. \n"
|
| 450 |
+
"4. Click **Proceed to Prediction** for prediction-only (no actual UCS). \n"
|
| 451 |
"5. Export results to Excel at any time."
|
| 452 |
)
|
| 453 |
+
if st.button("Start Showcase", type="primary", key="start_showcase"):
|
| 454 |
st.session_state.app_step = "dev"; st.rerun()
|
| 455 |
|
|
|
|
| 456 |
# =========================
|
| 457 |
# CASE BUILDING (Development)
|
| 458 |
# =========================
|
|
|
|
| 461 |
dev_label = "Upload Data (Excel)" if not st.session_state.dev_file_name else "Replace data (Excel)"
|
| 462 |
train_test_file = st.sidebar.file_uploader(dev_label, type=["xlsx","xls"], key="dev_upload")
|
| 463 |
|
|
|
|
| 464 |
if train_test_file is not None:
|
| 465 |
try:
|
| 466 |
file_bytes = train_test_file.getvalue(); size = len(file_bytes)
|
|
|
|
| 486 |
f"{st.session_state.dev_file_rows} rows × {st.session_state.dev_file_cols} cols"
|
| 487 |
)
|
| 488 |
|
| 489 |
+
preview_btn = st.sidebar.button("Preview data", use_container_width=True, disabled=not st.session_state.dev_file_loaded)
|
| 490 |
+
if preview_btn and st.session_state.dev_file_loaded:
|
| 491 |
st.session_state.dev_preview_request = True
|
| 492 |
+
|
| 493 |
run_btn = st.sidebar.button("Run Model", type="primary", use_container_width=True)
|
| 494 |
+
# Always show navigation buttons (enabled feel)
|
| 495 |
if st.sidebar.button("Proceed to Validation ▶", use_container_width=True):
|
| 496 |
+
st.session_state.app_step = "validate"; st.rerun()
|
| 497 |
if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True):
|
| 498 |
+
st.session_state.app_step = "predict"; st.rerun()
|
| 499 |
|
| 500 |
+
# helper bar at top
|
| 501 |
st.subheader("Case Building (Development)")
|
| 502 |
if st.session_state.dev_ready:
|
| 503 |
st.success("Case has been built and results are displayed below.")
|
|
|
|
| 508 |
else:
|
| 509 |
st.write("**Upload your data to build a case, then run the model to review development performance.**")
|
| 510 |
|
|
|
|
| 511 |
if st.session_state.dev_preview_request and st.session_state.dev_file_bytes:
|
| 512 |
_book = read_book_bytes(st.session_state.dev_file_bytes)
|
| 513 |
st.session_state.dev_previewed = True
|
| 514 |
st.session_state.dev_preview_request = False
|
| 515 |
preview_modal_dev(_book, FEATURES)
|
| 516 |
|
|
|
|
| 517 |
if run_btn and st.session_state.dev_file_bytes:
|
| 518 |
with st.status("Processing…", expanded=False) as status:
|
| 519 |
book = read_book_bytes(st.session_state.dev_file_bytes)
|
| 520 |
+
if not book: status.update(label="Failed to read workbook.", state="error"); st.stop()
|
|
|
|
| 521 |
status.update(label="Workbook read ✓")
|
| 522 |
sh_train = find_sheet(book, ["Train","Training","training2","train","training"])
|
| 523 |
sh_test = find_sheet(book, ["Test","Testing","testing2","test","testing"])
|
| 524 |
if sh_train is None or sh_test is None:
|
| 525 |
status.update(label="Workbook must include Train/Training/training2 and Test/Testing/testing2.", state="error"); st.stop()
|
|
|
|
| 526 |
df_tr = book[sh_train].copy(); df_te = book[sh_test].copy()
|
| 527 |
if not (ensure_cols(df_tr, FEATURES + [TARGET]) and ensure_cols(df_te, FEATURES + [TARGET])):
|
| 528 |
status.update(label="Missing required columns.", state="error"); st.stop()
|
|
|
|
| 547 |
st.session_state.train_ranges = {f:(float(tr_min[f]), float(tr_max[f])) for f in FEATURES}
|
| 548 |
|
| 549 |
st.session_state.dev_ready = True
|
| 550 |
+
status.update(label="Done ✓", state="complete"); st.rerun()
|
|
|
|
| 551 |
|
| 552 |
# Results
|
| 553 |
if ("Train" in st.session_state.results) or ("Test" in st.session_state.results):
|
| 554 |
tab1, tab2 = st.tabs(["Training", "Testing"])
|
| 555 |
|
| 556 |
+
def dev_block(df, metrics):
    """Render one development split: metric cards, cross-plot, and UCS depth/index track."""
    # Three headline metric cards (R², RMSE, MAE), formatted to 4 decimals.
    for col, label, key in zip(st.columns(3), ("R²", "RMSE", "MAE"), ("R2", "RMSE", "MAE")):
        col.metric(label, f"{metrics[key]:.4f}")

    # Shared Plotly config: hide the toolbar, allow mouse-wheel zoom.
    plot_cfg = {"displayModeBar": False, "scrollZoom": True}
    # Layout: spacer | cross-plot | spacer | track | spacer.
    _, cross_col, _, track_col, _ = st.columns([0.2, 0.6, 0.05, 0.35, 0.2])
    with cross_col:
        fig = cross_plot_interactive(df[TARGET], df["UCS_Pred"])
        st.plotly_chart(fig, use_container_width=False, config=plot_cfg)
    with track_col:
        fig = depth_or_index_track_interactive(df, include_actual=True)
        st.plotly_chart(fig, use_container_width=False, config=plot_cfg)
|
|
|
|
|
|
|
| 576 |
|
| 577 |
if "Train" in st.session_state.results:
|
| 578 |
with tab1:
|
| 579 |
+
dev_block(st.session_state.results["Train"], st.session_state.results["metrics_train"])
|
| 580 |
if "Test" in st.session_state.results:
|
| 581 |
with tab2:
|
| 582 |
+
dev_block(st.session_state.results["Test"], st.session_state.results["metrics_test"])
|
| 583 |
|
| 584 |
st.markdown("---")
|
| 585 |
+
sheets = {}; rows = []
|
|
|
|
|
|
|
| 586 |
if "Train" in st.session_state.results:
|
| 587 |
sheets["Train_with_pred"] = st.session_state.results["Train"]
|
| 588 |
rows.append({"Split":"Train", **{k:round(v,6) for k,v in st.session_state.results["metrics_train"].items()}})
|
| 589 |
if "Test" in st.session_state.results:
|
| 590 |
sheets["Test_with_pred"] = st.session_state.results["Test"]
|
| 591 |
rows.append({"Split":"Test", **{k:round(v,6) for k,v in st.session_state.results["metrics_test"].items()}})
|
| 592 |
+
|
| 593 |
summary_df = pd.DataFrame(rows) if rows else None
|
| 594 |
try:
|
| 595 |
buf = io.BytesIO()
|
|
|
|
| 607 |
except Exception as e:
|
| 608 |
st.warning(str(e))
|
| 609 |
|
|
|
|
| 610 |
# =========================
|
| 611 |
+
# VALIDATION (with actual UCS)
|
| 612 |
# =========================
|
| 613 |
+
if st.session_state.app_step == "validate":
|
| 614 |
+
st.sidebar.header("Validate the Model")
|
| 615 |
validation_file = st.sidebar.file_uploader("Upload Validation Excel", type=["xlsx","xls"], key="val_upload")
|
| 616 |
if validation_file is not None:
|
| 617 |
_book_tmp = read_book_bytes(validation_file.getvalue())
|
|
|
|
| 619 |
first_df = next(iter(_book_tmp.values()))
|
| 620 |
st.sidebar.caption(f"**Data loaded:** {validation_file.name} • {first_df.shape[0]} rows × {first_df.shape[1]} cols")
|
| 621 |
|
| 622 |
+
preview_val_btn = st.sidebar.button("Preview data", use_container_width=True, disabled=(validation_file is None))
|
| 623 |
+
if preview_val_btn and validation_file is not None:
|
| 624 |
_book = read_book_bytes(validation_file.getvalue())
|
| 625 |
+
preview_modal_val(_book, FEATURES)
|
| 626 |
|
| 627 |
predict_btn = st.sidebar.button("Predict", type="primary", use_container_width=True)
|
|
|
|
|
|
|
| 628 |
if st.sidebar.button("⬅ Back to Case Building", use_container_width=True):
|
| 629 |
st.session_state.app_step = "dev"; st.rerun()
|
| 630 |
+
if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True):
|
| 631 |
+
st.session_state.app_step = "predict"; st.rerun()
|
| 632 |
|
| 633 |
+
st.subheader("Validate the Model")
|
| 634 |
+
st.write("Upload a dataset containing the same feature columns and **UCS** to evaluate model performance.")
|
| 635 |
|
| 636 |
if predict_btn and validation_file is not None:
|
| 637 |
with st.status("Predicting…", expanded=False) as status:
|
|
|
|
| 640 |
status.update(label="Workbook read ✓")
|
| 641 |
vname = find_sheet(vbook, ["Validation","Validate","validation2","Val","val"]) or list(vbook.keys())[0]
|
| 642 |
df_val = vbook[vname].copy()
|
| 643 |
+
if not ensure_cols(df_val, FEATURES + [TARGET]): status.update(label="Missing required columns.", state="error"); st.stop()
|
|
|
|
| 644 |
status.update(label="Columns validated ✓")
|
| 645 |
df_val["UCS_Pred"] = model.predict(df_val[FEATURES])
|
| 646 |
st.session_state.results["Validate"] = df_val
|
|
|
|
| 654 |
offenders["Violations"] = pd.DataFrame(viol).loc[any_viol].apply(lambda r: ", ".join([c for c,v in r.items() if v]), axis=1)
|
| 655 |
offenders.index = offenders.index + 1; oor_table = offenders
|
| 656 |
|
| 657 |
+
metrics_val = {
|
| 658 |
+
"R2": r2_score(df_val[TARGET], df_val["UCS_Pred"]),
|
| 659 |
+
"RMSE": rmse(df_val[TARGET], df_val["UCS_Pred"]),
|
| 660 |
+
"MAE": mean_absolute_error(df_val[TARGET], df_val["UCS_Pred"])
|
| 661 |
+
}
|
|
|
|
|
|
|
| 662 |
st.session_state.results["metrics_val"] = metrics_val
|
| 663 |
st.session_state.results["summary_val"] = {
|
| 664 |
"n_points": len(df_val),
|
|
|
|
| 670 |
status.update(label="Predictions ready ✓", state="complete")
|
| 671 |
|
| 672 |
if "Validate" in st.session_state.results:
|
| 673 |
+
metrics_val = st.session_state.results.get("metrics_val", {})
|
| 674 |
+
c1, c2, c3 = st.columns(3)
|
| 675 |
+
c1.metric("R²", f"{metrics_val.get('R2',0):.4f}")
|
| 676 |
+
c2.metric("RMSE", f"{metrics_val.get('RMSE',0):.4f}")
|
| 677 |
+
c3.metric("MAE", f"{metrics_val.get('MAE',0):.4f}")
|
| 678 |
+
|
| 679 |
+
left_sp, col_cross, sp_mid, col_track, sp_right = st.columns([0.2, 0.6, 0.05, 0.35, 0.2])
|
| 680 |
+
with col_cross:
|
| 681 |
+
st.plotly_chart(
|
| 682 |
+
cross_plot_interactive(
|
| 683 |
+
st.session_state.results["Validate"][TARGET],
|
| 684 |
+
st.session_state.results["Validate"]["UCS_Pred"]),
|
| 685 |
+
use_container_width=False,
|
| 686 |
+
config={"displayModeBar": False, "scrollZoom": True}
|
| 687 |
+
)
|
| 688 |
+
with col_track:
|
| 689 |
+
st.plotly_chart(
|
| 690 |
+
depth_or_index_track_interactive(
|
| 691 |
+
st.session_state.results["Validate"], include_actual=True),
|
| 692 |
+
use_container_width=False,
|
| 693 |
+
config={"displayModeBar": False, "scrollZoom": True}
|
| 694 |
+
)
|
| 695 |
|
| 696 |
+
sv = st.session_state.results["summary_val"]; oor_table = st.session_state.results.get("oor_table")
|
| 697 |
if sv["oor_pct"] > 0:
|
| 698 |
st.warning("Some validation inputs fall outside the **training min–max** ranges. Interpret predictions with caution.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 699 |
if oor_table is not None:
|
| 700 |
st.write("*Out-of-range rows (vs. Training min–max):*")
|
| 701 |
st.dataframe(oor_table, use_container_width=True)
|
| 702 |
|
| 703 |
st.markdown("---")
|
| 704 |
+
sheets = {"Validate_with_pred": st.session_state.results["Validate"]}
|
| 705 |
+
rows = []
|
| 706 |
+
for name, key in [("Train","metrics_train"), ("Test","metrics_test"), ("Validate","metrics_val")]:
|
| 707 |
+
m = st.session_state.results.get(key)
|
| 708 |
+
if m: rows.append({"Split": name, **{k: round(v,6) for k,v in m.items()}})
|
| 709 |
+
summary_df = pd.DataFrame(rows) if rows else None
|
| 710 |
try:
|
| 711 |
buf = io.BytesIO()
|
| 712 |
with pd.ExcelWriter(buf, engine="openpyxl") as xw:
|
| 713 |
+
for name, frame in sheets.items():
|
| 714 |
+
frame.to_excel(xw, sheet_name=name[:31], index=False)
|
| 715 |
+
if summary_df is not None:
|
| 716 |
+
summary_df.to_excel(xw, sheet_name="Summary", index=False)
|
|
|
|
| 717 |
st.download_button(
|
| 718 |
"Export Validation Results to Excel",
|
| 719 |
data=buf.getvalue(),
|
|
|
|
| 723 |
except Exception as e:
|
| 724 |
st.warning(str(e))
|
| 725 |
|
|
|
|
| 726 |
# =========================
|
| 727 |
+
# PREDICTION-ONLY (no actual UCS)
|
| 728 |
# =========================
|
| 729 |
+
if st.session_state.app_step == "predict":
|
| 730 |
+
st.sidebar.header("Prediction (No Actual UCS)")
|
| 731 |
pred_file = st.sidebar.file_uploader("Upload Prediction Excel", type=["xlsx","xls"], key="pred_upload")
|
| 732 |
if pred_file is not None:
|
| 733 |
_book_tmp = read_book_bytes(pred_file.getvalue())
|
|
|
|
| 735 |
first_df = next(iter(_book_tmp.values()))
|
| 736 |
st.sidebar.caption(f"**Data loaded:** {pred_file.name} • {first_df.shape[0]} rows × {first_df.shape[1]} cols")
|
| 737 |
|
| 738 |
+
preview_pred_btn = st.sidebar.button("Preview data", use_container_width=True, disabled=(pred_file is None))
|
| 739 |
+
if preview_pred_btn and pred_file is not None:
|
| 740 |
_book = read_book_bytes(pred_file.getvalue())
|
| 741 |
+
preview_modal_val(_book, FEATURES)
|
| 742 |
|
| 743 |
+
do_pred = st.sidebar.button("Predict", type="primary", use_container_width=True)
|
| 744 |
+
if st.sidebar.button("⬅ Back to Case Building", use_container_width=True):
|
| 745 |
+
st.session_state.app_step = "dev"; st.rerun()
|
| 746 |
|
| 747 |
st.subheader("Prediction")
|
| 748 |
+
st.write("Upload a dataset with the feature columns (no **UCS**). You’ll get predicted UCS, a UCS track, and a compact summary.")
|
| 749 |
|
| 750 |
+
if do_pred and pred_file is not None:
|
| 751 |
with st.status("Predicting…", expanded=False) as status:
|
| 752 |
pbook = read_book_bytes(pred_file.getvalue())
|
| 753 |
+
if not pbook: status.update(label="Could not read the Excel.", state="error"); st.stop()
|
| 754 |
status.update(label="Workbook read ✓")
|
| 755 |
+
pname = list(pbook.keys())[0]
|
| 756 |
dfp = pbook[pname].copy()
|
| 757 |
+
if not ensure_cols(dfp, FEATURES): status.update(label="Missing required columns.", state="error"); st.stop()
|
|
|
|
| 758 |
status.update(label="Columns validated ✓")
|
| 759 |
dfp["UCS_Pred"] = model.predict(dfp[FEATURES])
|
| 760 |
st.session_state.results["PredictOnly"] = dfp
|
| 761 |
|
| 762 |
+
ranges = st.session_state.train_ranges; oor_pct = 0.0
|
|
|
|
| 763 |
if ranges:
|
| 764 |
+
any_viol = pd.DataFrame({f: (dfp[f] < ranges[f][0]) | (dfp[f] > ranges[f][1]) for f in FEATURES}).any(axis=1)
|
|
|
|
| 765 |
oor_pct = float(any_viol.mean()*100.0)
|
| 766 |
|
| 767 |
+
st.session_state.results["summary_predonly"] = {
|
| 768 |
"n_points": len(dfp),
|
| 769 |
"pred_min": float(dfp["UCS_Pred"].min()),
|
| 770 |
"pred_max": float(dfp["UCS_Pred"].max()),
|
| 771 |
"pred_mean": float(dfp["UCS_Pred"].mean()),
|
| 772 |
"pred_std": float(dfp["UCS_Pred"].std(ddof=0)),
|
| 773 |
+
"oor_pct": oor_pct,
|
| 774 |
}
|
| 775 |
status.update(label="Predictions ready ✓", state="complete")
|
| 776 |
|
| 777 |
if "PredictOnly" in st.session_state.results:
|
| 778 |
dfp = st.session_state.results["PredictOnly"]
|
| 779 |
+
sv = st.session_state.results["summary_predonly"]
|
| 780 |
+
|
| 781 |
+
# Left summary table (in place of cross-plot), right UCS track
|
| 782 |
+
left_sp, col_summary, sp_mid, col_track, sp_right = st.columns([0.18, 0.42, 0.05, 0.35, 0.2])
|
| 783 |
+
|
| 784 |
+
with col_summary:
|
| 785 |
+
tbl = pd.DataFrame({
|
| 786 |
+
"Metric": ["# points","Pred min","Pred max","Pred mean","Pred std","OOR %"],
|
| 787 |
+
"Value": [sv["n_points"], sv["pred_min"], sv["pred_max"], sv["pred_mean"], sv["pred_std"], f'{sv["oor_pct"]:.1f}%']
|
| 788 |
+
})
|
| 789 |
+
st.success("Predictions ready ✓")
|
| 790 |
+
st.dataframe(tbl, use_container_width=True, hide_index=True)
|
| 791 |
+
|
| 792 |
+
st.caption("**★ OOR** = percentage of rows whose input features fall outside the training min–max range.")
|
| 793 |
+
|
| 794 |
+
with col_track:
|
| 795 |
+
st.plotly_chart(
|
| 796 |
+
depth_or_index_track_interactive(dfp, include_actual=False),
|
| 797 |
+
use_container_width=False,
|
| 798 |
+
config={"displayModeBar": False, "scrollZoom": True}
|
| 799 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 800 |
|
| 801 |
st.markdown("---")
|
|
|
|
| 802 |
try:
|
| 803 |
buf = io.BytesIO()
|
| 804 |
with pd.ExcelWriter(buf, engine="openpyxl") as xw:
|
| 805 |
+
dfp.to_excel(xw, sheet_name="Predictions", index=False)
|
| 806 |
+
pd.DataFrame([sv]).to_excel(xw, sheet_name="Summary", index=False)
|
| 807 |
st.download_button(
|
| 808 |
"Export Prediction Results to Excel",
|
| 809 |
data=buf.getvalue(),
|
|
|
|
| 813 |
except Exception as e:
|
| 814 |
st.warning(str(e))
|
| 815 |
|
|
|
|
| 816 |
# =========================
|
| 817 |
# Footer
|
| 818 |
# =========================
|