UCS2014 committed on
Commit
fe099d0
·
verified ·
1 Parent(s): 00711eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -120
app.py CHANGED
@@ -1,17 +1,16 @@
1
- # app.py — ST_Min_Horizontal_Stress (σhmin)
2
- # Self-contained Streamlit app that TRAINS a fixed, optimized ML pipeline in-app.
3
- # No external model files, no model-source UI. Upload Excel and go.
4
 
5
- import io, json, os, base64, math
6
  from pathlib import Path
7
  from datetime import datetime
8
 
9
  import streamlit as st
10
  import pandas as pd
11
  import numpy as np
12
- import joblib # only used to cache pipeline inside session
13
 
14
- # Matplotlib for static previews & cross-plot
15
  import matplotlib
16
  matplotlib.use("Agg")
17
  import matplotlib.pyplot as plt
@@ -26,18 +25,28 @@ from sklearn.impute import SimpleImputer
26
  # =========================
27
  # App constants / defaults
28
  # =========================
29
- APP_NAME = "ST_GeoMech_Shmin"
30
- TAGLINE = "Real-Time Minimum Horizontal Stress Prediction"
31
-
32
- # -------- Canonical names (match your files) --------
33
- FEATURES = ["Q (gpm)", "SPP (psi)", "T (kft.lbf)", "WOB (klbf)", "ROP (ft/h)"]
34
- TARGET = "MINStress_Actual"
35
- PRED_COL = "MINStress_Pred"
36
- ACTUAL_COL = TARGET
37
- TRANSFORM = "none" # "none" | "log10" | "ln"
38
- UNITS = "Psi"
39
-
40
- # ---- Fixed ("best") model params baked into the code ----
 
 
 
 
 
 
 
 
 
 
41
  BEST_PARAMS = dict(
42
  n_estimators=400,
43
  max_depth=None,
@@ -56,8 +65,6 @@ TRACK_H, TRACK_W = 1000, 500
56
  FONT_SZ = 13
57
  BOLD_FONT = "Arial Black, Arial, sans-serif"
58
 
59
- STRICT_VERSION_CHECK = False # we now train in this env, so no version pin warnings
60
-
61
  # =========================
62
  # Page / CSS
63
  # =========================
@@ -87,13 +94,14 @@ TABLE_CENTER_CSS = [
87
  ]
88
 
89
  # =========================
90
- # Password gate (optional)
91
  # =========================
92
  def inline_logo(path="logo.png") -> str:
93
  try:
94
  p = Path(path)
95
  if not p.exists(): return ""
96
- return f"data:image/png;base64,{base64.b64encode(p.read_bytes()).decode('ascii')}"
 
97
  except Exception:
98
  return ""
99
 
@@ -102,13 +110,10 @@ def add_password_gate() -> None:
102
  required = st.secrets.get("APP_PASSWORD", "")
103
  except Exception:
104
  required = os.environ.get("APP_PASSWORD", "")
105
-
106
  if not required:
107
- return # no password configured
108
-
109
  if st.session_state.get("auth_ok", False):
110
  return
111
-
112
  st.sidebar.markdown(f"""
113
  <div class="centered-container">
114
  <img src="{inline_logo('logo.png')}" class="brand-logo">
@@ -209,6 +214,14 @@ def _make_X(df: pd.DataFrame, features: list[str]) -> pd.DataFrame:
209
  X[c] = pd.to_numeric(X[c], errors="coerce")
210
  return X
211
 
 
 
 
 
 
 
 
 
212
  # =========================
213
  # Export helpers
214
  # =========================
@@ -263,13 +276,13 @@ def build_export_workbook(selected: list[str], ndigits: int = 3, do_autofit: boo
263
  if "Training" in selected and "Train" in res: _add("Training", res["Train"])
264
  if "Training_Metrics" in selected and res.get("m_train"): _add("Training_Metrics", pd.DataFrame([res["m_train"]]))
265
  if "Training_Summary" in selected and "Train" in res:
266
- tr_cols = FEATURES + [c for c in [TARGET, PRED_COL] if c in res["Train"].columns]
267
  _add("Training_Summary", _summary_table(res["Train"], tr_cols))
268
 
269
  if "Testing" in selected and "Test" in res: _add("Testing", res["Test"])
270
  if "Testing_Metrics" in selected and res.get("m_test"): _add("Testing_Metrics", pd.DataFrame([res["m_test"]]))
271
  if "Testing_Summary" in selected and "Test" in res:
272
- te_cols = FEATURES + [c for c in [TARGET, PRED_COL] if c in res["Test"].columns]
273
  _add("Testing_Summary", _summary_table(res["Test"], te_cols))
274
 
275
  if "Validation" in selected and "Validate" in res: _add("Validation", res["Validate"])
@@ -288,7 +301,7 @@ def build_export_workbook(selected: list[str], ndigits: int = 3, do_autofit: boo
288
  info = pd.DataFrame([
289
  {"Key": "AppName", "Value": APP_NAME},
290
  {"Key": "Tagline", "Value": TAGLINE},
291
- {"Key": "Target", "Value": TARGET},
292
  {"Key": "PredColumn", "Value": PRED_COL},
293
  {"Key": "Features", "Value": ", ".join(FEATURES)},
294
  {"Key": "ExportedAt", "Value": datetime.now().strftime("%Y-%m-%d %H:%M:%S")},
@@ -305,7 +318,7 @@ def build_export_workbook(selected: list[str], ndigits: int = 3, do_autofit: boo
305
  df.to_excel(writer, sheet_name=sheet, index=False)
306
  if do_autofit: _excel_autofit(writer, sheet, df)
307
  bio.seek(0)
308
- fname = f"MinStress_Export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"
309
  return bio.getvalue(), fname, order
310
 
311
  def render_export_button(phase_key: str) -> None:
@@ -316,28 +329,27 @@ def render_export_button(phase_key: str) -> None:
316
  options = _available_sections()
317
  selected_sheets = st.multiselect(
318
  "Sheets to include",
319
- options=options,
320
- default=[],
321
  placeholder="Choose option(s)",
322
  help="Pick the sheets you want in the Excel export.",
323
  key=f"sheets_{phase_key}",
324
  )
325
  if not selected_sheets:
326
  st.caption("Select one or more sheets above to enable export.")
327
- st.download_button("⬇️ Export Excel", data=b"", file_name="MinStress_Export.xlsx",
328
  mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
329
  disabled=True, key=f"download_{phase_key}")
330
  return
331
  data, fname, names = build_export_workbook(selected=selected_sheets, ndigits=3, do_autofit=True)
332
  if names: st.caption("Will include: " + ", ".join(names))
333
- st.download_button("⬇️ Export Excel", data=(data or b""), file_name=(fname or "MinStress_Export.xlsx"),
334
  mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
335
  disabled=(data is None), key=f"download_{phase_key}")
336
 
337
  # =========================
338
- # Plots
339
  # =========================
340
- def cross_plot_static(actual, pred):
341
  a = pd.Series(actual, dtype=float)
342
  p = pd.Series(pred, dtype=float)
343
  lo = float(min(a.min(), p.min())); hi = float(max(a.max(), p.max()))
@@ -354,11 +366,11 @@ def cross_plot_static(actual, pred):
354
  ax.set_xticks(ticks); ax.set_yticks(ticks)
355
  ax.set_aspect("equal", adjustable="box")
356
 
357
- fmt = FuncFormatter(lambda x, _: f"{x:.0f}") # no decimals on cross-plot
358
  ax.xaxis.set_major_formatter(fmt); ax.yaxis.set_major_formatter(fmt)
359
 
360
- ax.set_xlabel(f"Actual Min Stress ({UNITS})", fontweight="bold", fontsize=10, color="black")
361
- ax.set_ylabel(f"Predicted Min Stress ({UNITS})", fontweight="bold", fontsize=10, color="black")
362
  ax.tick_params(labelsize=6, colors="black")
363
  ax.grid(True, linestyle=":", alpha=0.3)
364
  for spine in ax.spines.values():
@@ -367,8 +379,8 @@ def cross_plot_static(actual, pred):
367
  fig.subplots_adjust(left=0.16, bottom=0.16, right=0.98, top=0.98)
368
  return fig
369
 
370
- def track_plot(df, include_actual=True):
371
- depth_col = next((c for c in df.columns if 'depth' in str(c).lower()), None)
372
  if depth_col is not None:
373
  y = pd.to_numeric(df[depth_col], errors="coerce"); ylab = depth_col
374
  y_range = [float(np.nanmax(y)), float(np.nanmin(y))] # reversed
@@ -377,9 +389,9 @@ def track_plot(df, include_actual=True):
377
  y_range = [float(y.max()), float(y.min())]
378
 
379
  x_series = pd.Series(df.get(PRED_COL, pd.Series(dtype=float))).astype(float)
380
- act_col = ACTUAL_COL if (ACTUAL_COL and ACTUAL_COL in df.columns) else TARGET
381
- if include_actual and act_col in df.columns:
382
- x_series = pd.concat([x_series, pd.Series(df[act_col]).astype(float)], ignore_index=True)
383
  x_lo, x_hi = float(x_series.min()), float(x_series.max())
384
  x_pad = 0.03 * (x_hi - x_lo if x_hi > x_lo else 1.0)
385
  xmin, xmax = x_lo - x_pad, x_hi + x_pad
@@ -391,14 +403,14 @@ def track_plot(df, include_actual=True):
391
  x=df[PRED_COL], y=y, mode="lines",
392
  line=dict(color=COLORS["pred"], width=1.8),
393
  name=PRED_COL,
394
- hovertemplate=f"{PRED_COL}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"
395
  ))
396
- if include_actual and act_col in df.columns:
397
  fig.add_trace(go.Scatter(
398
- x=df[act_col], y=y, mode="lines",
399
  line=dict(color=COLORS["actual"], width=2.0, dash="dot"),
400
- name=f"{act_col} (actual)",
401
- hovertemplate=f"{act_col}: "+"%{x:.2f}<br>"+ylab+": %{y}<extra></extra>"
402
  ))
403
 
404
  fig.update_layout(
@@ -411,13 +423,11 @@ def track_plot(df, include_actual=True):
411
  legend_title_text=""
412
  )
413
  fig.update_xaxes(
414
- title_text=f"Min Stress ({UNITS})",
415
  title_font=dict(size=20, family=BOLD_FONT, color="#000"),
416
  tickfont=dict(size=15, family=BOLD_FONT, color="#000"),
417
  side="top", range=[xmin, xmax],
418
- ticks="outside",
419
- tickformat=",.0f", # <— no decimals on ticks
420
- tickmode="auto", tick0=tick0,
421
  showline=True, linewidth=1.2, linecolor="#444", mirror=True,
422
  showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
423
  )
@@ -440,7 +450,7 @@ def preview_tracks(df: pd.DataFrame, cols: list[str]):
440
  ax.axis("off")
441
  return fig
442
 
443
- depth_col = next((c for c in df.columns if 'depth' in str(c).lower()), None)
444
  if depth_col is not None:
445
  idx = pd.to_numeric(df[depth_col], errors="coerce")
446
  y_label = depth_col
@@ -475,12 +485,6 @@ def preview_tracks(df: pd.DataFrame, cols: list[str]):
475
  # Fixed training pipeline
476
  # =========================
477
  def build_pipeline() -> Pipeline:
478
- """
479
- Fixed, optimized pipeline:
480
- - Numeric imputation (median)
481
- - RandomForestRegressor with tuned params (BEST_PARAMS)
482
- Trees don't need scaling; robust to feature distributions.
483
- """
484
  model = RandomForestRegressor(**BEST_PARAMS)
485
  pipe = Pipeline(steps=[
486
  ("imputer", SimpleImputer(strategy="median")),
@@ -489,7 +493,7 @@ def build_pipeline() -> Pipeline:
489
  return pipe
490
 
491
  # =========================
492
- # Session state
493
  # =========================
494
  st.session_state.setdefault("app_step", "intro")
495
  st.session_state.setdefault("results", {})
@@ -497,12 +501,11 @@ st.session_state.setdefault("train_ranges", None)
497
  st.session_state.setdefault("dev_file_name","")
498
  st.session_state.setdefault("dev_file_bytes",b"")
499
  st.session_state.setdefault("dev_file_loaded",False)
500
- st.session_state.setdefault("dev_preview",False)
501
- st.session_state.setdefault("fitted_model", None) # cache trained pipeline
502
 
503
- # NEW: persistent top-of-page preview panel state
504
  st.session_state.setdefault("show_preview_panel", False)
505
- st.session_state.setdefault("preview_book", {}) # parsed Excel sheets to preview
506
 
507
  # =========================
508
  # Sidebar branding
@@ -532,12 +535,10 @@ def sticky_header(title, message):
532
  unsafe_allow_html=True
533
  )
534
 
535
- # ---------- Top-of-page Preview Panel ----------
536
  def render_preview_panel():
537
- """If enabled, draws a preview panel at the very top of the page."""
538
  if not st.session_state.get("show_preview_panel"):
539
  return
540
-
541
  st.markdown("## 🔎 Data preview")
542
  book = st.session_state.get("preview_book", {}) or {}
543
  if not book:
@@ -581,7 +582,7 @@ def render_preview_panel():
581
  # =========================
582
  if st.session_state.app_step == "intro":
583
  st.header("Welcome!")
584
- st.markdown(f"This software is developed by *Smart Thinking AI-Solutions Team* to estimate **Minimum Horizontal Stress** ({UNITS}) from drilling/offset data.")
585
  st.subheader("How It Works")
586
  st.markdown(
587
  "1) **Upload your data file** and click **Run Model** to fit the baked-in pipeline. \n"
@@ -607,8 +608,11 @@ if st.session_state.app_step == "dev":
607
  st.session_state.dev_file_bytes = up.getvalue()
608
  st.session_state.dev_file_name = up.name
609
  st.session_state.dev_file_loaded = True
610
- st.session_state.dev_preview = False
611
- st.session_state.fitted_model = None # reset
 
 
 
612
 
613
  if st.session_state.dev_file_loaded:
614
  tmp = read_book_bytes(st.session_state.dev_file_bytes)
@@ -616,12 +620,6 @@ if st.session_state.app_step == "dev":
616
  df0 = next(iter(tmp.values()))
617
  st.sidebar.caption(f"**Data loaded:** {st.session_state.dev_file_name} • {df0.shape[0]} rows × {df0.shape[1]} cols")
618
 
619
- # PREVIEW button -> show preview panel at top
620
- if st.sidebar.button("Preview data", use_container_width=True, disabled=not st.session_state.dev_file_loaded):
621
- st.session_state.preview_book = read_book_bytes(st.session_state.dev_file_bytes) if st.session_state.dev_file_bytes else {}
622
- st.session_state.show_preview_panel = True
623
- st.rerun()
624
-
625
  run = st.sidebar.button("Run Model", type="primary", use_container_width=True)
626
  if st.sidebar.button("Proceed to Validation ▶", use_container_width=True): st.session_state.app_step="validate"; st.rerun()
627
  if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True): st.session_state.app_step="predict"; st.rerun()
@@ -633,7 +631,6 @@ if st.session_state.app_step == "dev":
633
  else:
634
  sticky_header("Case Building", "**Upload your data to build a case, then run the model to review performance.**")
635
 
636
- # Render the preview panel at the very top (above results)
637
  render_preview_panel()
638
 
639
  if run and st.session_state.dev_file_bytes:
@@ -647,22 +644,27 @@ if st.session_state.app_step == "dev":
647
  tr0 = book[sh_train].copy()
648
  te0 = book[sh_test].copy()
649
 
650
- # Ensure columns exist
651
- if not (ensure_cols(tr0, FEATURES+[TARGET]) and ensure_cols(te0, FEATURES+[TARGET])):
652
- st.markdown('<div class="st-message-box st-error">Missing required columns.</div>', unsafe_allow_html=True)
 
 
 
 
 
 
 
653
  st.stop()
654
 
655
  # Prepare X,y
656
  X_tr = _make_X(tr0, FEATURES)
657
- y_tr = pd.to_numeric(tr0[TARGET], errors="coerce")
658
  X_te = _make_X(te0, FEATURES)
659
- y_te = pd.to_numeric(te0[TARGET], errors="coerce")
660
 
661
  # Drop rows with NA in y
662
- mask_tr = np.isfinite(y_tr)
663
- X_tr, y_tr = X_tr.loc[mask_tr], y_tr.loc[mask_tr]
664
- mask_te = np.isfinite(y_te)
665
- X_te, y_te = X_te.loc[mask_te], y_te.loc[mask_te]
666
 
667
  pipe = build_pipeline()
668
  pipe.fit(X_tr, y_tr)
@@ -673,24 +675,31 @@ if st.session_state.app_step == "dev":
673
  tr[PRED_COL] = _inv_transform(pipe.predict(_make_X(tr0, FEATURES)), TRANSFORM)
674
  te[PRED_COL] = _inv_transform(pipe.predict(_make_X(te0, FEATURES)), TRANSFORM)
675
 
 
676
  st.session_state.results["Train"] = tr
677
  st.session_state.results["Test"] = te
678
  st.session_state.results["m_train"] = {
679
- "R": pearson_r(tr[TARGET], tr[PRED_COL]),
680
- "RMSE": rmse(tr[TARGET], tr[PRED_COL]),
681
- "MAPE%": mape(tr[TARGET], tr[PRED_COL]),
682
  }
683
  st.session_state.results["m_test"] = {
684
- "R": pearson_r(te[TARGET], te[PRED_COL]),
685
- "RMSE": rmse(te[TARGET], te[PRED_COL]),
686
- "MAPE%": mape(te[TARGET], te[PRED_COL]),
687
  }
688
 
 
 
 
 
 
689
  tr_min = tr[FEATURES].min().to_dict(); tr_max = tr[FEATURES].max().to_dict()
690
  st.session_state.train_ranges = {f:(float(tr_min[f]), float(tr_max[f])) for f in FEATURES}
 
691
  st.markdown('<div class="st-message-box st-success">Case has been built and results are displayed below.</div>', unsafe_allow_html=True)
692
 
693
- def _dev_block(df, m):
694
  c1,c2,c3 = st.columns(3)
695
  c1.metric("R", f"{m['R']:.3f}")
696
  c2.metric("RMSE", f"{m['RMSE']:.2f}")
@@ -704,17 +713,17 @@ if st.session_state.app_step == "dev":
704
  """, unsafe_allow_html=True)
705
  col_track, col_cross = st.columns([2, 3], gap="large")
706
  with col_track:
707
- st.plotly_chart(track_plot(df, include_actual=True),
708
  use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
709
  with col_cross:
710
- st.pyplot(cross_plot_static(df[TARGET], df[PRED_COL]), use_container_width=False)
711
 
712
  if "Train" in st.session_state.results or "Test" in st.session_state.results:
713
  tab1, tab2 = st.tabs(["Training", "Testing"])
714
- if "Train" in st.session_state.results:
715
- with tab1: _dev_block(st.session_state.results["Train"], st.session_state.results["m_train"])
716
  if "Test" in st.session_state.results:
717
- with tab2: _dev_block(st.session_state.results["Test"], st.session_state.results["m_test"])
718
  render_export_button(phase_key="dev")
719
 
720
  # =========================
@@ -729,7 +738,7 @@ if st.session_state.app_step == "validate":
729
  df0 = next(iter(book.values()))
730
  st.sidebar.caption(f"**Data loaded:** {up.name} • {df0.shape[0]} rows × {df0.shape[1]} cols")
731
 
732
- # PREVIEW button -> show preview panel at top
733
  if st.sidebar.button("Preview data", use_container_width=True, disabled=(up is None)):
734
  st.session_state.preview_book = read_book_bytes(up.getvalue()) if up is not None else {}
735
  st.session_state.show_preview_panel = True
@@ -739,26 +748,30 @@ if st.session_state.app_step == "validate":
739
  if st.sidebar.button("⬅ Back to Case Building", use_container_width=True): st.session_state.app_step="dev"; st.rerun()
740
  if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True): st.session_state.app_step="predict"; st.rerun()
741
 
742
- sticky_header("Validate the Model", "Upload a dataset with the same **features** and **MINStress_Actual** to evaluate performance.")
743
- render_preview_panel() # top-of-page preview
744
 
745
  if go_btn and up is not None:
746
  if st.session_state.fitted_model is None:
747
- st.error("Please train the model first in Case Building.")
748
- st.stop()
749
 
750
  book = read_book_bytes(up.getvalue())
751
  names = list(book.keys())
752
  name = next((s for s in names if s.lower() in ("validation","validate","validation2","val","val2")), names[0])
753
  df0 = book[name].copy()
754
 
755
- if not ensure_cols(df0, FEATURES+[TARGET]):
756
- st.markdown('<div class="st-message-box st-error">Missing required columns.</div>', unsafe_allow_html=True); st.stop()
 
 
 
 
757
 
758
  df = df0.copy()
759
  df[PRED_COL] = _inv_transform(st.session_state.fitted_model.predict(_make_X(df0, FEATURES)), TRANSFORM)
760
  st.session_state.results["Validate"] = df
761
 
 
762
  ranges = st.session_state.train_ranges; oor_pct = 0.0; tbl=None
763
  if ranges:
764
  any_viol = pd.DataFrame({f:(df[f] < ranges[f][0]) | (df[f] > ranges[f][1]) for f in FEATURES}).any(axis=1)
@@ -772,15 +785,16 @@ if st.session_state.app_step == "validate":
772
  )
773
 
774
  st.session_state.results["m_val"] = {
775
- "R": pearson_r(df[TARGET], df[PRED_COL]),
776
- "RMSE": rmse(df[TARGET], df[PRED_COL]),
777
- "MAPE%": mape(df[TARGET], df[PRED_COL]),
778
  }
779
  st.session_state.results["sv_val"] = {"n":len(df), "pred_min":float(df[PRED_COL].min()), "pred_max":float(df[PRED_COL].max()), "oor":oor_pct}
780
  st.session_state.results["oor_tbl"] = tbl
 
781
 
782
  if "Validate" in st.session_state.results:
783
- m = st.session_state.results["m_val"]
784
  c1,c2,c3 = st.columns(3)
785
  c1.metric("R", f"{m['R']:.3f}"); c2.metric("RMSE", f"{m['RMSE']:.2f}"); c3.metric("MAPE%", f"{m['MAPE%']:.2f}")
786
  st.markdown("""
@@ -793,11 +807,12 @@ if st.session_state.app_step == "validate":
793
 
794
  col_track, col_cross = st.columns([2, 3], gap="large")
795
  with col_track:
796
- st.plotly_chart(track_plot(st.session_state.results["Validate"], include_actual=True),
797
  use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
798
  with col_cross:
799
- st.pyplot(cross_plot_static(st.session_state.results["Validate"][TARGET],
800
- st.session_state.results["Validate"][PRED_COL]),
 
801
  use_container_width=False)
802
 
803
  render_export_button(phase_key="validate")
@@ -820,7 +835,7 @@ if st.session_state.app_step == "predict":
820
  df0 = next(iter(book.values()))
821
  st.sidebar.caption(f"**Data loaded:** {up.name} • {df0.shape[0]} rows × {df0.shape[1]} cols")
822
 
823
- # PREVIEW button -> show preview panel at top
824
  if st.sidebar.button("Preview data", use_container_width=True, disabled=(up is None)):
825
  st.session_state.preview_book = read_book_bytes(up.getvalue()) if up is not None else {}
826
  st.session_state.show_preview_panel = True
@@ -830,17 +845,17 @@ if st.session_state.app_step == "predict":
830
  if st.sidebar.button("⬅ Back to Case Building", use_container_width=True): st.session_state.app_step="dev"; st.rerun()
831
 
832
  sticky_header("Prediction", "Upload a dataset with the 5 feature columns (no actual column).")
833
- render_preview_panel() # top-of-page preview
834
 
835
  if go_btn and up is not None:
836
  if st.session_state.fitted_model is None:
837
- st.error("Please train the model first in Case Building.")
838
- st.stop()
839
 
840
  book = read_book_bytes(up.getvalue()); name = list(book.keys())[0]
841
  df0 = book[name].copy()
842
  if not ensure_cols(df0, FEATURES):
843
  st.markdown('<div class="st-message-box st-error">Missing required columns.</div>', unsafe_allow_html=True); st.stop()
 
844
  df = df0.copy()
845
  df[PRED_COL] = _inv_transform(st.session_state.fitted_model.predict(_make_X(df0, FEATURES)), TRANSFORM)
846
  st.session_state.results["PredictOnly"] = df
@@ -871,7 +886,7 @@ if st.session_state.app_step == "predict":
871
  df_centered_rounded(table, hide_index=True)
872
  st.caption("**★ OOR** = % of rows with input features outside the training min–max range.")
873
  with col_right:
874
- st.plotly_chart(track_plot(df, include_actual=False),
875
  use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
876
  render_export_button(phase_key="predict")
877
 
@@ -882,7 +897,6 @@ st.markdown("""
882
  <br><br><br>
883
  <hr>
884
  <div style='text-align:center;color:#6b7280;font-size:1.0em;'>
885
- © 2025 Smart Thinking AI-Solutions Team. All rights reserved.<br>
886
- Website: <a href="https://smartthinking.com.sa" target="_blank" rel="noopener noreferrer">smartthinking.com.sa</a>
887
  </div>
888
  """, unsafe_allow_html=True)
 
1
+ # app_FP.py — ST_GeoMech_FP (Fracture Pressure)
2
+ # Mirrors the SHmin app's specs & workflow (password gate, top preview panel, UI/metrics/exports).
3
+ # Self-contained: trains a fixed, optimized RF pipeline in-app. No external model files.
4
 
5
+ import io, os, base64, math
6
  from pathlib import Path
7
  from datetime import datetime
8
 
9
  import streamlit as st
10
  import pandas as pd
11
  import numpy as np
 
12
 
13
+ # Matplotlib (static previews & cross-plot)
14
  import matplotlib
15
  matplotlib.use("Agg")
16
  import matplotlib.pyplot as plt
 
25
  # =========================
26
  # App constants / defaults
27
  # =========================
28
+ APP_NAME = "ST_GeoMech_FP"
29
+ TAGLINE = "Real-Time Fracture Pressure Prediction"
30
+
31
+ # Canonical feature names (match SHmin app)
32
+ FEATURES = ["Q (gpm)", "SPP (psi)", "T (kft.lbf)", "WOB (klbf)", "ROP (ft/h)"]
33
+
34
+ # Canonical prediction/target labels (we'll auto-resolve target via aliases)
35
+ TARGET_CANON = "FracPress_Actual"
36
+ PRED_COL = "FracPress_Pred"
37
+ UNITS = "Psi"
38
+
39
+ # Target aliases (we'll accept any of these if present in sheets)
40
+ TARGET_ALIASES = [
41
+ "FracPress_Actual", "FracturePressure_Actual", "Fracture Pressure (psi)",
42
+ "Frac Pressure (psi)", "FracPressure", "Frac_Pressure", "FracturePressure",
43
+ "FP_Actual", "FP (psi)"
44
+ ]
45
+
46
+ # Model transform (kept for parity with SHmin; unused for RF by default)
47
+ TRANSFORM = "none" # "none" | "log10" | "ln"
48
+
49
+ # Fixed "best" RF params (robust & fast; you can tweak if you have tuned FP params)
50
  BEST_PARAMS = dict(
51
  n_estimators=400,
52
  max_depth=None,
 
65
  FONT_SZ = 13
66
  BOLD_FONT = "Arial Black, Arial, sans-serif"
67
 
 
 
68
  # =========================
69
  # Page / CSS
70
  # =========================
 
94
  ]
95
 
96
  # =========================
97
+ # Password gate (same as SHmin)
98
  # =========================
99
  def inline_logo(path="logo.png") -> str:
100
  try:
101
  p = Path(path)
102
  if not p.exists(): return ""
103
+ import base64 as _b64
104
+ return f"data:image/png;base64,{_b64.b64encode(p.read_bytes()).decode('ascii')}"
105
  except Exception:
106
  return ""
107
 
 
110
  required = st.secrets.get("APP_PASSWORD", "")
111
  except Exception:
112
  required = os.environ.get("APP_PASSWORD", "")
 
113
  if not required:
114
+ return
 
115
  if st.session_state.get("auth_ok", False):
116
  return
 
117
  st.sidebar.markdown(f"""
118
  <div class="centered-container">
119
  <img src="{inline_logo('logo.png')}" class="brand-logo">
 
214
  X[c] = pd.to_numeric(X[c], errors="coerce")
215
  return X
216
 
217
+ # ---------- Target resolver ----------
218
+ def _resolve_target_col(df: pd.DataFrame) -> str | None:
219
+ cols_lower = {c.lower(): c for c in df.columns}
220
+ for cand in TARGET_ALIASES:
221
+ if cand.lower() in cols_lower:
222
+ return cols_lower[cand.lower()]
223
+ return None
224
+
225
  # =========================
226
  # Export helpers
227
  # =========================
 
276
  if "Training" in selected and "Train" in res: _add("Training", res["Train"])
277
  if "Training_Metrics" in selected and res.get("m_train"): _add("Training_Metrics", pd.DataFrame([res["m_train"]]))
278
  if "Training_Summary" in selected and "Train" in res:
279
+ tr_cols = FEATURES + [c for c in [PRED_COL, st.session_state.get("tcol_train", TARGET_CANON)] if c in res["Train"].columns]
280
  _add("Training_Summary", _summary_table(res["Train"], tr_cols))
281
 
282
  if "Testing" in selected and "Test" in res: _add("Testing", res["Test"])
283
  if "Testing_Metrics" in selected and res.get("m_test"): _add("Testing_Metrics", pd.DataFrame([res["m_test"]]))
284
  if "Testing_Summary" in selected and "Test" in res:
285
+ te_cols = FEATURES + [c for c in [PRED_COL, st.session_state.get("tcol_test", TARGET_CANON)] if c in res["Test"].columns]
286
  _add("Testing_Summary", _summary_table(res["Test"], te_cols))
287
 
288
  if "Validation" in selected and "Validate" in res: _add("Validation", res["Validate"])
 
301
  info = pd.DataFrame([
302
  {"Key": "AppName", "Value": APP_NAME},
303
  {"Key": "Tagline", "Value": TAGLINE},
304
+ {"Key": "Target", "Value": st.session_state.get("tcol_train", TARGET_CANON)},
305
  {"Key": "PredColumn", "Value": PRED_COL},
306
  {"Key": "Features", "Value": ", ".join(FEATURES)},
307
  {"Key": "ExportedAt", "Value": datetime.now().strftime("%Y-%m-%d %H:%M:%S")},
 
318
  df.to_excel(writer, sheet_name=sheet, index=False)
319
  if do_autofit: _excel_autofit(writer, sheet, df)
320
  bio.seek(0)
321
+ fname = f"FracPressure_Export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"
322
  return bio.getvalue(), fname, order
323
 
324
  def render_export_button(phase_key: str) -> None:
 
329
  options = _available_sections()
330
  selected_sheets = st.multiselect(
331
  "Sheets to include",
332
+ options=options, default=[],
 
333
  placeholder="Choose option(s)",
334
  help="Pick the sheets you want in the Excel export.",
335
  key=f"sheets_{phase_key}",
336
  )
337
  if not selected_sheets:
338
  st.caption("Select one or more sheets above to enable export.")
339
+ st.download_button("⬇️ Export Excel", data=b"", file_name="FracPressure_Export.xlsx",
340
  mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
341
  disabled=True, key=f"download_{phase_key}")
342
  return
343
  data, fname, names = build_export_workbook(selected=selected_sheets, ndigits=3, do_autofit=True)
344
  if names: st.caption("Will include: " + ", ".join(names))
345
+ st.download_button("⬇️ Export Excel", data=(data or b""), file_name=(fname or "FracPressure_Export.xlsx"),
346
  mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
347
  disabled=(data is None), key=f"download_{phase_key}")
348
 
349
  # =========================
350
+ # Plots (integer x ticks)
351
  # =========================
352
+ def cross_plot_static(actual, pred, label="Fracture Pressure"):
353
  a = pd.Series(actual, dtype=float)
354
  p = pd.Series(pred, dtype=float)
355
  lo = float(min(a.min(), p.min())); hi = float(max(a.max(), p.max()))
 
366
  ax.set_xticks(ticks); ax.set_yticks(ticks)
367
  ax.set_aspect("equal", adjustable="box")
368
 
369
+ fmt = FuncFormatter(lambda x, _: f"{x:.0f}")
370
  ax.xaxis.set_major_formatter(fmt); ax.yaxis.set_major_formatter(fmt)
371
 
372
+ ax.set_xlabel(f"Actual {label} ({UNITS})", fontweight="bold", fontsize=10, color="black")
373
+ ax.set_ylabel(f"Predicted {label} ({UNITS})", fontweight="bold", fontsize=10, color="black")
374
  ax.tick_params(labelsize=6, colors="black")
375
  ax.grid(True, linestyle=":", alpha=0.3)
376
  for spine in ax.spines.values():
 
379
  fig.subplots_adjust(left=0.16, bottom=0.16, right=0.98, top=0.98)
380
  return fig
381
 
382
+ def track_plot(df: pd.DataFrame, actual_col: str | None, include_actual=True):
383
+ depth_col = next((c for c in df.columns if 'depth' in str(c).lower() or 'tvd' in str(c).lower()), None)
384
  if depth_col is not None:
385
  y = pd.to_numeric(df[depth_col], errors="coerce"); ylab = depth_col
386
  y_range = [float(np.nanmax(y)), float(np.nanmin(y))] # reversed
 
389
  y_range = [float(y.max()), float(y.min())]
390
 
391
  x_series = pd.Series(df.get(PRED_COL, pd.Series(dtype=float))).astype(float)
392
+ if include_actual and actual_col and actual_col in df.columns:
393
+ x_series = pd.concat([x_series, pd.Series(df[actual_col]).astype(float)], ignore_index=True)
394
+
395
  x_lo, x_hi = float(x_series.min()), float(x_series.max())
396
  x_pad = 0.03 * (x_hi - x_lo if x_hi > x_lo else 1.0)
397
  xmin, xmax = x_lo - x_pad, x_hi + x_pad
 
403
  x=df[PRED_COL], y=y, mode="lines",
404
  line=dict(color=COLORS["pred"], width=1.8),
405
  name=PRED_COL,
406
+ hovertemplate=f"{PRED_COL}: "+ "%{x:.0f}<br>" + ylab + ": %{y}<extra></extra>"
407
  ))
408
+ if include_actual and actual_col and actual_col in df.columns:
409
  fig.add_trace(go.Scatter(
410
+ x=df[actual_col], y=y, mode="lines",
411
  line=dict(color=COLORS["actual"], width=2.0, dash="dot"),
412
+ name=f"{actual_col} (actual)",
413
+ hovertemplate=f"{actual_col}: "+ "%{x:.0f}<br>" + ylab + ": %{y}<extra></extra>"
414
  ))
415
 
416
  fig.update_layout(
 
423
  legend_title_text=""
424
  )
425
  fig.update_xaxes(
426
+ title_text=f"Fracture Pressure ({UNITS})",
427
  title_font=dict(size=20, family=BOLD_FONT, color="#000"),
428
  tickfont=dict(size=15, family=BOLD_FONT, color="#000"),
429
  side="top", range=[xmin, xmax],
430
+ ticks="outside", tickformat=",.0f", tickmode="auto", tick0=tick0,
 
 
431
  showline=True, linewidth=1.2, linecolor="#444", mirror=True,
432
  showgrid=True, gridcolor="rgba(0,0,0,0.12)", automargin=True
433
  )
 
450
  ax.axis("off")
451
  return fig
452
 
453
+ depth_col = next((c for c in df.columns if 'depth' in str(c).lower() or 'tvd' in str(c).lower()), None)
454
  if depth_col is not None:
455
  idx = pd.to_numeric(df[depth_col], errors="coerce")
456
  y_label = depth_col
 
485
  # Fixed training pipeline
486
  # =========================
487
  def build_pipeline() -> Pipeline:
 
 
 
 
 
 
488
  model = RandomForestRegressor(**BEST_PARAMS)
489
  pipe = Pipeline(steps=[
490
  ("imputer", SimpleImputer(strategy="median")),
 
493
  return pipe
494
 
495
  # =========================
496
+ # Session state (mirrors SHmin)
497
  # =========================
498
  st.session_state.setdefault("app_step", "intro")
499
  st.session_state.setdefault("results", {})
 
501
st.session_state.setdefault("dev_file_name","")      # name of the uploaded development workbook
st.session_state.setdefault("dev_file_bytes",b"")    # raw bytes of the uploaded workbook
st.session_state.setdefault("dev_file_loaded",False) # True once a workbook has been ingested
st.session_state.setdefault("fitted_model", None)    # trained Pipeline; None until "Run Model" succeeds

# Persistent top-of-page preview panel
st.session_state.setdefault("show_preview_panel", False)  # toggles the preview section at the top of the page
st.session_state.setdefault("preview_book", {})           # cached workbook: sheet name -> DataFrame
509
 
510
  # =========================
511
  # Sidebar branding
 
535
  unsafe_allow_html=True
536
  )
537
 
 
538
  def render_preview_panel():
539
+ """Top-of-page preview panel (same behavior as SHmin)."""
540
  if not st.session_state.get("show_preview_panel"):
541
  return
 
542
  st.markdown("## 🔎 Data preview")
543
  book = st.session_state.get("preview_book", {}) or {}
544
  if not book:
 
582
  # =========================
583
  if st.session_state.app_step == "intro":
584
  st.header("Welcome!")
585
+ st.markdown(f"This software is developed by *Smart Thinking AI-Solutions Team* to estimate **Fracture Pressure** ({UNITS}) from drilling/offset data.")
586
  st.subheader("How It Works")
587
  st.markdown(
588
  "1) **Upload your data file** and click **Run Model** to fit the baked-in pipeline. \n"
 
608
  st.session_state.dev_file_bytes = up.getvalue()
609
  st.session_state.dev_file_name = up.name
610
  st.session_state.dev_file_loaded = True
611
+ st.session_state.fitted_model = None
612
+ # show preview panel
613
+ st.session_state.preview_book = read_book_bytes(st.session_state.dev_file_bytes) if st.session_state.dev_file_bytes else {}
614
+ st.session_state.show_preview_panel = True
615
+ st.rerun()
616
 
617
  if st.session_state.dev_file_loaded:
618
  tmp = read_book_bytes(st.session_state.dev_file_bytes)
 
620
  df0 = next(iter(tmp.values()))
621
  st.sidebar.caption(f"**Data loaded:** {st.session_state.dev_file_name} • {df0.shape[0]} rows × {df0.shape[1]} cols")
622
 
 
 
 
 
 
 
623
  run = st.sidebar.button("Run Model", type="primary", use_container_width=True)
624
  if st.sidebar.button("Proceed to Validation ▶", use_container_width=True): st.session_state.app_step="validate"; st.rerun()
625
  if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True): st.session_state.app_step="predict"; st.rerun()
 
631
  else:
632
  sticky_header("Case Building", "**Upload your data to build a case, then run the model to review performance.**")
633
 
 
634
  render_preview_panel()
635
 
636
  if run and st.session_state.dev_file_bytes:
 
644
  tr0 = book[sh_train].copy()
645
  te0 = book[sh_test].copy()
646
 
647
+ # Resolve target name per-sheet
648
+ tcol_tr = _resolve_target_col(tr0)
649
+ tcol_te = _resolve_target_col(te0)
650
+ if tcol_tr is None or tcol_te is None:
651
+ st.error(f"Missing target column. Expected one of: {TARGET_ALIASES}")
652
+ st.stop()
653
+
654
+ # Ensure features exist
655
+ if not (ensure_cols(tr0, FEATURES) and ensure_cols(te0, FEATURES)):
656
+ st.markdown('<div class="st-message-box st-error">Missing required feature columns.</div>', unsafe_allow_html=True)
657
  st.stop()
658
 
659
  # Prepare X,y
660
  X_tr = _make_X(tr0, FEATURES)
661
+ y_tr = pd.to_numeric(tr0[tcol_tr], errors="coerce")
662
  X_te = _make_X(te0, FEATURES)
663
+ y_te = pd.to_numeric(te0[tcol_te], errors="coerce")
664
 
665
  # Drop rows with NA in y
666
+ mask_tr = np.isfinite(y_tr); X_tr, y_tr = X_tr.loc[mask_tr], y_tr.loc[mask_tr]
667
+ mask_te = np.isfinite(y_te); X_te, y_te = X_te.loc[mask_te], y_te.loc[mask_te]
 
 
668
 
669
  pipe = build_pipeline()
670
  pipe.fit(X_tr, y_tr)
 
675
  tr[PRED_COL] = _inv_transform(pipe.predict(_make_X(tr0, FEATURES)), TRANSFORM)
676
  te[PRED_COL] = _inv_transform(pipe.predict(_make_X(te0, FEATURES)), TRANSFORM)
677
 
678
+ # Save results & metrics
679
  st.session_state.results["Train"] = tr
680
  st.session_state.results["Test"] = te
681
  st.session_state.results["m_train"] = {
682
+ "R": pearson_r(tr[tcol_tr], tr[PRED_COL]),
683
+ "RMSE": rmse(tr[tcol_tr], tr[PRED_COL]),
684
+ "MAPE%": mape(tr[tcol_tr], tr[PRED_COL]),
685
  }
686
  st.session_state.results["m_test"] = {
687
+ "R": pearson_r(te[tcol_te], te[PRED_COL]),
688
+ "RMSE": rmse(te[tcol_te], te[PRED_COL]),
689
+ "MAPE%": mape(te[tcol_te], te[PRED_COL]),
690
  }
691
 
692
+ # Persist which target names we used (for export/plots)
693
+ st.session_state["tcol_train"] = tcol_tr
694
+ st.session_state["tcol_test"] = tcol_te
695
+
696
+ # Training min–max ranges
697
  tr_min = tr[FEATURES].min().to_dict(); tr_max = tr[FEATURES].max().to_dict()
698
  st.session_state.train_ranges = {f:(float(tr_min[f]), float(tr_max[f])) for f in FEATURES}
699
+
700
  st.markdown('<div class="st-message-box st-success">Case has been built and results are displayed below.</div>', unsafe_allow_html=True)
701
 
702
+ def _dev_block(df: pd.DataFrame, actual_col: str, m: dict):
703
  c1,c2,c3 = st.columns(3)
704
  c1.metric("R", f"{m['R']:.3f}")
705
  c2.metric("RMSE", f"{m['RMSE']:.2f}")
 
713
  """, unsafe_allow_html=True)
714
  col_track, col_cross = st.columns([2, 3], gap="large")
715
  with col_track:
716
+ st.plotly_chart(track_plot(df, actual_col, include_actual=True),
717
  use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
718
  with col_cross:
719
+ st.pyplot(cross_plot_static(df[actual_col], df[PRED_COL], label="Fracture Pressure"), use_container_width=False)
720
 
721
  if "Train" in st.session_state.results or "Test" in st.session_state.results:
722
  tab1, tab2 = st.tabs(["Training", "Testing"])
723
+ if "Train" in st.session_state.results:
724
+ with tab1: _dev_block(st.session_state.results["Train"], st.session_state.get("tcol_train", TARGET_CANON), st.session_state.results["m_train"])
725
  if "Test" in st.session_state.results:
726
+ with tab2: _dev_block(st.session_state.results["Test"], st.session_state.get("tcol_test", TARGET_CANON), st.session_state.results["m_test"])
727
  render_export_button(phase_key="dev")
728
 
729
  # =========================
 
738
  df0 = next(iter(book.values()))
739
  st.sidebar.caption(f"**Data loaded:** {up.name} • {df0.shape[0]} rows × {df0.shape[1]} cols")
740
 
741
+ # preview panel on top
742
  if st.sidebar.button("Preview data", use_container_width=True, disabled=(up is None)):
743
  st.session_state.preview_book = read_book_bytes(up.getvalue()) if up is not None else {}
744
  st.session_state.show_preview_panel = True
 
748
  if st.sidebar.button("⬅ Back to Case Building", use_container_width=True): st.session_state.app_step="dev"; st.rerun()
749
  if st.sidebar.button("Proceed to Prediction ▶", use_container_width=True): st.session_state.app_step="predict"; st.rerun()
750
 
751
+ sticky_header("Validate the Model", "Upload a dataset with the same **features** and an **actual fracture pressure** column.")
752
+ render_preview_panel()
753
 
754
  if go_btn and up is not None:
755
  if st.session_state.fitted_model is None:
756
+ st.error("Please train the model first in Case Building."); st.stop()
 
757
 
758
  book = read_book_bytes(up.getvalue())
759
  names = list(book.keys())
760
  name = next((s for s in names if s.lower() in ("validation","validate","validation2","val","val2")), names[0])
761
  df0 = book[name].copy()
762
 
763
+ tcol = _resolve_target_col(df0)
764
+ if tcol is None:
765
+ st.error(f"Missing target column. Expected one of: {TARGET_ALIASES}")
766
+ st.stop()
767
+ if not ensure_cols(df0, FEATURES):
768
+ st.markdown('<div class="st-message-box st-error">Missing required feature columns.</div>', unsafe_allow_html=True); st.stop()
769
 
770
  df = df0.copy()
771
  df[PRED_COL] = _inv_transform(st.session_state.fitted_model.predict(_make_X(df0, FEATURES)), TRANSFORM)
772
  st.session_state.results["Validate"] = df
773
 
774
+ # Range checks
775
  ranges = st.session_state.train_ranges; oor_pct = 0.0; tbl=None
776
  if ranges:
777
  any_viol = pd.DataFrame({f:(df[f] < ranges[f][0]) | (df[f] > ranges[f][1]) for f in FEATURES}).any(axis=1)
 
785
  )
786
 
787
  st.session_state.results["m_val"] = {
788
+ "R": pearson_r(df[tcol], df[PRED_COL]),
789
+ "RMSE": rmse(df[tcol], df[PRED_COL]),
790
+ "MAPE%": mape(df[tcol], df[PRED_COL]),
791
  }
792
  st.session_state.results["sv_val"] = {"n":len(df), "pred_min":float(df[PRED_COL].min()), "pred_max":float(df[PRED_COL].max()), "oor":oor_pct}
793
  st.session_state.results["oor_tbl"] = tbl
794
+ st.session_state["tcol_val"] = tcol
795
 
796
  if "Validate" in st.session_state.results:
797
+ m = st.session_state.results["m_val"]; tcol = st.session_state.get("tcol_val", TARGET_CANON)
798
  c1,c2,c3 = st.columns(3)
799
  c1.metric("R", f"{m['R']:.3f}"); c2.metric("RMSE", f"{m['RMSE']:.2f}"); c3.metric("MAPE%", f"{m['MAPE%']:.2f}")
800
  st.markdown("""
 
807
 
808
  col_track, col_cross = st.columns([2, 3], gap="large")
809
  with col_track:
810
+ st.plotly_chart(track_plot(st.session_state.results["Validate"], tcol, include_actual=True),
811
  use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
812
  with col_cross:
813
+ st.pyplot(cross_plot_static(st.session_state.results["Validate"][tcol],
814
+ st.session_state.results["Validate"][PRED_COL],
815
+ label="Fracture Pressure"),
816
  use_container_width=False)
817
 
818
  render_export_button(phase_key="validate")
 
835
  df0 = next(iter(book.values()))
836
  st.sidebar.caption(f"**Data loaded:** {up.name} • {df0.shape[0]} rows × {df0.shape[1]} cols")
837
 
838
+ # preview panel on top
839
  if st.sidebar.button("Preview data", use_container_width=True, disabled=(up is None)):
840
  st.session_state.preview_book = read_book_bytes(up.getvalue()) if up is not None else {}
841
  st.session_state.show_preview_panel = True
 
845
  if st.sidebar.button("⬅ Back to Case Building", use_container_width=True): st.session_state.app_step="dev"; st.rerun()
846
 
847
  sticky_header("Prediction", "Upload a dataset with the 5 feature columns (no actual column).")
848
+ render_preview_panel()
849
 
850
  if go_btn and up is not None:
851
  if st.session_state.fitted_model is None:
852
+ st.error("Please train the model first in Case Building."); st.stop()
 
853
 
854
  book = read_book_bytes(up.getvalue()); name = list(book.keys())[0]
855
  df0 = book[name].copy()
856
  if not ensure_cols(df0, FEATURES):
857
  st.markdown('<div class="st-message-box st-error">Missing required columns.</div>', unsafe_allow_html=True); st.stop()
858
+
859
  df = df0.copy()
860
  df[PRED_COL] = _inv_transform(st.session_state.fitted_model.predict(_make_X(df0, FEATURES)), TRANSFORM)
861
  st.session_state.results["PredictOnly"] = df
 
886
  df_centered_rounded(table, hide_index=True)
887
  st.caption("**★ OOR** = % of rows with input features outside the training min–max range.")
888
  with col_right:
889
+ st.plotly_chart(track_plot(df, actual_col=None, include_actual=False),
890
  use_container_width=False, config={"displayModeBar": False, "scrollZoom": True})
891
  render_export_button(phase_key="predict")
892
 
 
897
  <br><br><br>
898
  <hr>
899
  <div style='text-align:center;color:#6b7280;font-size:1.0em;'>
900
+ © 2025 Smart Thinking AI-Solutions Team. All rights reserved.
 
901
  </div>
902
  """, unsafe_allow_html=True)