UCS2014 committed on
Commit
b6d6cd6
·
verified ·
1 Parent(s): 031665d

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +423 -0
app.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import io, json, os, base64
3
+ from pathlib import Path
4
+ import streamlit as st
5
+ import pandas as pd
6
+ import numpy as np
7
+ import joblib
8
+ import matplotlib.pyplot as plt
9
+ from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
10
+
11
# =========================
# Defaults (overridden by models/meta.json or model.feature_names_in_)
# =========================
# Drilling-parameter columns the model was trained on; column names must
# match the uploaded workbooks exactly.
FEATURES = ["Q, gpm", "SPP(psi)", "T (kft.lbf)", "WOB (klbf)", "ROP (ft/h)"]
# Target column: unconfined compressive strength.
TARGET = "UCS"
MODELS_DIR = Path("models")
# Primary serialized-model location, plus alternates probed in order.
DEFAULT_MODEL = MODELS_DIR / "ucs_rf.joblib"
MODEL_FALLBACKS = [MODELS_DIR / "model.joblib", MODELS_DIR / "model.pkl"]
19
+
20
# =========================
# Page / Theme
# =========================
st.set_page_config(page_title="ST_GeoMech_UCS", page_icon="logo.png", layout="wide")
# Hide Streamlit's default header/footer chrome.
st.markdown("<style>header, footer{visibility:hidden !important;}</style>", unsafe_allow_html=True)
# App-wide CSS: white canvas, tinted sidebar, branded buttons, hero header layout.
st.markdown(
    """
    <style>
    .stApp { background: #FFFFFF; }
    section[data-testid="stSidebar"] { background: #F6F9FC; }
    .stButton>button{ background:#007bff; color:#fff; font-weight:bold; border-radius:8px; border:none; padding:10px 24px; }
    .stButton>button:hover{ background:#0056b3; }
    /* Hero header */
    .st-hero { display:flex; align-items:center; gap:14px; padding: 6px 0 0 0; }
    .st-hero .brand { width:70px; height:70px; object-fit:contain; }
    .st-hero h1 { margin:0; line-height:1.05; }
    .st-hero .tagline { margin:2px 0 0 2px; color:#6b7280; font-size:1.05rem; font-style:italic; }
    /* Ensure hero is tight to the top */
    [data-testid="stBlock"]{ margin-top:0 !important; }
    </style>
    """,
    unsafe_allow_html=True
)
43
+
44
+ # =========================
45
+ # Small helpers
46
+ # =========================
47
+ def _get_model_url():
48
+ # Avoid Streamlit secrets error when secrets.toml is absent
49
+ try:
50
+ return (st.secrets.get("MODEL_URL", "") or os.environ.get("MODEL_URL", "") or "").strip()
51
+ except Exception:
52
+ return (os.environ.get("MODEL_URL", "") or "").strip()
53
+
54
def rmse(y_true, y_pred):
    """Return the root-mean-squared error between two sequences as a float.

    Computed directly with numpy rather than sklearn's mean_squared_error,
    so this helper is immune to the deprecation/removal of that function's
    `squared=` parameter across scikit-learn versions.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))
56
+
57
def ensure_cols(df, cols):
    """Check that *df* contains every column in *cols*.

    Returns True when all are present; otherwise reports the missing
    names via st.error and returns False.
    """
    missing = [name for name in cols if name not in df.columns]
    if not missing:
        return True
    st.error(f"Missing columns: {missing}\nFound: {list(df.columns)}")
    return False
63
+
64
@st.cache_resource(show_spinner=False)
def load_model(model_path: str):
    """Deserialize the model at *model_path*; cached per path so the
    (potentially large) joblib artifact is loaded once per session."""
    return joblib.load(model_path)
67
+
68
@st.cache_data(show_spinner=False)
def parse_excel(data_bytes: bytes):
    """Parse workbook bytes into {sheet_name: DataFrame}.

    Keyed on the raw bytes, so re-uploads of the same file hit the cache.
    """
    bio = io.BytesIO(data_bytes)
    xl = pd.ExcelFile(bio)
    return {sh: xl.parse(sh) for sh in xl.sheet_names}
73
+
74
def read_book(upload):
    """Turn an uploaded Excel file into {sheet_name: DataFrame}.

    Returns an empty dict when nothing was uploaded or when parsing
    fails (the failure is surfaced via st.error).
    """
    if upload is None:
        return {}
    try:
        return parse_excel(upload.getvalue())
    except Exception as exc:
        st.error(f"Failed to read Excel: {exc}")
        return {}
79
+
80
def find_sheet(book, names):
    """Return the original-cased sheet name matching any candidate in
    *names* (case-insensitive search order follows *names*), else None."""
    lookup = {sheet.lower(): sheet for sheet in book}
    hits = (lookup.get(candidate.lower()) for candidate in names)
    return next((hit for hit in hits if hit is not None), None)
85
+
86
def cross_plot(actual, pred, title, size=(5.6,5.6)):
    """Scatter predicted vs. actual UCS with a dashed 1:1 reference line."""
    fig, ax = plt.subplots(figsize=size)
    ax.scatter(actual, pred, s=16, alpha=0.7)
    # The 1:1 line spans the joint range of both series (NaN-safe extremes).
    low = float(np.nanmin([actual.min(), pred.min()]))
    high = float(np.nanmax([actual.max(), pred.max()]))
    ax.plot([low, high], [low, high], '--')
    ax.set_xlabel("Actual UCS")
    ax.set_ylabel("Predicted UCS")
    ax.set_title(title)
    ax.grid(True, ls=":", alpha=0.4)
    return fig
95
+
96
def depth_or_index_track(df, title, include_actual=True):
    """Plot UCS_Pred (and optionally actual UCS) as a vertical log-style track.

    If a depth-like column exists, plot UCS vs Depth (depth downward);
    otherwise fall back to a 1-based point-index track.
    """
    # First column whose name contains "depth" (case-insensitive) wins.
    depth_col = None
    for c in df.columns:
        if 'depth' in str(c).lower():
            depth_col = c; break
    fig, ax = plt.subplots(figsize=(5.8, 7.5))
    if depth_col is not None:
        ax.plot(df["UCS_Pred"], df[depth_col], label="UCS_Pred")
        if include_actual and TARGET in df.columns:
            ax.plot(df[TARGET], df[depth_col], alpha=0.7, label="UCS (actual)")
        ax.set_ylabel(depth_col); ax.set_xlabel("UCS")
        # Well-log convention: x-axis on top, depth increasing downward.
        ax.xaxis.set_label_position('top'); ax.xaxis.tick_top(); ax.invert_yaxis()
    else:
        idx = np.arange(1, len(df) + 1)
        ax.plot(df["UCS_Pred"], idx, label="UCS_Pred")
        if include_actual and TARGET in df.columns:
            ax.plot(df[TARGET], idx, alpha=0.7, label="UCS (actual)")
        ax.set_ylabel("Point Index"); ax.set_xlabel("UCS")
        ax.xaxis.set_label_position('top'); ax.xaxis.tick_top(); ax.invert_yaxis()
    ax.grid(True, linestyle=":", alpha=0.4); ax.set_title(title, pad=12); ax.legend()
    return fig
118
+
119
def export_workbook(sheets_dict, summary_df=None):
    """Serialize {name: DataFrame} (plus an optional "Summary" sheet) to
    xlsx bytes.

    Raises RuntimeError when openpyxl is not installed.
    """
    try:
        import openpyxl  # noqa: F401 -- availability probe only
    except Exception:
        raise RuntimeError("Export requires openpyxl. Please add it to requirements or install it.")
    buffer = io.BytesIO()
    with pd.ExcelWriter(buffer, engine="openpyxl") as writer:
        for sheet_name, frame in sheets_dict.items():
            # Excel caps sheet names at 31 characters.
            frame.to_excel(writer, sheet_name=sheet_name[:31], index=False)
        if summary_df is not None:
            summary_df.to_excel(writer, sheet_name="Summary", index=False)
    return buffer.getvalue()
130
+
131
def toast(msg):
    """Show a transient toast notification; older Streamlit versions
    without st.toast fall back to an st.info banner."""
    try:
        st.toast(msg)
    except Exception:
        st.info(msg)
134
+
135
def infer_features_from_model(m):
    """Best-effort recovery of training feature names from a fitted
    estimator, or from the final step of a sklearn Pipeline; None when
    no non-empty feature_names_in_ can be found."""
    def _names_of(est):
        # Returns a list of str feature names, or None when absent/empty.
        feats = getattr(est, "feature_names_in_", None)
        if feats is not None and len(feats):
            return [str(f) for f in feats]
        return None

    try:
        direct = _names_of(m)
        if direct:
            return direct
    except Exception:
        pass
    try:
        steps = getattr(m, "steps", None)
        if steps:
            return _names_of(steps[-1][1])
    except Exception:
        pass
    return None
147
+
148
def inline_logo(path="logo.png") -> str:
    """Encode a local PNG as a base64 data: URI for inline HTML.

    Returns "" when the file is missing or unreadable, so callers can
    embed the result unconditionally.
    """
    try:
        logo = Path(path)
        if not logo.exists():
            return ""
        encoded = base64.b64encode(logo.read_bytes()).decode('ascii')
        return f"data:image/png;base64,{encoded}"
    except Exception:
        return ""
155
+
156
# =========================
# Model availability (download on cloud if needed)
# =========================
# Optional remote model location; empty string when unset.
MODEL_URL = _get_model_url()

def ensure_model_present() -> Path:
    """Return a local model path, downloading from MODEL_URL if needed.

    Returns None when no model can be obtained; failures are reported
    via st.error rather than raised.
    """
    # Check local paths first
    for p in [DEFAULT_MODEL, *MODEL_FALLBACKS]:
        if p.exists():
            return p
    # Download if MODEL_URL provided
    if MODEL_URL:
        try:
            import requests
        except Exception:
            st.error("requests is required to download the model. Add 'requests' to requirements.txt.")
            return None
        try:
            DEFAULT_MODEL.parent.mkdir(parents=True, exist_ok=True)
            # Stream in 1 MiB chunks so large artifacts don't blow memory.
            with requests.get(MODEL_URL, stream=True) as r:
                r.raise_for_status()
                with open(DEFAULT_MODEL, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1<<20):
                        f.write(chunk)
            return DEFAULT_MODEL
        except Exception as e:
            st.error(f"Failed to download model from MODEL_URL. {e}")
    return None
184
+
185
# Locate (or download) and deserialize the model; the app cannot run without it.
model_path = ensure_model_present()
if not model_path:
    st.error("Model not found. Upload models/ucs_rf.joblib (or set MODEL_URL in Settings → Variables).")
    st.stop()

# Load model
try:
    model = load_model(str(model_path))
except Exception as e:
    # Use an escaped \n: an f-string literal cannot contain a raw newline
    # before Python 3.12, so the original two-line form was a SyntaxError.
    st.error(f"Failed to load model: {model_path}\n{e}")
    st.stop()
197
+
198
# Meta overrides
# models/meta.json, when present, pins the exact feature list and target
# used at training time; otherwise try to recover features from the model.
meta_path = MODELS_DIR / "meta.json"
if meta_path.exists():
    try:
        meta = json.loads(meta_path.read_text(encoding="utf-8"))
        FEATURES = meta.get("features", FEATURES)
        TARGET = meta.get("target", TARGET)
    except Exception:
        # Malformed meta.json: silently keep the built-in defaults.
        pass
else:
    infer = infer_features_from_model(model)
    if infer: FEATURES = infer
210
+
211
# =========================
# Session state
# =========================
# app_step: current page, one of "intro" | "dev" | "predict".
if "app_step" not in st.session_state: st.session_state.app_step = "intro"
# results: DataFrames with predictions plus metric dicts, keyed by split.
if "results" not in st.session_state: st.session_state.results = {}
# train_ranges: per-feature (min, max) from training data, for OOR checks.
if "train_ranges" not in st.session_state: st.session_state.train_ranges = None
217
+
218
# =========================
# Hero header (logo + title)
# =========================
# inline_logo() returns "" when logo.png is absent; the <img> then simply
# renders empty instead of breaking the page.
st.markdown(
    f"""
    <div class="st-hero">
      <img src="{inline_logo()}" class="brand" />
      <div>
        <h1>ST_GeoMech_UCS</h1>
        <div class="tagline">Real-Time UCS Tracking While Drilling — Cloud Ready</div>
      </div>
    </div>
    """,
    unsafe_allow_html=True,
)
233
+
234
# =========================
# INTRO PAGE (as requested)
# =========================
if st.session_state.app_step == "intro":
    st.header("Welcome!")
    st.markdown("This software is developed by *Smart Thinking AI-Solutions Team* to predict the UCS of the underlying formations while drilling using the drilling data.")
    st.subheader("Required Input Columns")
    # Triple-quoted so the bullet list may span lines: a plain "..." string
    # literal cannot contain raw newlines (SyntaxError in the original).
    st.markdown(
        """- Q, gpm — Flow rate (gallons per minute)
- SPP(psi) — Stand pipe pressure
- T (kft.lbf) — Torque (thousand foot-pounds)
- WOB (klbf) — Weight on bit
- ROP (ft/h) — Rate of penetration"""
    )
    st.subheader("How It Works")
    st.markdown(
        """1. *Upload the Model Development Data.* This should contain your training and testing sets.
2. Click *Run Model* to view metrics, cross-plots, and a track plot.
3. Click *Go to Prediction* and upload a new dataset to get predictions.
4. *Export* everything to Excel for further analysis."""
    )
    if st.button("Start Showcase", type="primary", key="start_showcase"):
        st.session_state.app_step = "dev"; st.rerun()
253
+
254
# =========================
# MODEL DEVELOPMENT (Train/Test)
# =========================
# Runs the loaded model over the uploaded Train/Test workbook, stores the
# predictions + metrics in session state, and renders per-split tabs.
if st.session_state.app_step == "dev":
    st.sidebar.header("Model Development Data")
    train_test_file = st.sidebar.file_uploader("Upload Train/Test Excel", type=["xlsx","xls"], key="dev_upload")
    run_btn = st.sidebar.button("Run Model", type="primary", use_container_width=True)
    # Navigation to the prediction page appears only after a successful run.
    if "Train" in st.session_state.results or "Test" in st.session_state.results:
        st.sidebar.button("Go to Prediction ▶", use_container_width=True, on_click=lambda: st.session_state.update(app_step="predict"))

    st.subheader("Model Development")
    if run_btn and train_test_file is not None:
        with st.status("Processing…", expanded=False) as status:
            book = read_book(train_test_file)
            if not book: status.update(label="Failed to read workbook.", state="error"); st.stop()
            status.update(label="Workbook read ✓")

            # Sheet names are matched case-insensitively against common variants.
            sh_train = find_sheet(book, ["Train","Training","training2","train","training"])
            sh_test = find_sheet(book, ["Test","Testing","testing2","test","testing"])
            if sh_train is None or sh_test is None:
                status.update(label="Workbook must include Train/Training/training2 and Test/Testing/testing2.", state="error"); st.stop()

            df_tr = book[sh_train].copy(); df_te = book[sh_test].copy()
            # Both splits need every feature column plus the actual target.
            if not (ensure_cols(df_tr, FEATURES + [TARGET]) and ensure_cols(df_te, FEATURES + [TARGET])):
                status.update(label="Missing required columns.", state="error"); st.stop()
            status.update(label="Columns validated ✓")
            status.update(label="Predicting…")

            df_tr["UCS_Pred"] = model.predict(df_tr[FEATURES])
            df_te["UCS_Pred"] = model.predict(df_te[FEATURES])
            st.session_state.results["Train"] = df_tr; st.session_state.results["Test"] = df_te

            st.session_state.results["metrics_train"] = {
                "R2": r2_score(df_tr[TARGET], df_tr["UCS_Pred"]),
                "RMSE": rmse(df_tr[TARGET], df_tr["UCS_Pred"]),
                "MAE": mean_absolute_error(df_tr[TARGET], df_tr["UCS_Pred"]),
            }
            st.session_state.results["metrics_test"] = {
                "R2": r2_score(df_te[TARGET], df_te["UCS_Pred"]),
                "RMSE": rmse(df_te[TARGET], df_te["UCS_Pred"]),
                "MAE": mean_absolute_error(df_te[TARGET], df_te["UCS_Pred"]),
            }

            # Remember per-feature training min/max for later out-of-range checks.
            tr_min = df_tr[FEATURES].min().to_dict(); tr_max = df_tr[FEATURES].max().to_dict()
            st.session_state.train_ranges = {f:(float(tr_min[f]), float(tr_max[f])) for f in FEATURES}

            status.update(label="Done ✓", state="complete"); toast("Model run complete 🚀")

    # Results render from session state, so they survive Streamlit reruns.
    if "Train" in st.session_state.results or "Test" in st.session_state.results:
        tab1, tab2 = st.tabs(["Training", "Testing"])
        if "Train" in st.session_state.results:
            with tab1:
                df = st.session_state.results["Train"]; m = st.session_state.results["metrics_train"]
                c1,c2,c3 = st.columns(3); c1.metric("R²", f"{m['R2']:.4f}"); c2.metric("RMSE", f"{m['RMSE']:.4f}"); c3.metric("MAE", f"{m['MAE']:.4f}")
                left,right = st.columns(2)
                with left: st.pyplot(cross_plot(df[TARGET], df["UCS_Pred"], "Training: Actual vs Predicted"), use_container_width=True)
                with right: st.pyplot(depth_or_index_track(df, "Training: Depth/Index Track", include_actual=True), use_container_width=True)
        if "Test" in st.session_state.results:
            with tab2:
                df = st.session_state.results["Test"]; m = st.session_state.results["metrics_test"]
                c1,c2,c3 = st.columns(3); c1.metric("R²", f"{m['R2']:.4f}"); c2.metric("RMSE", f"{m['RMSE']:.4f}"); c3.metric("MAE", f"{m['MAE']:.4f}")
                left,right = st.columns(2)
                with left: st.pyplot(cross_plot(df[TARGET], df["UCS_Pred"], "Testing: Actual vs Predicted"), use_container_width=True)
                with right: st.pyplot(depth_or_index_track(df, "Testing: Depth/Index Track", include_actual=True), use_container_width=True)

        st.markdown("---")
        # Bundle per-split predictions plus a rounded metrics summary for export.
        sheets = {}; rows = []
        if "Train" in st.session_state.results:
            sheets["Train_with_pred"] = st.session_state.results["Train"]
            rows.append({"Split":"Train", **{k:round(v,6) for k,v in st.session_state.results["metrics_train"].items()}})
        if "Test" in st.session_state.results:
            sheets["Test_with_pred"] = st.session_state.results["Test"]
            rows.append({"Split":"Test", **{k:round(v,6) for k,v in st.session_state.results["metrics_test"].items()}})
        summary_df = pd.DataFrame(rows) if rows else None
        try:
            data_bytes = export_workbook(sheets, summary_df)
            st.download_button("Export Train/Test Results to Excel",
                               data=data_bytes, file_name="UCS_Dev_Results.xlsx",
                               mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
        except RuntimeError as e:
            # export_workbook raises RuntimeError when openpyxl is missing.
            st.warning(str(e))
335
+
336
# =========================
# PREDICTION (Validation)
# =========================
# Applies the model to a new workbook, flags rows whose features fall
# outside the training ranges, and (when actual UCS exists) computes metrics.
if st.session_state.app_step == "predict":
    st.sidebar.header("Prediction (Validation)")
    validation_file = st.sidebar.file_uploader("Upload Validation Excel", type=["xlsx","xls"], key="val_upload")
    predict_btn = st.sidebar.button("Predict", type="primary", use_container_width=True)
    st.sidebar.button("⬅ Back", on_click=lambda: st.session_state.update(app_step="dev"), use_container_width=True)

    st.subheader("Prediction")
    st.write("Upload a new dataset to get UCS predictions and see how the model performs on new data.")

    if predict_btn and validation_file is not None:
        with st.status("Predicting…", expanded=False) as status:
            vbook = read_book(validation_file)
            if not vbook: status.update(label="Could not read the Validation Excel.", state="error"); st.stop()
            status.update(label="Workbook read ✓")
            # Fall back to the first sheet when no validation-named sheet exists.
            vname = find_sheet(vbook, ["Validation","Validate","validation2","Val","val"]) or list(vbook.keys())[0]
            df_val = vbook[vname].copy()
            if not ensure_cols(df_val, FEATURES): status.update(label="Missing required columns.", state="error"); st.stop()
            status.update(label="Columns validated ✓")
            df_val["UCS_Pred"] = model.predict(df_val[FEATURES])
            st.session_state.results["Validate"] = df_val

            # Out-of-range check vs. training min/max (only if a dev run stored them).
            ranges = st.session_state.train_ranges; oor_table = None; oor_pct = 0.0
            if ranges:
                viol = {f: (df_val[f] < ranges[f][0]) | (df_val[f] > ranges[f][1]) for f in FEATURES}
                any_viol = pd.DataFrame(viol).any(axis=1); oor_pct = float(any_viol.mean()*100.0)
                if any_viol.any():
                    offenders = df_val.loc[any_viol, FEATURES].copy()
                    # Name the violating feature(s) per offending row.
                    offenders["Violations"] = pd.DataFrame(viol).loc[any_viol].apply(lambda r: ", ".join([c for c,v in r.items() if v]), axis=1)
                    # 1-based row numbers read more naturally in the UI table.
                    offenders.index = offenders.index + 1; oor_table = offenders

            # Metrics are only computable when actual UCS is present.
            metrics_val = None
            if TARGET in df_val.columns:
                metrics_val = {
                    "R2": r2_score(df_val[TARGET], df_val["UCS_Pred"]),
                    "RMSE": rmse(df_val[TARGET], df_val["UCS_Pred"]),
                    "MAE": mean_absolute_error(df_val[TARGET], df_val["UCS_Pred"])
                }
            st.session_state.results["metrics_val"] = metrics_val
            st.session_state.results["summary_val"] = {
                "n_points": len(df_val),
                "pred_min": float(df_val["UCS_Pred"].min()),
                "pred_max": float(df_val["UCS_Pred"].max()),
                "oor_pct": oor_pct
            }
            st.session_state.results["oor_table"] = oor_table
            status.update(label="Predictions ready ✓", state="complete")

    # Results render from session state, so they survive Streamlit reruns.
    if "Validate" in st.session_state.results:
        st.subheader("Validation Results")
        sv = st.session_state.results["summary_val"]; oor_table = st.session_state.results.get("oor_table")
        c1,c2,c3,c4 = st.columns(4)
        c1.metric("points", f"{sv['n_points']}"); c2.metric("Pred min", f"{sv['pred_min']:.2f}")
        c3.metric("Pred max", f"{sv['pred_max']:.2f}"); c4.metric("OOR %", f"{sv['oor_pct']:.1f}%")
        left,right = st.columns(2)
        with left:
            if TARGET in st.session_state.results["Validate"].columns:
                st.pyplot(cross_plot(st.session_state.results["Validate"][TARGET], st.session_state.results["Validate"]["UCS_Pred"], "Validation: Actual vs Predicted"), use_container_width=True)
            else:
                st.info("Actual UCS values are not available in the validation data. Cross-plot cannot be generated.")
        with right:
            st.pyplot(depth_or_index_track(st.session_state.results["Validate"], "Validation: Depth/Index Track", include_actual=(TARGET in st.session_state.results["Validate"].columns)), use_container_width=True)
        if oor_table is not None:
            st.write("*Out-of-range rows (vs. Training min–max):*")
            st.dataframe(oor_table, use_container_width=True)

        st.markdown("---")
        sheets = {"Validate_with_pred": st.session_state.results["Validate"]}
        rows = []
        # Include whichever split metrics exist so the export has full context.
        for name, key in [("Train","metrics_train"), ("Test","metrics_test"), ("Validate","metrics_val")]:
            m = st.session_state.results.get(key)
            if m: rows.append({"Split": name, **{k: round(v,6) for k,v in m.items()}})
        summary_df = pd.DataFrame(rows) if rows else None
        try:
            data_bytes = export_workbook(sheets, summary_df)
            st.download_button("Export Validation Results to Excel",
                               data=data_bytes, file_name="UCS_Validation_Results.xlsx",
                               mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
        except RuntimeError as e:
            # export_workbook raises RuntimeError when openpyxl is missing.
            st.warning(str(e))
418
+
419
# =========================
# Footer
# =========================
# Simple centered branding strip below all page content.
st.markdown("---")
st.markdown("<div style='text-align:center; color:#6b7280;'>ST_GeoMech_UCS • © Smart Thinking</div>", unsafe_allow_html=True)