UCS2014 committed on
Commit
4962fcd
·
verified ·
1 Parent(s): 601b073

Upload 5 files

Browse files
Files changed (5) hide show
  1. app.py +398 -0
  2. logo.png +0 -0
  3. models/meta.json +17 -0
  4. models/ucs_rf.joblib +3 -0
  5. requirements.txt +8 -3
app.py ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import io
import json
import os
from pathlib import Path
from typing import Optional

import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import streamlit as st
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
10
+
11
# =========================
# Defaults (overridden by models/meta.json or model.feature_names_in_)
# =========================
# Drilling-parameter columns the model expects in uploaded workbooks
# (flow rate, standpipe pressure, torque, weight on bit, rate of penetration).
FEATURES = ["Q, gpm", "SPP(psi)", "T (kft.lbf)", "WOB (klbf)", "ROP (ft/h)"]
# Target column: unconfined compressive strength.
TARGET = "UCS"
MODELS_DIR = Path("models")
# Primary model artifact; fallbacks are tried in order when it is absent.
DEFAULT_MODEL = MODELS_DIR / "ucs_rf.joblib"
MODEL_FALLBACKS = [MODELS_DIR / "model.joblib", MODELS_DIR / "model.pkl"]
19
+
20
# =========================
# Page / Theme
# =========================
st.set_page_config(page_title="ST_GeoMech_UCS", page_icon="🛠️", layout="wide")
# Hide Streamlit's built-in header and footer chrome.
st.markdown("<style>header, footer{visibility:hidden !important;}</style>", unsafe_allow_html=True)
# App-wide CSS: white canvas, card-styled sidebar sections, pill badges,
# and primary-button styling.
st.markdown("""

<style>
.stApp { background: #FFFFFF; }
section[data-testid="stSidebar"] { background: #F6F9FC; }
.sidebar-card{ border:1px solid #E5E7EB; border-radius:12px; background:#FFFFFF;
padding:10px 12px; margin:8px 0; box-shadow:0 1px 3px rgba(0,0,0,.06); display:inline-block; }
.sidebar-card h3{ margin:0; font-size:1rem; line-height:1.2; text-align:center; }
.stButton>button{ background:#007bff; color:#fff; font-weight:bold; border-radius:8px; border:none; padding:10px 24px; }
.stButton>button:hover{ background:#0056b3; }
.pill { display:inline-block; padding:2px 10px; border-radius:999px; border:1px solid #e5e7eb; margin:2px; background:#fff; font-size:.9rem; }
</style>
""", unsafe_allow_html=True)
38
+
39
+ # =========================
40
+ # Helpers
41
+ # =========================
42
def rmse(y_true, y_pred):
    """Return the root-mean-squared error between actual and predicted values as a plain float."""
    mse = mean_squared_error(y_true, y_pred)
    return float(np.sqrt(mse))
44
+
45
def ensure_cols(df, cols):
    """Check that `df` contains every column listed in `cols`.

    Returns True when all columns are present; otherwise shows a Streamlit
    error naming the missing and the available columns and returns False.
    """
    miss = [c for c in cols if c not in df.columns]
    if not miss:
        return True
    st.error(f"Missing columns: {miss}\nFound: {list(df.columns)}")
    return False
51
+
52
@st.cache_resource(show_spinner=False)
def load_model(model_path: str):
    """Load the joblib-serialized model, cached once per server process."""
    return joblib.load(model_path)
55
+
56
@st.cache_data(show_spinner=False)
def parse_excel(data_bytes: bytes):
    """Parse raw workbook bytes into {sheet_name: DataFrame}, cached on content."""
    bio = io.BytesIO(data_bytes)
    xl = pd.ExcelFile(bio)
    return {sh: xl.parse(sh) for sh in xl.sheet_names}
61
+
62
def read_book(upload):
    """Read an uploaded Excel file into {sheet_name: DataFrame}.

    Returns {} when no file was uploaded or the workbook cannot be parsed
    (an error message is shown in the UI in the latter case).
    """
    if upload is None:
        return {}
    try:
        sheets = parse_excel(upload.getvalue())
    except Exception as exc:
        st.error(f"Failed to read Excel: {exc}")
        return {}
    return sheets
70
+
71
def find_sheet(book, names):
    """Case-insensitive lookup of the first of `names` present in `book`.

    Returns the sheet's original (cased) name, or None if no candidate matches.
    """
    lookup = {key.lower(): key for key in book}
    for candidate in names:
        hit = lookup.get(candidate.lower())
        if hit is not None:
            return hit
    return None
77
+
78
def cross_plot(actual, pred, title, size=(5.6,5.6)):
    """Scatter predicted vs. actual UCS with a dashed 1:1 reference line.

    Returns the matplotlib Figure.
    """
    fig, ax = plt.subplots(figsize=size)
    ax.scatter(actual, pred, s=16, alpha=0.7)
    # Span of the identity line: cover both series, ignoring NaNs.
    axis_lo = float(np.nanmin([actual.min(), pred.min()]))
    axis_hi = float(np.nanmax([actual.max(), pred.max()]))
    ax.plot([axis_lo, axis_hi], [axis_lo, axis_hi], '--')
    ax.set_xlabel("Actual UCS")
    ax.set_ylabel("Predicted UCS")
    ax.set_title(title)
    ax.grid(True, ls=":", alpha=0.4)
    return fig
87
+
88
def depth_or_index_track(df, title, include_actual=True):
    """Plot UCS as a vertical track against depth (when a depth-like column
    exists) or against the 1-based point index.

    Parameters
    ----------
    df : DataFrame containing "UCS_Pred" (and optionally TARGET and a column
        whose name contains "depth", matched case-insensitively).
    title : chart title.
    include_actual : also draw the actual TARGET curve when that column exists.

    Returns the matplotlib Figure; the vertical axis is inverted so depth
    (or index) increases downward, and the UCS axis labels sit on top,
    log-track style.
    """
    # First column whose name contains "depth" (case-insensitive), else None.
    depth_col = next((c for c in df.columns if 'depth' in str(c).lower()), None)
    fig, ax = plt.subplots(figsize=(5.8, 7.5))
    # Both branches previously duplicated identical plotting code; pick the
    # vertical coordinate and its label once, then plot a single way.
    if depth_col is not None:
        y_vals, y_label = df[depth_col], depth_col
    else:
        y_vals, y_label = np.arange(1, len(df) + 1), "Point Index"
    ax.plot(df["UCS_Pred"], y_vals, label="UCS_Pred")
    if include_actual and TARGET in df.columns:
        ax.plot(df[TARGET], y_vals, alpha=0.7, label="UCS (actual)")
    ax.set_ylabel(y_label)
    ax.set_xlabel("UCS")
    ax.xaxis.set_label_position('top')
    ax.xaxis.tick_top()
    ax.invert_yaxis()
    ax.grid(True, linestyle=":", alpha=0.4)
    ax.set_title(title, pad=12)
    ax.legend()
    return fig
110
+
111
def export_workbook(sheets_dict, summary_df=None):
    """Serialize DataFrames into an in-memory .xlsx workbook and return its bytes.

    One sheet per entry of `sheets_dict` (names truncated to Excel's 31-char
    limit) plus an optional trailing "Summary" sheet.

    Raises RuntimeError when the openpyxl writer engine is not installed.
    """
    try:
        import openpyxl  # ensure engine available
    except Exception:
        raise RuntimeError("Export requires openpyxl. Please add it to requirements or install it.")
    buffer = io.BytesIO()
    with pd.ExcelWriter(buffer, engine="openpyxl") as writer:
        for sheet_name, frame in sheets_dict.items():
            frame.to_excel(writer, sheet_name=sheet_name[:31], index=False)
        if summary_df is not None:
            summary_df.to_excel(writer, sheet_name="Summary", index=False)
    return buffer.getvalue()
123
+
124
def toast(msg):
    """Show a transient toast, falling back to st.info on Streamlit builds without st.toast."""
    try:
        st.toast(msg)
    except Exception:
        st.info(msg)
127
+
128
def infer_features_from_model(m):
    """Best-effort recovery of training feature names from a fitted estimator.

    Checks the estimator itself first, then the final step of a
    sklearn-Pipeline-like object (``.steps``). Returns the names as a list of
    str, or None when nothing usable is recorded.
    """
    candidates = [m]
    try:
        if hasattr(m, "steps") and len(m.steps):
            candidates.append(m.steps[-1][1])
    except Exception:
        pass
    for est in candidates:
        try:
            names = getattr(est, "feature_names_in_", None)
            if names is not None and len(names):
                return [str(x) for x in names]
        except Exception:
            continue
    return None
143
+
144
# =========================
# Model availability (download on cloud if needed)
# =========================
# Model download URL: Streamlit secret takes priority, then the environment.
# Accessing st.secrets can raise on deployments without a secrets.toml
# (older Streamlit raises FileNotFoundError), so fall back to the
# environment variable alone rather than crashing at import time.
try:
    MODEL_URL = st.secrets.get("MODEL_URL", os.environ.get("MODEL_URL", "")).strip()
except Exception:
    MODEL_URL = os.environ.get("MODEL_URL", "").strip()
148
+
149
def ensure_model_present() -> Optional[Path]:
    """Locate the model artifact, downloading it from MODEL_URL if needed.

    Returns the path of a usable model file, or None when no model can be
    found or fetched (a Streamlit error is shown in that case). The return
    annotation is Optional because every failure path yields None.
    """
    # Check local paths first
    for p in [DEFAULT_MODEL, *MODEL_FALLBACKS]:
        if p.exists():
            return p
    # Download only if a MODEL_URL was provided
    if not MODEL_URL:
        return None
    try:
        import requests
    except Exception:
        st.error("requests is required to download the model. Add 'requests' to requirements.txt.")
        return None
    # Stream to a temporary ".part" file and rename on success, so an
    # interrupted download never leaves a truncated file at DEFAULT_MODEL —
    # the local-path check above would otherwise accept it on the next run.
    tmp_path = DEFAULT_MODEL.with_suffix(DEFAULT_MODEL.suffix + ".part")
    try:
        DEFAULT_MODEL.parent.mkdir(parents=True, exist_ok=True)
        with requests.get(MODEL_URL, stream=True) as r:
            r.raise_for_status()
            with open(tmp_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=1 << 20):
                    f.write(chunk)
        tmp_path.replace(DEFAULT_MODEL)
        return DEFAULT_MODEL
    except Exception as e:
        # Best-effort cleanup of the partial download.
        try:
            tmp_path.unlink(missing_ok=True)
        except Exception:
            pass
        st.error(f"Failed to download model from MODEL_URL. {e}")
        return None
172
+
173
# Resolve the model file (local paths first, then optional download);
# halt the whole app if none is available.
model_path = ensure_model_present()
if not model_path:
    st.error("Model not found. Upload models/ucs_rf.joblib (or set MODEL_URL in Secrets).")
    st.stop()

# Load model
try:
    model = load_model(str(model_path))
except Exception as e:
    st.error(f"Failed to load model: {model_path}\n{e}")
    st.stop()
184
+
185
# Meta overrides
# models/meta.json, when present, overrides the default feature/target
# column names; a malformed file is ignored silently and the defaults kept.
meta_path = MODELS_DIR / "meta.json"
if meta_path.exists():
    try:
        meta = json.loads(meta_path.read_text(encoding="utf-8"))
        FEATURES = meta.get("features", FEATURES)
        TARGET = meta.get("target", TARGET)
    except Exception:
        pass
else:
    # No meta file: try to recover feature names recorded on the fitted model.
    infer = infer_features_from_model(model)
    if infer:
        FEATURES = infer
198
+
199
# =========================
# Session state
# =========================
# app_step: current page ("intro" -> "dev" -> "predict").
# results: DataFrames with predictions plus metric dicts, keyed by split.
# train_ranges: per-feature (min, max) from training, for out-of-range checks.
if "app_step" not in st.session_state: st.session_state.app_step = "intro"
if "results" not in st.session_state: st.session_state.results = {}
if "train_ranges" not in st.session_state: st.session_state.train_ranges = None
205
+
206
# =========================
# Sidebar: Model & schema
# =========================
# Always-visible card showing which model file is loaded and its schema.
st.sidebar.markdown('<div class="sidebar-card"><h3>Model</h3>', unsafe_allow_html=True)
st.sidebar.write(f"**Loaded:** `{Path(model_path).name}`")
st.sidebar.write("**Target:**", TARGET)
st.sidebar.write("**Features:**")
for f in FEATURES:
    st.sidebar.markdown(f"<span class='pill'>{f}</span>", unsafe_allow_html=True)
st.sidebar.markdown('</div>', unsafe_allow_html=True)
216
+
217
# =========================
# Intro
# =========================
st.title("ST_GeoMech_UCS")
st.caption("Real-Time UCS Tracking While Drilling — Cloud Ready")
# Landing page: a single button advances the app to the development step.
if st.session_state.app_step == "intro":
    st.header("Welcome!")
    st.write("Upload Train/Test data, run the model, then go to Prediction.")
    if st.button("Start ▶", type="primary"):
        st.session_state.app_step = "dev"; st.rerun()
227
+
228
# =========================
# Development (Train/Test)
# =========================
# Upload a workbook holding Train/Test sheets, run the loaded model on both
# splits, show metrics and plots, and offer an Excel export of the results.
if st.session_state.app_step == "dev":
    st.sidebar.markdown('<div class="sidebar-card"><h3>Model Development Data</h3>', unsafe_allow_html=True)
    train_test_file = st.sidebar.file_uploader("Upload Train/Test Excel", type=["xlsx","xls"], key="dev_upload")
    run_btn = st.sidebar.button("Run Model", type="primary", use_container_width=True)
    # Navigation to the prediction page only appears once results exist.
    if "Train" in st.session_state.results or "Test" in st.session_state.results:
        st.sidebar.button("Go to Prediction ▶", use_container_width=True, on_click=lambda: st.session_state.update(app_step="predict"))
    st.sidebar.markdown('</div>', unsafe_allow_html=True)

    if run_btn and train_test_file is not None:
        with st.status("Processing…", expanded=False) as status:
            book = read_book(train_test_file)
            if not book: status.update(label="Failed to read workbook.", state="error"); st.stop()
            status.update(label="Workbook read ✓")

            # Accept several common spellings of the sheet names (case-insensitive).
            sh_train = find_sheet(book, ["Train","Training","training2","train","training"])
            sh_test = find_sheet(book, ["Test","Testing","testing2","test","testing"])
            if sh_train is None or sh_test is None:
                status.update(label="Workbook must include Train/Training/training2 and Test/Testing/testing2.", state="error"); st.stop()

            df_tr = book[sh_train].copy(); df_te = book[sh_test].copy()
            # Dev data must carry the target column too, for metric computation.
            if not (ensure_cols(df_tr, FEATURES + [TARGET]) and ensure_cols(df_te, FEATURES + [TARGET])):
                status.update(label="Missing required columns.", state="error"); st.stop()
            status.update(label="Columns validated ✓")
            status.update(label="Predicting…")

            df_tr["UCS_Pred"] = model.predict(df_tr[FEATURES])
            df_te["UCS_Pred"] = model.predict(df_te[FEATURES])
            st.session_state.results["Train"] = df_tr; st.session_state.results["Test"] = df_te

            st.session_state.results["metrics_train"] = {
                "R2": r2_score(df_tr[TARGET], df_tr["UCS_Pred"]),
                "RMSE": rmse(df_tr[TARGET], df_tr["UCS_Pred"]),
                "MAE": mean_absolute_error(df_tr[TARGET], df_tr["UCS_Pred"]),
            }
            st.session_state.results["metrics_test"] = {
                "R2": r2_score(df_te[TARGET], df_te["UCS_Pred"]),
                "RMSE": rmse(df_te[TARGET], df_te["UCS_Pred"]),
                "MAE": mean_absolute_error(df_te[TARGET], df_te["UCS_Pred"]),
            }

            # Remember training min/max per feature for later out-of-range checks.
            tr_min = df_tr[FEATURES].min().to_dict(); tr_max = df_tr[FEATURES].max().to_dict()
            st.session_state.train_ranges = {f:(float(tr_min[f]), float(tr_max[f])) for f in FEATURES}

            status.update(label="Done ✓", state="complete"); toast("Model run complete 🚀")

    # Results view: one tab per split, with metrics, cross-plot and track plot.
    if "Train" in st.session_state.results or "Test" in st.session_state.results:
        tab1, tab2 = st.tabs(["Training", "Testing"])
        if "Train" in st.session_state.results:
            with tab1:
                df = st.session_state.results["Train"]; m = st.session_state.results["metrics_train"]
                c1,c2,c3 = st.columns(3); c1.metric("R²", f"{m['R2']:.4f}"); c2.metric("RMSE", f"{m['RMSE']:.4f}"); c3.metric("MAE", f"{m['MAE']:.4f}")
                left,right = st.columns(2)
                with left: st.pyplot(cross_plot(df[TARGET], df["UCS_Pred"], "Training: Actual vs Predicted"), use_container_width=True)
                with right: st.pyplot(depth_or_index_track(df, "Training: Depth/Index Track", include_actual=True), use_container_width=True)
        if "Test" in st.session_state.results:
            with tab2:
                df = st.session_state.results["Test"]; m = st.session_state.results["metrics_test"]
                c1,c2,c3 = st.columns(3); c1.metric("R²", f"{m['R2']:.4f}"); c2.metric("RMSE", f"{m['RMSE']:.4f}"); c3.metric("MAE", f"{m['MAE']:.4f}")
                left,right = st.columns(2)
                with left: st.pyplot(cross_plot(df[TARGET], df["UCS_Pred"], "Testing: Actual vs Predicted"), use_container_width=True)
                with right: st.pyplot(depth_or_index_track(df, "Testing: Depth/Index Track", include_actual=True), use_container_width=True)

        # Export Dev results
        st.markdown("---")
        sheets = {}; rows = []
        if "Train" in st.session_state.results:
            sheets["Train_with_pred"] = st.session_state.results["Train"]
            rows.append({"Split":"Train", **{k:round(v,6) for k,v in st.session_state.results["metrics_train"].items()}})
        if "Test" in st.session_state.results:
            sheets["Test_with_pred"] = st.session_state.results["Test"]
            rows.append({"Split":"Test", **{k:round(v,6) for k,v in st.session_state.results["metrics_test"].items()}})
        summary_df = pd.DataFrame(rows) if rows else None
        try:
            data_bytes = export_workbook(sheets, summary_df)
            st.download_button("Export Train/Test Results to Excel",
                data=data_bytes, file_name="UCS_Dev_Results.xlsx",
                mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
        except RuntimeError as e:
            # export_workbook raises RuntimeError when openpyxl is missing.
            st.warning(str(e))
310
+
311
# =========================
# Prediction (Validation)
# =========================
# Upload a validation workbook, predict UCS, flag rows outside the training
# feature ranges, and (when actual UCS exists) compute validation metrics.
if st.session_state.app_step == "predict":
    st.sidebar.markdown('<div class="sidebar-card"><h3>Prediction (Validation)</h3>', unsafe_allow_html=True)
    validation_file = st.sidebar.file_uploader("Upload Validation Excel", type=["xlsx","xls"], key="val_upload")
    predict_btn = st.sidebar.button("Predict", type="primary", use_container_width=True)
    st.sidebar.button("⬅ Back", on_click=lambda: st.session_state.update(app_step="dev"), use_container_width=True)
    st.sidebar.markdown('</div>', unsafe_allow_html=True)

    if predict_btn and validation_file is not None:
        with st.status("Predicting…", expanded=False) as status:
            vbook = read_book(validation_file)
            if not vbook: status.update(label="Could not read the Validation Excel.", state="error"); st.stop()
            status.update(label="Workbook read ✓")
            # Prefer a recognizably named sheet; otherwise use the first one.
            vname = find_sheet(vbook, ["Validation","Validate","validation2","Val","val"]) or list(vbook.keys())[0]
            df_val = vbook[vname].copy()
            # Only the feature columns are required; actual UCS is optional here.
            if not ensure_cols(df_val, FEATURES): status.update(label="Missing required columns.", state="error"); st.stop()
            status.update(label="Columns validated ✓")
            df_val["UCS_Pred"] = model.predict(df_val[FEATURES])
            st.session_state.results["Validate"] = df_val

            # OOR check: min–max vs training
            ranges = st.session_state.train_ranges; oor_table = None; oor_pct = 0.0
            if ranges:
                # Per-feature boolean mask of values outside training [min, max].
                viol = {f: (df_val[f] < ranges[f][0]) | (df_val[f] > ranges[f][1]) for f in FEATURES}
                any_viol = pd.DataFrame(viol).any(axis=1); oor_pct = float(any_viol.mean()*100.0)
                if any_viol.any():
                    offenders = df_val.loc[any_viol, FEATURES].copy()
                    offenders["Violations"] = pd.DataFrame(viol).loc[any_viol].apply(lambda r: ", ".join([c for c,v in r.items() if v]), axis=1)
                    # 1-based row numbers for display.
                    offenders.index = offenders.index + 1; oor_table = offenders

            # Metrics only when the validation sheet carries actual UCS values.
            metrics_val = None
            if TARGET in df_val.columns:
                metrics_val = {
                    "R2": r2_score(df_val[TARGET], df_val["UCS_Pred"]),
                    "RMSE": rmse(df_val[TARGET], df_val["UCS_Pred"]),
                    "MAE": mean_absolute_error(df_val[TARGET], df_val["UCS_Pred"])
                }
            st.session_state.results["metrics_val"] = metrics_val
            st.session_state.results["summary_val"] = {
                "n_points": len(df_val),
                "pred_min": float(df_val["UCS_Pred"].min()),
                "pred_max": float(df_val["UCS_Pred"].max()),
                "oor_pct": oor_pct
            }
            st.session_state.results["oor_table"] = oor_table
            status.update(label="Predictions ready ✓", state="complete")

    # Results view: summary metrics, plots, offending rows, and Excel export.
    if "Validate" in st.session_state.results:
        st.subheader("Validation Results")
        sv = st.session_state.results["summary_val"]; oor_table = st.session_state.results.get("oor_table")
        c1,c2,c3,c4 = st.columns(4)
        c1.metric("# points", f"{sv['n_points']}"); c2.metric("Pred min", f"{sv['pred_min']:.2f}")
        c3.metric("Pred max", f"{sv['pred_max']:.2f}"); c4.metric("OOR %", f"{sv['oor_pct']:.1f}%")
        left,right = st.columns(2)
        with left:
            if TARGET in st.session_state.results["Validate"].columns:
                st.pyplot(cross_plot(st.session_state.results["Validate"][TARGET], st.session_state.results["Validate"]["UCS_Pred"], "Validation: Actual vs Predicted"), use_container_width=True)
            else:
                st.info("Actual UCS values are not available in the validation data. Cross-plot cannot be generated.")
        with right:
            st.pyplot(depth_or_index_track(st.session_state.results["Validate"], "Validation: Depth/Index Track", include_actual=(TARGET in st.session_state.results["Validate"].columns)), use_container_width=True)
        if oor_table is not None:
            st.write("*Out-of-range rows (vs. Training min–max):*")
            st.dataframe(oor_table, use_container_width=True)

        # Export
        st.markdown("---")
        sheets = {"Validate_with_pred": st.session_state.results["Validate"]}
        rows = []
        # Include metrics from every split that has been computed so far.
        for name, key in [("Train","metrics_train"), ("Test","metrics_test"), ("Validate","metrics_val")]:
            m = st.session_state.results.get(key)
            if m: rows.append({"Split": name, **{k: round(v,6) for k,v in m.items()}})
        summary_df = pd.DataFrame(rows) if rows else None
        try:
            data_bytes = export_workbook(sheets, summary_df)
            st.download_button("Export Validation Results to Excel",
                data=data_bytes, file_name="UCS_Validation_Results.xlsx",
                mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
        except RuntimeError as e:
            # export_workbook raises RuntimeError when openpyxl is missing.
            st.warning(str(e))
393
+
394
# =========================
# Footer
# =========================
st.markdown("---")
st.markdown("<div style='text-align:center; color:#6b7280;'>ST_GeoMech_UCS • © Smart Thinking</div>", unsafe_allow_html=True)
logo.png ADDED
models/meta.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "Q, gpm",
4
+ "SPP(psi)",
5
+ "T (kft.lbf)",
6
+ "WOB (klbf)",
7
+ "ROP (ft/h)"
8
+ ],
9
+ "target": "UCS",
10
+ "best_params": {
11
+ "n_estimators": 150,
12
+ "max_depth": 21,
13
+ "max_features": "log2",
14
+ "random_state": 10
15
+ },
16
+ "depth_col": "Depth (ft)"
17
+ }
models/ucs_rf.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8cbfb44765b8c142c71d28a7fe1adda8baefd7da627943255a6ac04531ea65c
3
+ size 24344513
requirements.txt CHANGED
@@ -1,3 +1,8 @@
1
- altair
2
- pandas
3
- streamlit
 
 
 
 
 
 
1
+ streamlit>=1.33
2
+ pandas>=2.0
3
+ numpy>=1.24
4
+ scikit-learn>=1.3
5
+ matplotlib>=3.7
6
+ joblib>=1.3
7
+ openpyxl>=3.1
8
+ requests>=2.31