Inframat-x committed on
Commit
6b180ac
·
1 Parent(s): 41c1531

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +685 -249
app.py CHANGED
@@ -1,277 +1,713 @@
1
- # Gradio UI aligned to the training script column names (October1.xlsx)
2
- # - Uses the trained pipeline saved as: stress_gf_xgb.joblib
3
- # - Makes many inputs optional; missing values are handled by the pipeline imputers
 
 
 
 
 
 
 
4
 
 
5
  import os
6
- import joblib
 
 
 
 
 
 
 
7
  import numpy as np
8
  import pandas as pd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  import gradio as gr
10
 
11
- # ========================= Column Names (match training script) =========================
12
- CF_COL = "Conductive Filler Conc. (wt%)"
13
- TARGET_COL = "Stress GF (MPa-1)"
14
-
15
- MAIN_VARIABLES = [
16
- "Filler 1 Type",
17
- "Filler 1 Diameter (µm)",
18
- "Filler 1 Length (mm)",
19
- CF_COL,
20
- "Filler 1 Dimensionality",
21
- "Filler 2 Type",
22
- "Filler 2 Diameter (µm)",
23
- "Filler 2 Length (mm)",
24
- "Filler 2 Dimensionality",
25
- "Specimen Volume (mm3)",
26
- "Probe Count",
27
- "Probe Material",
28
- "W/B",
29
- "S/B",
30
- "Gauge Length (mm)",
31
- "Curing Condition",
32
- "Number of Fillers",
33
- "Drying Temperature (°C)",
34
- "Drying Duration (hr)",
35
- "Loading Rate (MPa/s)",
36
- "Modulus of Elasticity (GPa)",
37
- "Current Type",
38
- "Applied Voltage (V)"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  ]
 
40
 
41
- NUMERIC_COLS = {
42
- "Filler 1 Diameter (µm)",
43
- "Filler 1 Length (mm)",
44
- CF_COL,
45
- "Filler 2 Diameter (µm)",
46
- "Filler 2 Length (mm)",
47
- "Specimen Volume (mm3)",
48
- "Probe Count",
49
- "W/B",
50
- "S/B",
51
- "Gauge Length (mm)",
52
- "Number of Fillers",
53
- "Drying Temperature (°C)",
54
- "Drying Duration (hr)",
55
- "Loading Rate (MPa/s)",
56
- "Modulus of Elasticity (GPa)",
57
- "Applied Voltage (V)"
58
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
- CATEGORICAL_COLS = {
61
- "Filler 1 Type",
62
- "Filler 1 Dimensionality",
63
- "Filler 2 Type",
64
- "Filler 2 Dimensionality",
65
- "Probe Material",
66
- "Curing Condition",
67
- "Current Type"
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  }
69
 
70
- # Reasonable UI choices (free text is still allowed)
71
- DIM_CHOICES = ["0D", "1D", "2D", "3D", "NA"]
72
- CURRENT_CHOICES = ["DC", "AC", "NA"]
 
 
73
 
74
- # ========================= Model Loader ================================================
75
- MODEL_CANDIDATES = [
76
- "stress_gf_xgb.joblib",
77
- "models/stress_gf_xgb.joblib",
78
- "/home/user/app/stress_gf_xgb.joblib",
79
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
- def _load_model_or_error():
82
- for p in MODEL_CANDIDATES:
83
- if os.path.exists(p):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  try:
85
- return joblib.load(p)
86
- except Exception as e:
87
- return f"Could not load model from {p}: {e}"
88
- return (
89
- "Model file not found. Upload your trained pipeline as "
90
- "stress_gf_xgb.joblib (or put it in models/)."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
  )
92
 
93
- # ========================= Input Coercion =============================================
94
- def _coerce_to_row(form_dict: dict) -> pd.DataFrame:
95
- row = {}
96
- for col in MAIN_VARIABLES:
97
- v = form_dict.get(col, None)
98
- if col in NUMERIC_COLS:
99
- if v in ("", None):
100
- row[col] = np.nan
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
  else:
102
- try:
103
- row[col] = float(v)
104
- except Exception:
105
- row[col] = np.nan
106
  else:
107
- row[col] = "" if v in (None, "NA") else str(v).strip()
108
- return pd.DataFrame([row], columns=MAIN_VARIABLES)
109
 
110
- # ========================= Predict Function ===========================================
111
- def predict_fn(**kwargs):
112
- mdl = _load_model_or_error()
113
- if isinstance(mdl, str):
114
- return mdl
115
-
116
- X_new = _coerce_to_row(kwargs)
117
 
 
118
  try:
119
- y_log = mdl.predict(X_new) # model predicts log1p(target)
120
- y = float(np.expm1(y_log)[0]) # back to original scale MPa^-1
121
- if -1e-10 < y < 0:
122
- y = 0.0
 
123
  return y
124
  except Exception as e:
125
- return f"Prediction error: {e}"
126
-
127
- # ========================= Example Prefill ============================================
128
- EXAMPLE = {
129
- "Filler 1 Type": "CNT",
130
- "Filler 1 Dimensionality": "1D",
131
- "Filler 1 Diameter (µm)": 0.02,
132
- "Filler 1 Length (mm)": 1.2,
133
- CF_COL: 0.5,
134
- "Filler 2 Type": "",
135
- "Filler 2 Dimensionality": "NA",
136
- "Filler 2 Diameter (µm)": None,
137
- "Filler 2 Length (mm)": None,
138
- "Specimen Volume (mm3)": 1000,
139
- "Probe Count": 2,
140
- "Probe Material": "Copper",
141
- "W/B": 0.4,
142
- "S/B": 2.5,
143
- "Gauge Length (mm)": 20,
144
- "Curing Condition": "28d water, 20°C",
145
- "Number of Fillers": 1,
146
- "Drying Temperature (°C)": 60,
147
- "Drying Duration (hr)": 24,
148
- "Loading Rate (MPa/s)": 0.1,
149
- "Modulus of Elasticity (GPa)": 25,
150
- "Current Type": "DC",
151
- "Applied Voltage (V)": 5.0,
152
- }
153
 
154
- def _fill_example():
155
- return [EXAMPLE.get(k, None) for k in MAIN_VARIABLES]
156
-
157
- def _clear_all():
158
- cleared = []
159
- for col in MAIN_VARIABLES:
160
- if col in NUMERIC_COLS:
161
- cleared.append(None)
162
- elif col in {"Filler 1 Dimensionality", "Filler 2 Dimensionality"}:
163
- cleared.append("NA")
164
- elif col == "Current Type":
165
- cleared.append("NA")
166
- else:
167
- cleared.append("")
168
- return cleared
169
-
170
- # ========================= UI =========================================================
171
- CSS = """
172
- /* Blue to green gradient background */
173
- .gradio-container {
174
- background: linear-gradient(135deg, #1e3a8a 0%, #166534 60%, #15803d 100%) !important;
175
- }
176
- * {font-family: ui-sans-serif, system-ui, -apple-system, 'Segoe UI', Roboto, 'Helvetica Neue', Arial;}
177
- /* cards - subtle translucent white */
178
- .card {background: rgba(255,255,255,0.07) !important; border: 1px solid rgba(255,255,255,0.12);}
179
- label.svelte-1ipelgc {color: #e0f2fe !important;}
180
- """
181
-
182
- theme = gr.themes.Soft(
183
- primary_hue="blue",
184
- neutral_hue="green"
185
- ).set(
186
- body_background_fill="#1e3a8a",
187
- body_text_color="#e0f2fe",
188
- input_background_fill="#172554",
189
- input_border_color="#1e40af",
190
- button_primary_background_fill="#2563eb",
191
- button_primary_text_color="#ffffff",
192
- button_secondary_background_fill="#14532d",
193
- button_secondary_text_color="#ecfdf5",
194
- )
195
 
196
- with gr.Blocks(css=CSS, theme=theme, fill_height=True) as demo:
 
197
  gr.Markdown(
198
- "<h1 style='margin:0'>Stress Gauge Factor (MPa^-1) - ML Predictor</h1>"
199
- "<p style='opacity:.9'>Fields and units match your training data. "
200
- "Leave anything blank if unknown - the model handles missing values.</p>"
 
201
  )
202
 
203
- with gr.Row():
204
- # ---------------- Inputs (Left) ----------------
205
- with gr.Column(scale=7):
206
- with gr.Accordion("Primary conductive filler", open=True, elem_classes=["card"]):
207
- f1_type = gr.Textbox(label="Filler 1 Type", placeholder="e.g., CNT, Graphite, Steel fiber")
208
- f1_dim = gr.Dropdown(DIM_CHOICES, value="NA", label="Filler 1 Dimensionality")
209
- f1_diam = gr.Number(label="Filler 1 Diameter (µm)")
210
- f1_len = gr.Number(label="Filler 1 Length (mm)")
211
- cf_conc = gr.Number(label=f"{CF_COL}", info="Weight percent of total binder")
212
-
213
- with gr.Accordion("Secondary filler (optional)", open=False, elem_classes=["card"]):
214
- f2_type = gr.Textbox(label="Filler 2 Type", placeholder="Optional")
215
- f2_dim = gr.Dropdown(DIM_CHOICES, value="NA", label="Filler 2 Dimensionality")
216
- f2_diam = gr.Number(label="Filler 2 Diameter (µm)")
217
- f2_len = gr.Number(label="Filler 2 Length (mm)")
218
-
219
- with gr.Accordion("Mix design & specimen", open=False, elem_classes=["card"]):
220
- spec_vol = gr.Number(label="Specimen Volume (mm3)")
221
- probe_cnt = gr.Number(label="Probe Count")
222
- probe_mat = gr.Textbox(label="Probe Material", placeholder="e.g., Copper, Silver paste")
223
- wb = gr.Number(label="W/B")
224
- sb = gr.Number(label="S/B")
225
- gauge_len = gr.Number(label="Gauge Length (mm)")
226
- curing = gr.Textbox(label="Curing Condition", placeholder="e.g., 28d water, 20°C")
227
- n_fillers = gr.Number(label="Number of Fillers")
228
-
229
- with gr.Accordion("Processing", open=False, elem_classes=["card"]):
230
- dry_temp = gr.Number(label="Drying Temperature (°C)")
231
- dry_hrs = gr.Number(label="Drying Duration (hr)")
232
-
233
- with gr.Accordion("Mechanical & electrical loading", open=False, elem_classes=["card"]):
234
- load_rate = gr.Number(label="Loading Rate (MPa/s)")
235
- E_mod = gr.Number(label="Modulus of Elasticity (GPa)")
236
- current = gr.Dropdown(CURRENT_CHOICES, value="NA", label="Current Type")
237
- voltage = gr.Number(label="Applied Voltage (V)")
238
-
239
- # ---------------- Output (Right) ----------------
240
- with gr.Column(scale=5):
241
- with gr.Group(elem_classes=["card"]):
242
- out_pred = gr.Number(label="Predicted Stress GF (MPa-1)", precision=6)
243
- with gr.Row():
244
- btn_pred = gr.Button("Predict", variant="primary")
245
- btn_clear = gr.Button("Clear")
246
- btn_demo = gr.Button("Fill Example")
247
-
248
- with gr.Accordion("About this model", open=False, elem_classes=["card"]):
249
- gr.Markdown(
250
- "- Pipeline: ColumnTransformer -> (RobustScaler + OneHot) -> XGBoost\n"
251
- "- Target: Stress GF (MPa^-1) on original scale (model trains on log1p).\n"
252
- "- Missing values are safely imputed per-feature.\n"
253
- "- Trained columns:\n"
254
- f" `{', '.join(MAIN_VARIABLES)}`"
255
- )
256
-
257
- # Wire buttons
258
- inputs_in_order = [
259
- f1_type, f1_diam, f1_len, cf_conc,
260
- f1_dim, f2_type, f2_diam, f2_len,
261
- f2_dim, spec_vol, probe_cnt, probe_mat,
262
- wb, sb, gauge_len, curing, n_fillers,
263
- dry_temp, dry_hrs, load_rate,
264
- E_mod, current, voltage
265
- ]
266
-
267
- def _predict_wrapper(*vals):
268
- data = {k: v for k, v in zip(MAIN_VARIABLES, vals)}
269
- return predict_fn(**data)
270
-
271
- btn_pred.click(_predict_wrapper, inputs=inputs_in_order, outputs=out_pred)
272
- btn_clear.click(lambda: _clear_all(), inputs=None, outputs=inputs_in_order)
273
- btn_demo.click(lambda: _fill_example(), inputs=None, outputs=inputs_in_order)
274
-
275
- # ------------- Launch -------------
276
- if __name__ == "__main__":
277
- demo.queue().launch()
 
1
+ # ================================================================
2
+ # Self-Sensing Concrete Assistant Hybrid RAG + XGB + (opt) GPT-5
3
+ # FIXED for Windows/Conda import issues (transformers/quantizers)
4
+ # - Pins compatible versions (transformers 4.44.2, sbert 2.7.0, torch 2.x)
5
+ # - Disables TF/Flax backends; safe fallbacks if dense fails
6
+ # - Hybrid retrieval (BM25 + TF-IDF + Dense*) + MMR sentence selection
7
+ # - Local folder only (RAG reads from ./literature_pdfs); no online indexing
8
+ # - Optional GPT-5 synthesis strictly from selected cited sentences
9
+ # - Gradio UI with Prediction + Literature Q&A tabs
10
+ # ================================================================
11
 
12
+ # ---------------------- MUST RUN THESE FLAGS FIRST ----------------------
13
  import os
14
+ os.environ["TRANSFORMERS_NO_TF"] = "1" # don't import TensorFlow
15
+ os.environ["TRANSFORMERS_NO_FLAX"] = "1" # don't import Flax/JAX
16
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
17
+
18
+ # ------------------------------- Imports -----------------------------------
19
+ import re, json, time, joblib, warnings, math, hashlib
20
+ from pathlib import Path
21
+ from typing import List, Dict
22
  import numpy as np
23
  import pandas as pd
24
+
25
+ from sklearn.model_selection import train_test_split
26
+ from sklearn.impute import SimpleImputer
27
+ from sklearn.pipeline import Pipeline
28
+ from sklearn.compose import ColumnTransformer
29
+ from sklearn.preprocessing import RobustScaler, OneHotEncoder
30
+ from sklearn.preprocessing import normalize as sk_normalize
31
+ from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
32
+ from sklearn.feature_selection import VarianceThreshold
33
+ from sklearn.feature_extraction.text import TfidfVectorizer
34
+
35
+ from xgboost import XGBRegressor
36
+ from pypdf import PdfReader
37
+ import fitz # PyMuPDF
38
  import gradio as gr
39
 
40
+ USE_DENSE = True
41
+ try:
42
+ from sentence_transformers import SentenceTransformer
43
+ except Exception as e:
44
+ USE_DENSE = False
45
+ print("⚠️ sentence-transformers unavailable; continuing with TF-IDF + BM25 only.\n", e)
46
+
47
+ from rank_bm25 import BM25Okapi
48
+ from openai import OpenAI
49
+
50
+ warnings.filterwarnings("ignore", category=UserWarning)
51
+
52
# ============================ Config =======================================
# Module-level configuration: data/model paths, RAG artifact locations,
# embedding/LLM settings, and default hybrid-retrieval weights.

# --- Data & model paths ---
DATA_PATH = "july3.xlsx" # <- update if needed

# --- Local PDF folder for RAG (no online indexing) ---
# NOTE(review): absolute Windows user path — only valid on this machine;
# the directory is created at import time if missing.
LOCAL_PDF_DIR = Path(r"C:\Users\nmoha13\Downloads\literature_pdfs") # <- your local folder
LOCAL_PDF_DIR.mkdir(exist_ok=True)

# --- RAG artifacts (kept in working dir) ---
ARTIFACT_DIR = Path("rag_artifacts"); ARTIFACT_DIR.mkdir(exist_ok=True)
MODEL_OUT = "stress_gf_xgb.joblib"  # persisted sklearn+XGBoost pipeline
TFIDF_VECT_PATH = ARTIFACT_DIR / "tfidf_vectorizer.joblib"
TFIDF_MAT_PATH = ARTIFACT_DIR / "tfidf_matrix.joblib"
BM25_TOK_PATH = ARTIFACT_DIR / "bm25_tokens.joblib"
EMB_NPY_PATH = ARTIFACT_DIR / "chunk_embeddings.npy"
RAG_META_PATH = ARTIFACT_DIR / "chunks.parquet"

# --- Embedding model (fast CPU) ---
EMB_MODEL_NAME = os.getenv("EMB_MODEL_NAME", "sentence-transformers/all-MiniLM-L6-v2")

# --- OpenAI (optional LLM synthesis) ---
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini") # e.g., "gpt-5-mini"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None) # set env var to enable LLM

# --- Retrieval weights (UI defaults adapt if dense disabled) ---
# With dense embeddings available the split is 0.30/0.30/0.40; otherwise
# TF-IDF and BM25 share the weight evenly and the dense term is zero.
W_TFIDF_DEFAULT = 0.50 if not USE_DENSE else 0.30
W_BM25_DEFAULT = 0.50 if not USE_DENSE else 0.30
W_EMB_DEFAULT = 0.00 if not USE_DENSE else 0.40

RANDOM_SEED = 42  # shared seed for train/test split and the XGB regressor
83
+ # ==================== XGB Pipeline (Prediction) ============================
84
def make_onehot():
    """Build a dense-output OneHotEncoder that ignores unseen categories.

    scikit-learn >= 1.2 spells the density flag ``sparse_output``; older
    releases used ``sparse``.  Try the modern keyword first and fall back
    on ``TypeError`` so the same code runs on both versions.
    """
    kwargs = {"handle_unknown": "ignore"}
    try:
        return OneHotEncoder(sparse_output=False, **kwargs)
    except TypeError:
        # Pre-1.2 scikit-learn: the parameter was named ``sparse``.
        return OneHotEncoder(sparse=False, **kwargs)
89
+
90
def rmse(y_true, y_pred):
    """Root-mean-squared error between *y_true* and *y_pred*.

    Bug fix: the original body returned ``mean_squared_error`` — i.e. the
    MSE, not its square root — while ``evaluate`` prints the value under
    an "RMSE" label.  Compute the actual RMSE so the name, the printed
    label, and the value agree.  Implemented directly with numpy so it is
    self-contained.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))
92
+
93
def evaluate(m, X, y_log, name="Model"):
    """Score a fitted pipeline on the original (un-logged) target scale.

    ``m`` was trained on log1p-transformed targets, so both its predictions
    and the supplied ``y_log`` are mapped back with ``expm1`` before scoring.
    Prints a one-line summary and returns ``(r2, rmse, mae)``.
    """
    preds = np.expm1(m.predict(X))   # back-transform predictions
    actual = np.expm1(y_log)         # back-transform ground truth
    r2 = r2_score(actual, preds)
    root_err = rmse(actual, preds)
    abs_err = mean_absolute_error(actual, preds)
    print(f"{name}: R²={r2:.3f}, RMSE={root_err:.3f}, MAE={abs_err:.3f}")
    return r2, root_err, abs_err
102
+
103
# --- Load data
# Read the training spreadsheet and strip header whitespace so the
# hard-coded column names below match reliably.
df = pd.read_excel(DATA_PATH)
df.columns = df.columns.str.strip()

# Legacy column spellings from an older sheet revision; dropped if present.
drop_cols = [
    'Loading rate (MPa/s)', 'Voltage (V) AC\\DC', 'Elastic Modulus (GPa)', 'Duration (hrs) of Dying Method'
]
df = df.drop(columns=[c for c in drop_cols if c in df.columns], errors='ignore')

# Full feature list fed to the pipeline (X below uses this order).
main_variables = [
    'Filler1_Type', 'Filler1_Diameter_um', 'Filler1_Length_mm',
    'AvgFiller_Density_g/cm3', 'AvgFiller_weight_%', 'AvgFiller_Volume_%',
    'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Diameter_um', 'Filler2_Length_mm',
    'Filler2_Dimensions', 'Sample_Volume_mm3', 'Electrode/Probe_Count', 'Electrode/Probe_Material',
    'W/B', 'S/B', 'GaugeLength_mm', 'Curing_Conditions', 'Num_ConductiveFillers',
    'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
    'ElasticModulus_Gpa', 'Voltage_Type', 'Applied_Voltage_V'
]
target_col = 'Stress_GF_Mpa'

# Keep only features + target; require a present, strictly positive target
# (rows with GF <= 0 are discarded before the log1p transform below).
df = df[main_variables + [target_col]].copy()
df = df.dropna(subset=[target_col])
df = df[df[target_col] > 0]

numeric_cols = [
    'Filler1_Diameter_um', 'Filler1_Length_mm', 'AvgFiller_Density_g/cm3',
    'AvgFiller_weight_%', 'AvgFiller_Volume_%', 'Filler2_Diameter_um',
    'Filler2_Length_mm', 'Sample_Volume_mm3', 'Electrode/Probe_Count',
    'W/B', 'S/B', 'GaugeLength_mm', 'Num_ConductiveFillers',
    'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
    'ElasticModulus_Gpa', 'Applied_Voltage_V'
]
categorical_cols = [
    'Filler1_Type', 'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Dimensions',
    'Electrode/Probe_Material', 'Curing_Conditions', 'Voltage_Type'
]

# Coerce dtypes: non-numeric junk becomes NaN (imputed later in the
# pipeline); categoricals become strings so OneHotEncoder sees one dtype.
for c in numeric_cols:
    df[c] = pd.to_numeric(df[c], errors='coerce')
for c in categorical_cols:
    df[c] = df[c].astype(str)

# Drop near-constant numeric features.  The filter re-derives the kept set
# from vt.variances_ rather than vt.get_support().
# NOTE(review): VarianceThreshold.fit rejects NaN in most sklearn versions;
# this presumably works because these columns are NaN-free here — confirm.
vt = VarianceThreshold(threshold=1e-3)
vt.fit(df[numeric_cols])
numeric_cols = [c for c in numeric_cols if c not in df[numeric_cols].columns[vt.variances_ < 1e-3]]

# Drop one column of each highly collinear pair (|r| > 0.95) using the
# upper triangle of the absolute correlation matrix.
corr = df[numeric_cols].corr().abs()
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [c for c in upper.columns if any(upper[c] > 0.95)]
numeric_cols = [c for c in numeric_cols if c not in to_drop]

# X keeps ALL main_variables; the ColumnTransformer later selects only the
# surviving numeric_cols + categorical_cols.  Target is log1p-transformed;
# predictions are mapped back with expm1 (see evaluate()).
X = df[main_variables].copy()
y = np.log1p(df[target_col])

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=RANDOM_SEED
)

# Hyperparameters from a prior tuning run, keyed for Pipeline.set_params().
BEST_PARAMS = {
    "regressor__subsample": 1.0,
    "regressor__reg_lambda": 5,
    "regressor__reg_alpha": 0.05,
    "regressor__n_estimators": 300,
    "regressor__max_depth": 6,
    "regressor__learning_rate": 0.1,
    "regressor__gamma": 0,
    "regressor__colsample_bytree": 1.0
}
171
 
172
def train_and_save_model():
    """Fit the preprocessing + XGBoost pipeline and persist it to MODEL_OUT.

    Numeric columns are median-imputed and robust-scaled; categorical
    columns are mode-imputed and one-hot encoded.  Hyperparameters come
    from the module-level BEST_PARAMS, training data from the module-level
    X_train / y_train split.  Returns the fitted pipeline.
    """
    numeric_steps = Pipeline([
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', RobustScaler()),
    ])
    categorical_steps = Pipeline([
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('onehot', make_onehot()),
    ])
    prep = ColumnTransformer([
        ('num', numeric_steps, numeric_cols),
        ('cat', categorical_steps, categorical_cols),
    ])
    model = Pipeline([
        ('preprocessor', prep),
        ('regressor', XGBRegressor(random_state=RANDOM_SEED, n_jobs=-1, verbosity=0)),
    ])
    model.set_params(**BEST_PARAMS)
    model.fit(X_train, y_train)

    joblib.dump(model, MODEL_OUT)
    print(f"✅ Trained new model and saved → {MODEL_OUT}")
    return model
192
+
193
def load_or_train_model():
    """Return the persisted pipeline from MODEL_OUT, training one if absent."""
    if not os.path.exists(MODEL_OUT):
        print("⚠️ No saved model found. Training a new one...")
        return train_and_save_model()
    print(f"📂 Loading existing model from {MODEL_OUT}")
    return joblib.load(MODEL_OUT)
200
+
201
# Load (or train) the prediction pipeline once, at import time.
xgb_pipe = load_or_train_model()
202
+
203
+ # ======================= Hybrid RAG Indexing ================================
204
# Sentence boundaries: sentence punctuation followed by whitespace, or any
# run of newlines.
_SENT_SPLIT_RE = re.compile(r"(?<=[.!?])\s+|\n+")
# Word-ish tokens, keeping characters common in technical text (%, /, ., #, +, -, _).
TOKEN_RE = re.compile(r"[A-Za-z0-9_#+\-/\.%]+")

def sent_split(text: str) -> List[str]:
    """Split *text* into sentences, dropping fragments shorter than 5 words."""
    pieces = (chunk.strip() for chunk in _SENT_SPLIT_RE.split(text))
    return [p for p in pieces if p and len(p.split()) >= 5]

def tokenize(text: str) -> List[str]:
    """Lower-cased token list used for BM25 indexing and queries."""
    return [match.lower() for match in TOKEN_RE.findall(text)]
213
+
214
def extract_text_pymupdf(pdf_path: Path) -> str:
    """Extract page-tagged text from a PDF.

    Each page's text is prefixed with a ``[[PAGE=n]]`` marker so downstream
    chunks can be traced back to a page.  PyMuPDF is tried first; on any
    failure pypdf is used as a fallback.  Returns "" if both fail.
    """
    try:
        document = fitz.open(pdf_path)
        pages = []
        for idx, page in enumerate(document):
            body = page.get_text("text") or ""
            pages.append(f"[[PAGE={idx+1}]]\n{body}")
        return "\n\n".join(pages)
    except Exception:
        pass
    # Fallback to PyPDF
    try:
        reader = PdfReader(str(pdf_path))
        pages = []
        for idx, pg in enumerate(reader.pages):
            body = pg.extract_text() or ""
            pages.append(f"[[PAGE={idx+1}]]\n{body}")
        return "\n\n".join(pages)
    except Exception as e:
        print(f"PDF read error ({pdf_path}): {e}")
        return ""
234
+
235
def chunk_by_sentence_windows(text: str, win_size=8, overlap=2) -> List[str]:
    """Group sentences into overlapping windows of *win_size* sentences.

    Consecutive windows share *overlap* sentences, so the stride is
    ``win_size - overlap`` (clamped to at least 1).  Each window is joined
    into a single chunk string for indexing.
    """
    sentences = sent_split(text)
    stride = max(1, win_size - overlap)
    windows = []
    start = 0
    while start < len(sentences):
        current = sentences[start:start + win_size]
        if not current:
            break
        windows.append(" ".join(current))
        start += stride
    return windows
244
+
245
def _safe_init_st_model(name: str):
    """Instantiate a SentenceTransformer, or return None when dense is off.

    Any construction failure permanently flips the module-level USE_DENSE
    flag so later callers skip dense-embedding work entirely.
    """
    global USE_DENSE
    if USE_DENSE:
        try:
            return SentenceTransformer(name)
        except Exception as e:
            print("⚠️ Could not initialize SentenceTransformer; disabling dense embeddings.\n", e)
            USE_DENSE = False
    return None
257
+
258
+ def _collect_pdf_paths(pdf_dir: Path) -> List[Path]:
259
+ # Collect PDFs recursively from the local folder
260
+ return list(Path(pdf_dir).glob("**/*.pdf"))
261
+
262
def build_or_load_hybrid(pdf_dir: Path):
    """Load the cached hybrid retrieval index, or build it from local PDFs.

    Returns a 5-tuple ``(vectorizer, X_tfidf, meta, tokens, emb)`` where
    ``meta`` is a DataFrame with columns doc_path / chunk_id / text,
    ``tokens`` is the per-chunk BM25 token lists, and ``emb`` is the
    L2-normalized dense embedding matrix or None when dense is disabled.
    """
    # If artifacts exist, load them.  The embedding file is only required
    # when dense retrieval is enabled.
    have_cache = (TFIDF_VECT_PATH.exists() and TFIDF_MAT_PATH.exists()
                  and BM25_TOK_PATH.exists() and RAG_META_PATH.exists()
                  and (EMB_NPY_PATH.exists() or not USE_DENSE))
    if have_cache:
        vectorizer = joblib.load(TFIDF_VECT_PATH)
        X_tfidf = joblib.load(TFIDF_MAT_PATH)
        meta = pd.read_parquet(RAG_META_PATH)
        bm25_toks = joblib.load(BM25_TOK_PATH)
        emb = np.load(EMB_NPY_PATH) if (USE_DENSE and EMB_NPY_PATH.exists()) else None
        print("Loaded hybrid index.")
        return vectorizer, X_tfidf, meta, bm25_toks, emb

    # Fresh index: extract page-tagged text from every PDF, chunk it into
    # overlapping sentence windows, and tokenize each chunk for BM25.
    rows, all_tokens = [], []
    pdf_paths = _collect_pdf_paths(pdf_dir)
    print(f"Indexing PDFs from {pdf_dir}. Found {len(pdf_paths)} files.")
    for pdf in pdf_paths:
        raw = extract_text_pymupdf(pdf)
        if not raw.strip():
            continue
        for i, ch in enumerate(chunk_by_sentence_windows(raw, win_size=8, overlap=2)):
            rows.append({"doc_path": str(pdf), "chunk_id": i, "text": ch})
            all_tokens.append(tokenize(ch))

    if not rows:
        raise RuntimeError(f"No PDF text found under: {pdf_dir}")

    meta = pd.DataFrame(rows)

    # TF-IDF over chunk texts; token_pattern mirrors TOKEN_RE's technical chars.
    vectorizer = TfidfVectorizer(
        ngram_range=(1,2),
        min_df=1, max_df=0.95,
        sublinear_tf=True, smooth_idf=True,
        lowercase=True,
        token_pattern=r"(?u)\b\w[\w\-\./%+#]*\b"
    )
    X_tfidf = vectorizer.fit_transform(meta["text"].tolist())

    # Dense (optional): best-effort; any failure leaves emb as None and the
    # retrieval layer falls back to TF-IDF + BM25 only.
    emb = None
    if USE_DENSE:
        try:
            st_model_tmp = _safe_init_st_model(EMB_MODEL_NAME)
            if st_model_tmp is not None:
                em = st_model_tmp.encode(meta["text"].tolist(), batch_size=64, show_progress_bar=False, convert_to_numpy=True)
                emb = sk_normalize(em)
                np.save(EMB_NPY_PATH, emb)
        except Exception as e:
            emb = None
            print("⚠️ Dense embeddings failed; continuing without them.\n", e)

    # Save artifacts so the next startup takes the cache path above.
    joblib.dump(vectorizer, TFIDF_VECT_PATH)
    joblib.dump(X_tfidf, TFIDF_MAT_PATH)
    joblib.dump(all_tokens, BM25_TOK_PATH)
    meta.to_parquet(RAG_META_PATH, index=False)

    print(f"Indexed {len(meta)} chunks from {meta['doc_path'].nunique()} PDFs.")
    return vectorizer, X_tfidf, meta, all_tokens, emb
324
+
325
+ # ---------- Auto reindex if new/modified PDFs are detected ----------
326
+ from datetime import datetime
327
+
328
def auto_reindex_if_needed(pdf_dir: Path):
    """Rebuilds RAG index if new or modified PDFs are detected.

    This function does not rebuild anything itself: it only deletes stale
    artifact files so that the subsequent build_or_load_hybrid() call
    misses its cache and re-indexes from scratch.  Staleness is judged by
    comparing each PDF's mtime against the chunks.parquet mtime.
    """
    meta_path = RAG_META_PATH
    pdfs = _collect_pdf_paths(pdf_dir)
    if not meta_path.exists():
        print("No existing index found — indexing now...")
        # Remove stale artifacts if any partial set exists
        for p in [TFIDF_VECT_PATH, TFIDF_MAT_PATH, BM25_TOK_PATH, EMB_NPY_PATH]:
            try:
                if p.exists(): p.unlink()
            except Exception:
                # Best-effort cleanup; a locked file just stays behind.
                pass
        return # build will happen below
    # Compare PDF modification times against the index's own mtime
    # (naive local timestamps on both sides, so the comparison is consistent).
    last_index_time = datetime.fromtimestamp(meta_path.stat().st_mtime)
    recent = [p for p in pdfs if datetime.fromtimestamp(p.stat().st_mtime) > last_index_time]
    if recent:
        print(f"Found {len(recent)} new/updated PDFs — rebuilding index...")
        # Clear artifacts to force rebuild
        for p in [TFIDF_VECT_PATH, TFIDF_MAT_PATH, BM25_TOK_PATH, EMB_NPY_PATH, RAG_META_PATH]:
            try:
                if p.exists(): p.unlink()
            except Exception:
                pass
351
+
352
# Build hybrid index (local only)
# Drop stale artifacts first (if PDFs changed), then build or load the index.
auto_reindex_if_needed(LOCAL_PDF_DIR)
tfidf_vectorizer, tfidf_matrix, rag_meta, bm25_tokens, emb_matrix = build_or_load_hybrid(LOCAL_PDF_DIR)
# BM25 is cheap to rebuild in memory from the (possibly cached) token lists.
bm25 = BM25Okapi(bm25_tokens)
st_query_model = _safe_init_st_model(EMB_MODEL_NAME) # safe init; may set USE_DENSE=False

# If dense failed at runtime, update default weights in case UI uses them
# (the retrieval functions are defined after this point, so their defaults
# pick up the corrected values).
if not USE_DENSE:
    W_TFIDF_DEFAULT, W_BM25_DEFAULT, W_EMB_DEFAULT = 0.50, 0.50, 0.00
361
+
362
+ def _extract_page(text_chunk: str) -> str:
363
+ m = list(re.finditer(r"\[\[PAGE=(\d+)\]\]", text_chunk))
364
+ return (m[-1].group(1) if m else "?")
365
+
366
+ # ---------------------- Hybrid search --------------------------------------
367
def hybrid_search(query: str, k=8, w_tfidf=W_TFIDF_DEFAULT, w_bm25=W_BM25_DEFAULT, w_emb=W_EMB_DEFAULT):
    """Rank indexed chunks for *query* by a weighted blend of three scorers.

    Combines dense cosine similarity (optional), TF-IDF cosine, and BM25.
    Each score vector is min-max normalized before blending, and the three
    weights are renormalized to sum to 1 (after the dense weight is forced
    to 0 when dense retrieval is unavailable).  Returns the top-*k* rows of
    the module-level ``rag_meta`` with per-scorer score columns attached.
    """
    # Dense (optional): requires the flag, a live query encoder, a cached
    # embedding matrix, and a non-zero weight — otherwise skip entirely.
    if USE_DENSE and st_query_model is not None and emb_matrix is not None and w_emb > 0:
        try:
            q_emb = st_query_model.encode([query], convert_to_numpy=True)
            q_emb = sk_normalize(q_emb)[0]
            dense_scores = emb_matrix @ q_emb  # rows are L2-normalized → cosine
        except Exception as e:
            print("⚠️ Dense query encoding failed; ignoring dense this run.\n", e)
            dense_scores = np.zeros(len(rag_meta), dtype=float)
            w_emb = 0.0
    else:
        dense_scores = np.zeros(len(rag_meta), dtype=float)
        w_emb = 0.0 # force off

    # TF-IDF cosine against every indexed chunk.
    q_vec = tfidf_vectorizer.transform([query])
    tfidf_scores = (tfidf_matrix @ q_vec.T).toarray().ravel()

    # BM25 over the same lower-cased token scheme used at index time.
    q_tokens = [t.lower() for t in TOKEN_RE.findall(query)]
    bm25_scores = np.array(bm25.get_scores(q_tokens), dtype=float)

    def _norm(x):
        # Min-max to [0, 1]; a constant vector maps to all-zeros so it
        # contributes nothing to the blend.
        x = np.asarray(x, dtype=float)
        if np.allclose(x.max(), x.min()):
            return np.zeros_like(x)
        return (x - x.min()) / (x.max() - x.min())

    s_dense = _norm(dense_scores)
    s_tfidf = _norm(tfidf_scores)
    s_bm25 = _norm(bm25_scores)

    # Renormalize weights to sum to 1 (guarding against an all-zero sum).
    total_w = (w_tfidf + w_bm25 + w_emb) or 1.0
    w_tfidf, w_bm25, w_emb = w_tfidf/total_w, w_bm25/total_w, w_emb/total_w

    combo = w_emb * s_dense + w_tfidf * s_tfidf + w_bm25 * s_bm25
    idx = np.argsort(-combo)[:k]  # descending by blended score
    hits = rag_meta.iloc[idx].copy()
    hits["score_dense"] = s_dense[idx]
    hits["score_tfidf"] = s_tfidf[idx]
    hits["score_bm25"] = s_bm25[idx]
    hits["score"] = combo[idx]
    return hits.reset_index(drop=True)
411
+
412
+ # -------------- Sentence selection with MMR (diversity) --------------------
413
def split_sentences(text: str) -> List[str]:
    """Sentences from *text*, kept only when 6-60 words long (quote-sized)."""
    keep = []
    for sentence in sent_split(text):
        word_count = len(sentence.split())
        if 6 <= word_count <= 60:
            keep.append(sentence)
    return keep
416
+
417
def mmr_select_sentences(question: str, hits: pd.DataFrame, top_n=4, pool_per_chunk=6, lambda_div=0.7):
    """Pick up to *top_n* diverse, relevant sentences from retrieved chunks.

    Builds a candidate pool (at most *pool_per_chunk* sentences per hit,
    each tagged with its source file name and page), scores relevance to
    *question* with dense embeddings when available (TF-IDF cosine
    otherwise), then applies Maximal Marginal Relevance: each pick trades
    relevance against similarity to already-selected sentences via
    ``lambda_div * rel - (1 - lambda_div) * max_sim``.  Returns a list of
    ``{"sent", "doc", "page"}`` dicts (empty when no candidates).
    """
    pool = []
    for _, row in hits.iterrows():
        doc = Path(row["doc_path"]).name
        page = _extract_page(row["text"])
        for s in split_sentences(row["text"])[:pool_per_chunk]:
            pool.append({"sent": s, "doc": doc, "page": page})
    if not pool:
        return []

    sent_texts = [p["sent"] for p in pool]

    # Relevance + pairwise-similarity backends: dense when possible, with a
    # TF-IDF fallback both on import failure and on runtime encode failure.
    if USE_DENSE and st_query_model is not None:
        try:
            texts = [question] + sent_texts
            enc = st_query_model.encode(texts, convert_to_numpy=True)
            q_vec = sk_normalize(enc[:1])[0]
            S = sk_normalize(enc[1:])
            rel = (S @ q_vec)  # cosine relevance to the question
            def sim_fn(i, j): return float(S[i] @ S[j])
        except Exception as e:
            print("⚠️ Dense sentence encoding failed; falling back to TF-IDF for MMR.\n", e)
            Q = tfidf_vectorizer.transform([question])
            S = tfidf_vectorizer.transform(sent_texts)
            rel = (S @ Q.T).toarray().ravel()
            def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])
    else:
        Q = tfidf_vectorizer.transform([question])
        S = tfidf_vectorizer.transform(sent_texts)
        rel = (S @ Q.T).toarray().ravel()
        def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])

    # Greedy MMR: seed with the single most relevant sentence.
    selected, selected_idx = [], []
    remain = list(range(len(pool)))
    first = int(np.argmax(rel))
    selected.append(pool[first]); selected_idx.append(first); remain.remove(first)

    while len(selected) < top_n and remain:
        cand_scores = []
        for i in remain:
            # Penalize redundancy: similarity to the closest already-picked sentence.
            sim_to_sel = max(sim_fn(i, j) for j in selected_idx) if selected_idx else 0.0
            score = lambda_div * rel[i] - (1 - lambda_div) * sim_to_sel
            cand_scores.append((score, i))
        cand_scores.sort(reverse=True)
        best_i = cand_scores[0][1]
        selected.append(pool[best_i]); selected_idx.append(best_i); remain.remove(best_i)
    return selected
464
+
465
def compose_extractive(selected: List[Dict]) -> str:
    """Join selected sentences into one paragraph with inline citations.

    Each item must carry ``sent``, ``doc`` and ``page`` keys; returns ""
    for an empty selection.
    """
    cited = []
    for item in selected:
        cited.append(f"{item['sent']} ({item['doc']}, p.{item['page']})")
    return " ".join(cited)
470
+
471
+ # ------------------- Optional GPT-5 synthesis ------------------------------
472
+ # ------------------- Optional GPT-4o/GPT-5 synthesis ------------------------------
473
def synthesize_with_llm(question: str, sentence_lines: List[str], model: str = None, temperature: float = 0.2) -> str:
    """Synthesize a short cited answer from the selected evidence sentences.

    Returns the LLM's paragraph, or None when no API key is configured or
    the API call fails — callers treat None as "fall back to extractive".
    """
    if OPENAI_API_KEY is None:
        print("Skipping ChatGPT")
        return None # not configured → skip synthesis

    # NOTE(review): redundant local import — OpenAI is already imported at
    # module top level; harmless but could be removed.
    from openai import OpenAI
    client = OpenAI(api_key=OPENAI_API_KEY)
    if model is None:
        model = OPENAI_MODEL

    # --- Stronger, clean academic prompt ---
    # The model is instructed to use ONLY the supplied evidence and to keep
    # the "(file.pdf, p.N)" citation strings verbatim.
    SYSTEM_PROMPT = (
        "You are a scientific writing assistant specializing in self-sensing cementitious materials.\n"
        "Write a short, fluent, and informative paragraph (3–6 sentences) answering the question using ONLY the provided evidence.\n"
        "Rephrase and synthesize ideas; do not copy sentences verbatim.\n"
        "Include parenthetical citations exactly as given (e.g., '(Paper.pdf, p.4)')."
    )

    user_prompt = (
        f"Question: {question}\n\n"
        "Evidence:\n" +
        "\n".join(f"- {s}" for s in sentence_lines)
    )

    try:
        print("🔍 Calling GPT synthesis...")
        response = client.chat.completions.create(
            model=model,
            temperature=temperature,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt},
            ],
        )

        answer = response.choices[0].message.content.strip()
        return answer

    except Exception as e:
        # Any API/network failure degrades gracefully to the extractive answer.
        print(f"❌ LLM synthesis error: {e}")
        return None
514
+
515
+
516
+ # ------------------------ RAG reply ----------------------------------------
517
def rag_reply(
    question: str,
    k: int = 8,
    n_sentences: int = 4,
    include_passages: bool = False,
    use_llm: bool = False,
    model: str = None,
    temperature: float = 0.2,
    strict_quotes_only: bool = False,
    w_tfidf: float = W_TFIDF_DEFAULT,
    w_bm25: float = W_BM25_DEFAULT,
    w_emb: float = W_EMB_DEFAULT
) -> str:
    """Answer a literature question from the hybrid index.

    Pipeline: hybrid retrieval → MMR sentence selection → extractive answer,
    optionally paraphrased by the LLM. Every mode carries a citation footer
    and, when fewer than three distinct sources contributed, a coverage note.
    """
    hits = hybrid_search(question, k=k, w_tfidf=w_tfidf, w_bm25=w_bm25, w_emb=w_emb)
    if hits.empty:
        return "No relevant passages found. Add more PDFs in literature_pdfs/ or adjust your query."

    selected = mmr_select_sentences(question, hits, top_n=int(n_sentences), pool_per_chunk=6, lambda_div=0.7)

    # Citation header built from the strongest retrieved chunks.
    header_cites = "; ".join(
        f"{Path(row['doc_path']).name} (p.{_extract_page(row['text'])})"
        for _, row in hits.head(6).iterrows()
    )

    # Coverage note (helps debugging thin answers).
    source_names = {Path(row['doc_path']).name for _, row in hits.iterrows()}
    if len(source_names) < 3:
        coverage_note = f"\n\n> Note: Only {len(source_names)} unique source(s) contributed. Add more PDFs or increase Top-K."
    else:
        coverage_note = ""

    citation_footer = f"\n\n**Citations:** {header_cites}{coverage_note}"
    passages_suffix = "\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2])

    if strict_quotes_only:
        if not selected:
            return "**Quoted Passages:**" + passages_suffix + citation_footer
        msg = "**Quoted Passages:**\n- " + "\n- ".join(
            f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected
        )
        msg += citation_footer
        if include_passages:
            msg += passages_suffix
        return msg

    # Extractive baseline.
    extractive = compose_extractive(selected)

    # Optional LLM synthesis from the selected (already cited) sentences.
    if use_llm and selected:
        evidence = [f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected]
        llm_text = synthesize_with_llm(question, evidence, model=model, temperature=temperature)
        if llm_text:
            msg = f"**Answer (GPT-5 synthesis):** {llm_text}" + citation_footer
            if include_passages:
                msg += passages_suffix
            return msg

    # Fallback: purely extractive.
    if not extractive:
        return "**Answer:** Here are relevant passages." + citation_footer + passages_suffix

    msg = f"**Answer:** {extractive}" + citation_footer
    if include_passages:
        msg += passages_suffix
    return msg
576
+
577
+ # =========================== Gradio UI =====================================
578
# Column order expected by the trained pipeline; the Gradio widgets below are
# built in exactly this order and zipped back into a single-row DataFrame.
INPUT_COLS = [
    "Filler1_Type", "Filler1_Dimensions", "Filler1_Diameter_um", "Filler1_Length_mm",
    "Filler2_Type", "Filler2_Dimensions", "Filler2_Diameter_um", "Filler2_Length_mm",
    "AvgFiller_Density_g/cm3", "AvgFiller_weight_%", "AvgFiller_Volume_%",
    "Sample_Volume_mm3", "Electrode/Probe_Count", "Electrode/Probe_Material",
    "W/B", "S/B", "GaugeLength_mm", "Curing_Conditions", "Num_ConductiveFillers",
    "DryingTemperature_C", "DryingDuration_hrs", "LoadingRate_MPa/s",
    "ElasticModulus_Gpa", "Voltage_Type", "Applied_Voltage_V"
]
# Columns coerced to float (empty/unparseable → None so pipeline imputers can
# fill them); every other column is treated as a categorical string.
NUMERIC_INPUTS = {
    "Filler1_Diameter_um","Filler1_Length_mm","Filler2_Diameter_um","Filler2_Length_mm",
    "AvgFiller_Density_g/cm3","AvgFiller_weight_%","AvgFiller_Volume_%","Sample_Volume_mm3",
    "Electrode/Probe_Count","W/B","S/B","GaugeLength_mm","Num_ConductiveFillers",
    "DryingTemperature_C","DryingDuration_hrs","LoadingRate_MPa/s","ElasticModulus_Gpa",
    "Applied_Voltage_V"
}
# Allowed filler-dimensionality categories for the dropdown widgets.
CAT_DIM_CHOICES = ["0D","1D","2D","3D","NA"]
595
+
596
def _coerce_row(args):
    """Zip raw widget values into a one-row DataFrame matching INPUT_COLS.

    Numeric columns are coerced to float, with empty/unparseable entries
    mapped to None so the pipeline's imputers can fill them; all other
    columns become stripped strings ("" when left blank).

    Args:
        args: Widget values in the same positional order as INPUT_COLS.

    Returns:
        A single-row pandas.DataFrame with columns ordered as INPUT_COLS.
    """
    row = {c: v for c, v in zip(INPUT_COLS, args)}
    clean = {}
    for k, v in row.items():
        if k in NUMERIC_INPUTS:
            if v in ("", None):
                clean[k] = None
            else:
                try:
                    clean[k] = float(v)
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; unparseable numeric
                # input still becomes a missing value for the imputer.
                except (TypeError, ValueError):
                    clean[k] = None
        else:
            clean[k] = "" if v is None else str(v).strip()
    return pd.DataFrame([clean], columns=INPUT_COLS)
608
 
609
def _load_model():
    """Return the persisted prediction pipeline loaded from MODEL_OUT.

    Raises:
        FileNotFoundError: when the model artifact has not been trained yet.
    """
    if os.path.exists(MODEL_OUT):
        return joblib.load(MODEL_OUT)
    raise FileNotFoundError(f"Model file not found at '{MODEL_OUT}'. Retrain above.")
 
 
 
613
 
614
def predict_fn(*args):
    """Predict the stress gauge factor from raw widget values.

    The model was trained on a log1p-transformed target, so the prediction is
    inverted with expm1; tiny negative round-off is clamped to zero. Any
    failure is returned as an error string so the UI can display it instead
    of crashing.
    """
    try:
        pipeline = _load_model()
        features = _coerce_row(args)
        prediction = float(np.expm1(pipeline.predict(features))[0])
        # expm1 round-off can dip a hair below zero; snap it back.
        if -1e-8 < prediction < 0:
            prediction = 0.0
        return prediction
    except Exception as exc:
        return f"Error during prediction: {exc}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624
 
625
def rag_chat_fn(message, history, top_k, n_sentences, include_passages,
                use_llm, model_name, temperature, strict_quotes_only,
                w_tfidf, w_bm25, w_emb):
    """Gradio ChatInterface adapter around rag_reply.

    Validates the user message, coerces widget values to the types rag_reply
    expects, and surfaces any failure as a chat message instead of raising.
    """
    if not message or not message.strip():
        return "Ask a literature question (e.g., *How does CNT length affect gauge factor?*)"
    try:
        # Coercions stay inside the try so bad widget values also surface
        # as a "RAG error" message rather than an unhandled exception.
        params = {
            "question": message,
            "k": int(top_k),
            "n_sentences": int(n_sentences),
            "include_passages": bool(include_passages),
            "use_llm": bool(use_llm),
            "model": model_name or None,
            "temperature": float(temperature),
            "strict_quotes_only": bool(strict_quotes_only),
            "w_tfidf": float(w_tfidf),
            "w_bm25": float(w_bm25),
            "w_emb": float(w_emb),
        }
        return rag_reply(**params)
    except Exception as exc:
        return f"RAG error: {exc}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
646
 
647
# Top-level Gradio app: two tabs (XGB prediction + literature RAG chat).
with gr.Blocks() as demo:
    gr.Markdown("# 🧪 Self-Sensing Concrete Assistant — Hybrid RAG (Accurate Q&A)")
    gr.Markdown(
        "- **Prediction**: XGBoost pipeline for **Stress Gauge Factor (MPa)**.\n"
        "- **Literature (Hybrid RAG)**: BM25 + TF-IDF + Dense embeddings with **MMR** sentence selection.\n"
        "- **Strict mode** shows only quoted sentences with citations; **GPT-5** can paraphrase strictly from those quotes.\n"
        "- **Local-only RAG**: drop PDFs into `literature_pdfs/` and the index will auto-refresh on restart."
    )

    with gr.Tabs():
        with gr.Tab("🔮 Predict Gauge Factor (XGB)"):
            with gr.Row():
                with gr.Column():
                    # NOTE: widget order must match INPUT_COLS exactly —
                    # predict_fn zips the raw values positionally.
                    inputs = [
                        gr.Textbox(label="Filler1_Type", placeholder="e.g., CNT, Graphite, Steel fiber"),
                        gr.Dropdown(CAT_DIM_CHOICES, label="Filler1_Dimensions", value="NA"),
                        gr.Number(label="Filler1_Diameter_um"),
                        gr.Number(label="Filler1_Length_mm"),
                        gr.Textbox(label="Filler2_Type", placeholder="Optional"),
                        gr.Dropdown(CAT_DIM_CHOICES, label="Filler2_Dimensions", value="NA"),
                        gr.Number(label="Filler2_Diameter_um"),
                        gr.Number(label="Filler2_Length_mm"),
                        gr.Number(label="AvgFiller_Density_g/cm3"),
                        gr.Number(label="AvgFiller_weight_%"),
                        gr.Number(label="AvgFiller_Volume_%"),
                        gr.Number(label="Sample_Volume_mm3"),
                        gr.Number(label="Electrode/Probe_Count"),
                        gr.Textbox(label="Electrode/Probe_Material", placeholder="e.g., Copper, Silver paste"),
                        gr.Number(label="W/B"),
                        gr.Number(label="S/B"),
                        gr.Number(label="GaugeLength_mm"),
                        gr.Textbox(label="Curing_Conditions", placeholder="e.g., 28d water, 20°C"),
                        gr.Number(label="Num_ConductiveFillers"),
                        gr.Number(label="DryingTemperature_C"),
                        gr.Number(label="DryingDuration_hrs"),
                        gr.Number(label="LoadingRate_MPa/s"),
                        gr.Number(label="ElasticModulus_Gpa"),
                        gr.Textbox(label="Voltage_Type", placeholder="AC / DC"),
                        gr.Number(label="Applied_Voltage_V"),
                    ]
                with gr.Column():
                    out_pred = gr.Number(label="Predicted Stress_GF (MPa)", precision=6)
                    gr.Button("Predict", variant="primary").click(predict_fn, inputs, out_pred)

        with gr.Tab("📚 Ask the Literature (Hybrid RAG + MMR)"):
            with gr.Row():
                top_k = gr.Slider(5, 12, value=8, step=1, label="Top-K chunks")
                n_sentences = gr.Slider(2, 6, value=4, step=1, label="Answer length (sentences)")
                include_passages = gr.Checkbox(value=False, label="Include supporting passages")
            with gr.Accordion("Retriever weights (advanced)", open=False):
                w_tfidf = gr.Slider(0.0, 1.0, value=W_TFIDF_DEFAULT, step=0.05, label="TF-IDF weight")
                w_bm25 = gr.Slider(0.0, 1.0, value=W_BM25_DEFAULT, step=0.05, label="BM25 weight")
                w_emb = gr.Slider(0.0, 1.0, value=W_EMB_DEFAULT, step=0.05, label="Dense weight (set 0 if disabled)")
            with gr.Accordion("LLM & Controls", open=False):
                strict_quotes_only = gr.Checkbox(value=False, label="Strict quotes only (no paraphrasing)")
                use_llm = gr.Checkbox(value=False, label="Use GPT-5 to paraphrase selected sentences")
                model_name = gr.Textbox(value=os.getenv("OPENAI_MODEL", OPENAI_MODEL), label="LLM model", placeholder="e.g., gpt-5 or gpt-5-mini")
                temperature = gr.Slider(0.0, 1.0, value=0.2, step=0.05, label="Temperature")
            # Extra widget values are forwarded to rag_chat_fn after (message, history).
            gr.ChatInterface(
                fn=rag_chat_fn,
                additional_inputs=[top_k, n_sentences, include_passages, use_llm, model_name, temperature, strict_quotes_only, w_tfidf, w_bm25, w_emb],
                title="Literature Q&A",
                description="Hybrid retrieval with diversity. Answers carry inline (Doc, p.X) citations. Toggle strict/LLM modes."
            )

# Note: add share=True to expose publicly (for iframe embedding)
demo.queue().launch()