Inframat-x committed on
Commit
bdb51cc
·
1 Parent(s): 6b180ac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +713 -713
app.py CHANGED
@@ -1,713 +1,713 @@
1
- # ================================================================
2
- # Self-Sensing Concrete Assistant — Hybrid RAG + XGB + (opt) GPT-5
3
- # FIXED for Windows/Conda import issues (transformers/quantizers)
4
- # - Pins compatible versions (transformers 4.44.2, sbert 2.7.0, torch 2.x)
5
- # - Disables TF/Flax backends; safe fallbacks if dense fails
6
- # - Hybrid retrieval (BM25 + TF-IDF + Dense*) + MMR sentence selection
7
- # - Local folder only (RAG reads from ./literature_pdfs); no online indexing
8
- # - Optional GPT-5 synthesis strictly from selected cited sentences
9
- # - Gradio UI with Prediction + Literature Q&A tabs
10
- # ================================================================
11
-
12
- # ---------------------- MUST RUN THESE FLAGS FIRST ----------------------
13
- import os
14
- os.environ["TRANSFORMERS_NO_TF"] = "1" # don't import TensorFlow
15
- os.environ["TRANSFORMERS_NO_FLAX"] = "1" # don't import Flax/JAX
16
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
17
-
18
- # ------------------------------- Imports -----------------------------------
19
- import re, json, time, joblib, warnings, math, hashlib
20
- from pathlib import Path
21
- from typing import List, Dict
22
- import numpy as np
23
- import pandas as pd
24
-
25
- from sklearn.model_selection import train_test_split
26
- from sklearn.impute import SimpleImputer
27
- from sklearn.pipeline import Pipeline
28
- from sklearn.compose import ColumnTransformer
29
- from sklearn.preprocessing import RobustScaler, OneHotEncoder
30
- from sklearn.preprocessing import normalize as sk_normalize
31
- from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
32
- from sklearn.feature_selection import VarianceThreshold
33
- from sklearn.feature_extraction.text import TfidfVectorizer
34
-
35
- from xgboost import XGBRegressor
36
- from pypdf import PdfReader
37
- import fitz # PyMuPDF
38
- import gradio as gr
39
-
40
- USE_DENSE = True
41
- try:
42
- from sentence_transformers import SentenceTransformer
43
- except Exception as e:
44
- USE_DENSE = False
45
- print("⚠️ sentence-transformers unavailable; continuing with TF-IDF + BM25 only.\n", e)
46
-
47
- from rank_bm25 import BM25Okapi
48
- from openai import OpenAI
49
-
50
- warnings.filterwarnings("ignore", category=UserWarning)
51
-
52
- # ============================ Config =======================================
53
- # --- Data & model paths ---
54
- DATA_PATH = "july3.xlsx" # <- update if needed
55
-
56
- # --- Local PDF folder for RAG (no online indexing) ---
57
- LOCAL_PDF_DIR = Path(r"C:\Users\nmoha13\Downloads\literature_pdfs") # <- your local folder
58
- LOCAL_PDF_DIR.mkdir(exist_ok=True)
59
-
60
- # --- RAG artifacts (kept in working dir) ---
61
- ARTIFACT_DIR = Path("rag_artifacts"); ARTIFACT_DIR.mkdir(exist_ok=True)
62
- MODEL_OUT = "stress_gf_xgb.joblib"
63
- TFIDF_VECT_PATH = ARTIFACT_DIR / "tfidf_vectorizer.joblib"
64
- TFIDF_MAT_PATH = ARTIFACT_DIR / "tfidf_matrix.joblib"
65
- BM25_TOK_PATH = ARTIFACT_DIR / "bm25_tokens.joblib"
66
- EMB_NPY_PATH = ARTIFACT_DIR / "chunk_embeddings.npy"
67
- RAG_META_PATH = ARTIFACT_DIR / "chunks.parquet"
68
-
69
- # --- Embedding model (fast CPU) ---
70
- EMB_MODEL_NAME = os.getenv("EMB_MODEL_NAME", "sentence-transformers/all-MiniLM-L6-v2")
71
-
72
- # --- OpenAI (optional LLM synthesis) ---
73
- OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini") # e.g., "gpt-5-mini"
74
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None) # set env var to enable LLM
75
-
76
- # --- Retrieval weights (UI defaults adapt if dense disabled) ---
77
- W_TFIDF_DEFAULT = 0.50 if not USE_DENSE else 0.30
78
- W_BM25_DEFAULT = 0.50 if not USE_DENSE else 0.30
79
- W_EMB_DEFAULT = 0.00 if not USE_DENSE else 0.40
80
-
81
- RANDOM_SEED = 42
82
-
83
- # ==================== XGB Pipeline (Prediction) ============================
84
def make_onehot():
    """Return a dense-output OneHotEncoder across scikit-learn versions.

    scikit-learn >= 1.2 renamed the ``sparse`` keyword to ``sparse_output``;
    try the modern spelling first and fall back on TypeError for older
    releases.
    """
    try:
        return OneHotEncoder(handle_unknown="ignore", sparse_output=False)
    except TypeError:
        return OneHotEncoder(handle_unknown="ignore", sparse=False)
89
-
90
def rmse(y_true, y_pred):
    """Return the root-mean-squared error between *y_true* and *y_pred*.

    Bug fix: the previous implementation returned ``mean_squared_error``
    directly — i.e. the MSE, not the RMSE — despite the function's name and
    the "RMSE" label printed by evaluate(). Take the square root so the
    reported figure is in the same units as the target.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))
92
-
93
def evaluate(m, X, y_log, name="Model"):
    """Print and return (R², RMSE, MAE) for *m* in the target's original units.

    The model is trained on log1p-transformed targets, so both predictions
    and the provided *y_log* are mapped back with expm1 before scoring.
    """
    preds = np.expm1(m.predict(X))
    truth = np.expm1(y_log)
    r2 = r2_score(truth, preds)
    err = rmse(truth, preds)
    mae = mean_absolute_error(truth, preds)
    print(f"{name}: R²={r2:.3f}, RMSE={err:.3f}, MAE={mae:.3f}")
    return r2, err, mae
102
-
103
# --- Load data and prepare the modelling frame -----------------------------
df = pd.read_excel(DATA_PATH)
df.columns = df.columns.str.strip()

# Legacy duplicate columns from the raw sheet (superseded by the snake_case
# versions below) — drop whichever of them are present.
drop_cols = [
    'Loading rate (MPa/s)', 'Voltage (V) AC\\DC', 'Elastic Modulus (GPa)', 'Duration (hrs) of Dying Method'
]
df = df.drop(columns=[c for c in drop_cols if c in df.columns], errors='ignore')

# Feature set used by the predictor (order matters: the UI feeds inputs in
# this order) plus the regression target.
main_variables = [
    'Filler1_Type', 'Filler1_Diameter_um', 'Filler1_Length_mm',
    'AvgFiller_Density_g/cm3', 'AvgFiller_weight_%', 'AvgFiller_Volume_%',
    'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Diameter_um', 'Filler2_Length_mm',
    'Filler2_Dimensions', 'Sample_Volume_mm3', 'Electrode/Probe_Count', 'Electrode/Probe_Material',
    'W/B', 'S/B', 'GaugeLength_mm', 'Curing_Conditions', 'Num_ConductiveFillers',
    'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
    'ElasticModulus_Gpa', 'Voltage_Type', 'Applied_Voltage_V'
]
target_col = 'Stress_GF_Mpa'

# Keep only modelled columns; require a present, strictly positive target
# (log1p below needs > 0 anyway for a meaningful gauge factor).
df = df[main_variables + [target_col]].copy()
df = df.dropna(subset=[target_col])
df = df[df[target_col] > 0]

numeric_cols = [
    'Filler1_Diameter_um', 'Filler1_Length_mm', 'AvgFiller_Density_g/cm3',
    'AvgFiller_weight_%', 'AvgFiller_Volume_%', 'Filler2_Diameter_um',
    'Filler2_Length_mm', 'Sample_Volume_mm3', 'Electrode/Probe_Count',
    'W/B', 'S/B', 'GaugeLength_mm', 'Num_ConductiveFillers',
    'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
    'ElasticModulus_Gpa', 'Applied_Voltage_V'
]
categorical_cols = [
    'Filler1_Type', 'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Dimensions',
    'Electrode/Probe_Material', 'Curing_Conditions', 'Voltage_Type'
]

# Coerce types: numerics become float (unparseable → NaN for the imputer),
# categoricals become plain strings.
df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric, errors='coerce')
df[categorical_cols] = df[categorical_cols].astype(str)

# Drop (near-)constant numeric features.
vt = VarianceThreshold(threshold=1e-3)
vt.fit(df[numeric_cols])
low_variance = set(df[numeric_cols].columns[vt.variances_ < 1e-3])
numeric_cols = [c for c in numeric_cols if c not in low_variance]

# Drop one of each highly correlated (>0.95 abs) numeric pair, scanning the
# upper triangle so only the later column of a pair is removed.
corr = df[numeric_cols].corr().abs()
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [c for c in upper.columns if any(upper[c] > 0.95)]
numeric_cols = [c for c in numeric_cols if c not in to_drop]

# Model on log1p(target); predictions are mapped back with expm1.
X = df[main_variables].copy()
y = np.log1p(df[target_col])

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=RANDOM_SEED
)
160
-
161
- BEST_PARAMS = {
162
- "regressor__subsample": 1.0,
163
- "regressor__reg_lambda": 5,
164
- "regressor__reg_alpha": 0.05,
165
- "regressor__n_estimators": 300,
166
- "regressor__max_depth": 6,
167
- "regressor__learning_rate": 0.1,
168
- "regressor__gamma": 0,
169
- "regressor__colsample_bytree": 1.0
170
- }
171
-
172
def train_and_save_model():
    """Fit the preprocessing + XGBoost pipeline and persist it to MODEL_OUT.

    Uses the module-level train split (X_train, y_train) and the pre-tuned
    BEST_PARAMS. Step names ('preprocessor'/'regressor', 'num'/'cat') must
    match the BEST_PARAMS key prefixes. Returns the fitted pipeline.
    """
    numeric_steps = Pipeline([('imputer', SimpleImputer(strategy='median')),
                              ('scaler', RobustScaler())])
    categorical_steps = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')),
                                  ('onehot', make_onehot())])

    pre = ColumnTransformer([
        ('num', numeric_steps, numeric_cols),
        ('cat', categorical_steps, categorical_cols),
    ])

    pipe = Pipeline([
        ('preprocessor', pre),
        ('regressor', XGBRegressor(random_state=RANDOM_SEED, n_jobs=-1, verbosity=0)),
    ])
    pipe.set_params(**BEST_PARAMS).fit(X_train, y_train)

    joblib.dump(pipe, MODEL_OUT)
    print(f"✅ Trained new model and saved → {MODEL_OUT}")
    return pipe
192
-
193
def load_or_train_model():
    """Return the persisted pipeline if one exists, else train a fresh one."""
    if not os.path.exists(MODEL_OUT):
        print("⚠️ No saved model found. Training a new one...")
        return train_and_save_model()
    print(f"📂 Loading existing model from {MODEL_OUT}")
    return joblib.load(MODEL_OUT)
200
-
201
- xgb_pipe = load_or_train_model()
202
-
203
- # ======================= Hybrid RAG Indexing ================================
204
- _SENT_SPLIT_RE = re.compile(r"(?<=[.!?])\s+|\n+")
205
- TOKEN_RE = re.compile(r"[A-Za-z0-9_#+\-/\.%]+")
206
-
207
def sent_split(text: str) -> List[str]:
    """Split *text* into sentences and keep only plausible ones.

    Splits after ., ! or ? (or on newline runs) and discards fragments
    shorter than five words — these are usually headings or OCR noise.
    """
    fragments = re.split(r"(?<=[.!?])\s+|\n+", text)
    trimmed = (frag.strip() for frag in fragments)
    return [frag for frag in trimmed if frag and len(frag.split()) >= 5]
210
-
211
def tokenize(text: str) -> List[str]:
    """Lowercased tokens from *text*; keeps #, +, -, /, . and % inside tokens
    so units and compound terms (e.g. 'w/b', '0.4%') survive intact."""
    matches = re.findall(r"[A-Za-z0-9_#+\-/\.%]+", text)
    return [tok.lower() for tok in matches]
213
-
214
def extract_text_pymupdf(pdf_path: Path) -> str:
    """Extract page-tagged text from *pdf_path*.

    Each page is prefixed with a ``[[PAGE=n]]`` marker so downstream code can
    recover page numbers for citations. PyMuPDF is tried first; on any
    failure we fall back to pypdf, and on a double failure the error is
    printed and an empty string returned (the caller skips empty docs).

    Fix: the original never closed the PyMuPDF document handle, leaking a
    file descriptor per PDF during indexing; the handle is now released via
    a context manager.
    """
    try:
        with fitz.open(pdf_path) as doc:
            pages = []
            for i, page in enumerate(doc):
                txt = page.get_text("text") or ""
                pages.append(f"[[PAGE={i+1}]]\n{txt}")
        return "\n\n".join(pages)
    except Exception:
        # Fallback to PyPDF
        try:
            reader = PdfReader(str(pdf_path))
            pages = []
            for i, p in enumerate(reader.pages):
                txt = p.extract_text() or ""
                pages.append(f"[[PAGE={i+1}]]\n{txt}")
            return "\n\n".join(pages)
        except Exception as e:
            print(f"PDF read error ({pdf_path}): {e}")
            return ""
234
-
235
def chunk_by_sentence_windows(text: str, win_size=8, overlap=2) -> List[str]:
    """Chunk *text* into overlapping sentence windows.

    Windows hold up to *win_size* sentences and advance by
    ``win_size - overlap``, so consecutive chunks share *overlap* sentences
    of context.
    """
    sentences = sent_split(text)
    stride = max(1, win_size - overlap)
    chunks = []
    for start in range(0, len(sentences), stride):
        window = sentences[start:start + win_size]
        if not window:
            break
        chunks.append(" ".join(window))
    return chunks
244
-
245
def _safe_init_st_model(name: str):
    """Construct a SentenceTransformer, degrading gracefully on failure.

    Any exception (missing weights, incompatible transformers build, etc.)
    flips the global USE_DENSE flag off and returns None so callers fall
    back to sparse-only retrieval instead of crashing.
    """
    global USE_DENSE
    if not USE_DENSE:
        return None
    try:
        return SentenceTransformer(name)
    except Exception as e:
        print("⚠️ Could not initialize SentenceTransformer; disabling dense embeddings.\n", e)
        USE_DENSE = False
        return None
257
-
258
- def _collect_pdf_paths(pdf_dir: Path) -> List[Path]:
259
- # Collect PDFs recursively from the local folder
260
- return list(Path(pdf_dir).glob("**/*.pdf"))
261
-
262
def build_or_load_hybrid(pdf_dir: Path):
    """Load cached retrieval artifacts if complete, else index *pdf_dir* fresh.

    Returns a 5-tuple: (tfidf_vectorizer, tfidf_matrix, chunk_metadata_df,
    bm25_token_lists, dense_embeddings_or_None). The dense matrix is only
    required in the cache when USE_DENSE is on.
    """
    cache_ready = (TFIDF_VECT_PATH.exists() and TFIDF_MAT_PATH.exists()
                   and BM25_TOK_PATH.exists() and RAG_META_PATH.exists()
                   and (EMB_NPY_PATH.exists() or not USE_DENSE))
    if cache_ready:
        vectorizer = joblib.load(TFIDF_VECT_PATH)
        X_tfidf = joblib.load(TFIDF_MAT_PATH)
        meta = pd.read_parquet(RAG_META_PATH)
        bm25_toks = joblib.load(BM25_TOK_PATH)
        emb = np.load(EMB_NPY_PATH) if (USE_DENSE and EMB_NPY_PATH.exists()) else None
        print("Loaded hybrid index.")
        return vectorizer, X_tfidf, meta, bm25_toks, emb

    # Fresh index: extract, chunk and tokenize every local PDF.
    rows, all_tokens = [], []
    pdf_paths = _collect_pdf_paths(pdf_dir)
    print(f"Indexing PDFs from {pdf_dir}. Found {len(pdf_paths)} files.")
    for pdf in pdf_paths:
        raw = extract_text_pymupdf(pdf)
        if not raw.strip():
            continue
        for i, ch in enumerate(chunk_by_sentence_windows(raw, win_size=8, overlap=2)):
            rows.append({"doc_path": str(pdf), "chunk_id": i, "text": ch})
            all_tokens.append(tokenize(ch))

    if not rows:
        raise RuntimeError(f"No PDF text found under: {pdf_dir}")

    meta = pd.DataFrame(rows)

    # Sparse lexical index: TF-IDF over uni/bi-grams, token pattern mirrors
    # tokenize() so units and compound terms survive.
    vectorizer = TfidfVectorizer(
        ngram_range=(1,2),
        min_df=1, max_df=0.95,
        sublinear_tf=True, smooth_idf=True,
        lowercase=True,
        token_pattern=r"(?u)\b\w[\w\-\./%+#]*\b"
    )
    X_tfidf = vectorizer.fit_transform(meta["text"].tolist())

    # Dense embeddings are best-effort; any failure leaves emb as None.
    emb = None
    if USE_DENSE:
        try:
            encoder = _safe_init_st_model(EMB_MODEL_NAME)
            if encoder is not None:
                raw_emb = encoder.encode(meta["text"].tolist(), batch_size=64,
                                         show_progress_bar=False, convert_to_numpy=True)
                emb = sk_normalize(raw_emb)
                np.save(EMB_NPY_PATH, emb)
        except Exception as e:
            emb = None
            print("⚠️ Dense embeddings failed; continuing without them.\n", e)

    # Persist all artifacts so the next startup is fast.
    joblib.dump(vectorizer, TFIDF_VECT_PATH)
    joblib.dump(X_tfidf, TFIDF_MAT_PATH)
    joblib.dump(all_tokens, BM25_TOK_PATH)
    meta.to_parquet(RAG_META_PATH, index=False)

    print(f"Indexed {len(meta)} chunks from {meta['doc_path'].nunique()} PDFs.")
    return vectorizer, X_tfidf, meta, all_tokens, emb
324
-
325
- # ---------- Auto reindex if new/modified PDFs are detected ----------
326
- from datetime import datetime
327
-
328
def auto_reindex_if_needed(pdf_dir: Path):
    """Delete stale RAG artifacts so the next build_or_load_hybrid() re-indexes.

    Two triggers:
      (a) no chunk metadata exists yet — wipe any partial artifact set;
      (b) some PDF under *pdf_dir* was modified after the metadata file was
          written — wipe everything including the metadata.
    The rebuild itself happens in the subsequent build_or_load_hybrid() call.
    """
    pdfs = _collect_pdf_paths(pdf_dir)
    if not RAG_META_PATH.exists():
        print("No existing index found — indexing now...")
        # Remove stale artifacts if any partial set exists
        for artifact in (TFIDF_VECT_PATH, TFIDF_MAT_PATH, BM25_TOK_PATH, EMB_NPY_PATH):
            try:
                if artifact.exists():
                    artifact.unlink()
            except Exception:
                pass  # best-effort cleanup; rebuild overwrites anyway
        return  # build will happen below
    indexed_at = datetime.fromtimestamp(RAG_META_PATH.stat().st_mtime)
    changed = [p for p in pdfs if datetime.fromtimestamp(p.stat().st_mtime) > indexed_at]
    if changed:
        print(f"Found {len(changed)} new/updated PDFs — rebuilding index...")
        # Clear artifacts to force rebuild
        for artifact in (TFIDF_VECT_PATH, TFIDF_MAT_PATH, BM25_TOK_PATH, EMB_NPY_PATH, RAG_META_PATH):
            try:
                if artifact.exists():
                    artifact.unlink()
            except Exception:
                pass
351
-
352
- # Build hybrid index (local only)
353
# Build hybrid index (local only): purge stale artifacts, then load or build.
auto_reindex_if_needed(LOCAL_PDF_DIR)
tfidf_vectorizer, tfidf_matrix, rag_meta, bm25_tokens, emb_matrix = build_or_load_hybrid(LOCAL_PDF_DIR)
bm25 = BM25Okapi(bm25_tokens)
st_query_model = _safe_init_st_model(EMB_MODEL_NAME)  # may flip USE_DENSE off

# If dense retrieval died during setup, fall back to sparse-only UI weights.
if not USE_DENSE:
    W_TFIDF_DEFAULT, W_BM25_DEFAULT, W_EMB_DEFAULT = 0.50, 0.50, 0.00
361
-
362
- def _extract_page(text_chunk: str) -> str:
363
- m = list(re.finditer(r"\[\[PAGE=(\d+)\]\]", text_chunk))
364
- return (m[-1].group(1) if m else "?")
365
-
366
- # ---------------------- Hybrid search --------------------------------------
367
def hybrid_search(query: str, k=8, w_tfidf=W_TFIDF_DEFAULT, w_bm25=W_BM25_DEFAULT, w_emb=W_EMB_DEFAULT):
    """Rank indexed chunks for *query* with a weighted score blend.

    Each retriever's scores (dense cosine, TF-IDF cosine, BM25) are min-max
    normalised, the weights are renormalised to sum to 1, and the top-*k*
    rows of rag_meta are returned with per-retriever and combined scores.
    The dense channel is skipped (weight forced to 0) when unavailable.
    """
    n_chunks = len(rag_meta)

    # Dense channel: requires a query encoder, an embedding matrix and a
    # positive weight; any encode failure degrades to sparse-only this run.
    if USE_DENSE and st_query_model is not None and emb_matrix is not None and w_emb > 0:
        try:
            q_vec = sk_normalize(st_query_model.encode([query], convert_to_numpy=True))[0]
            dense_scores = emb_matrix @ q_vec
        except Exception as e:
            print("⚠️ Dense query encoding failed; ignoring dense this run.\n", e)
            dense_scores = np.zeros(n_chunks, dtype=float)
            w_emb = 0.0
    else:
        dense_scores = np.zeros(n_chunks, dtype=float)
        w_emb = 0.0  # force off

    # Sparse channels.
    tfidf_scores = (tfidf_matrix @ tfidf_vectorizer.transform([query]).T).toarray().ravel()
    query_tokens = [t.lower() for t in TOKEN_RE.findall(query)]
    bm25_scores = np.array(bm25.get_scores(query_tokens), dtype=float)

    def _minmax(values):
        values = np.asarray(values, dtype=float)
        if np.allclose(values.max(), values.min()):
            return np.zeros_like(values)
        return (values - values.min()) / (values.max() - values.min())

    s_dense = _minmax(dense_scores)
    s_tfidf = _minmax(tfidf_scores)
    s_bm25 = _minmax(bm25_scores)

    # Renormalise weights so they always sum to 1 (guard against all-zero).
    total = (w_tfidf + w_bm25 + w_emb) or 1.0
    w_tfidf, w_bm25, w_emb = w_tfidf / total, w_bm25 / total, w_emb / total

    combined = w_emb * s_dense + w_tfidf * s_tfidf + w_bm25 * s_bm25
    top = np.argsort(-combined)[:k]
    hits = rag_meta.iloc[top].copy()
    hits["score_dense"] = s_dense[top]
    hits["score_tfidf"] = s_tfidf[top]
    hits["score_bm25"] = s_bm25[top]
    hits["score"] = combined[top]
    return hits.reset_index(drop=True)
411
-
412
- # -------------- Sentence selection with MMR (diversity) --------------------
413
def split_sentences(text: str) -> List[str]:
    """Sentences from *text* sized for quoting: 6 to 60 words inclusive."""
    return [s for s in sent_split(text) if 6 <= len(s.split()) <= 60]
416
-
417
def mmr_select_sentences(question: str, hits: pd.DataFrame, top_n=4, pool_per_chunk=6, lambda_div=0.7):
    """Select up to *top_n* relevant-but-diverse sentences from retrieved chunks.

    A candidate pool is built from each hit (at most *pool_per_chunk*
    sentences per chunk, tagged with source doc name and page). Relevance is
    scored with dense embeddings when available, otherwise TF-IDF cosine.
    Greedy Maximal Marginal Relevance then picks sentences: *lambda_div*
    weights relevance against maximum similarity to already-chosen ones.
    Returns a list of {"sent", "doc", "page"} dicts (possibly empty).
    """
    pool = []
    for _, row in hits.iterrows():
        doc_name = Path(row["doc_path"]).name
        page_no = _extract_page(row["text"])
        for sent in split_sentences(row["text"])[:pool_per_chunk]:
            pool.append({"sent": sent, "doc": doc_name, "page": page_no})
    if not pool:
        return []

    sent_texts = [p["sent"] for p in pool]

    if USE_DENSE and st_query_model is not None:
        try:
            enc = st_query_model.encode([question] + sent_texts, convert_to_numpy=True)
            q_vec = sk_normalize(enc[:1])[0]
            S = sk_normalize(enc[1:])
            rel = S @ q_vec
            def sim_fn(i, j): return float(S[i] @ S[j])
        except Exception as e:
            print("⚠️ Dense sentence encoding failed; falling back to TF-IDF for MMR.\n", e)
            Q = tfidf_vectorizer.transform([question])
            S = tfidf_vectorizer.transform(sent_texts)
            rel = (S @ Q.T).toarray().ravel()
            def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])
    else:
        Q = tfidf_vectorizer.transform([question])
        S = tfidf_vectorizer.transform(sent_texts)
        rel = (S @ Q.T).toarray().ravel()
        def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])

    # Seed with the single most relevant sentence.
    first = int(np.argmax(rel))
    selected = [pool[first]]
    selected_idx = [first]
    remaining = [i for i in range(len(pool)) if i != first]

    while len(selected) < top_n and remaining:
        scored = []
        for i in remaining:
            redundancy = max(sim_fn(i, j) for j in selected_idx) if selected_idx else 0.0
            scored.append((lambda_div * rel[i] - (1 - lambda_div) * redundancy, i))
        # max over (score, index) tuples matches the original sort(reverse=True)
        # tie-breaking: equal scores resolve to the larger index.
        best_i = max(scored)[1]
        selected.append(pool[best_i])
        selected_idx.append(best_i)
        remaining.remove(best_i)
    return selected
464
-
465
def compose_extractive(selected: List[Dict]) -> str:
    """Join selected sentences into one string, each followed by its
    "(doc, p.page)" citation; empty input yields an empty string."""
    if not selected:
        return ""
    return " ".join(f"{item['sent']} ({item['doc']}, p.{item['page']})" for item in selected)
470
-
471
- # ------------------- Optional GPT-5 synthesis ------------------------------
472
- # ------------------- Optional GPT-4o/GPT-5 synthesis ------------------------------
473
def synthesize_with_llm(question: str, sentence_lines: List[str], model: str = None, temperature: float = 0.2) -> str:
    """Paraphrase the cited evidence lines into a short paragraph via OpenAI.

    Returns the synthesized answer, or None when no API key is configured or
    the API call fails — callers then fall back to the extractive answer.
    """
    if OPENAI_API_KEY is None:
        print("Skipping ChatGPT")
        return None  # not configured → skip synthesis

    from openai import OpenAI
    client = OpenAI(api_key=OPENAI_API_KEY)
    chosen_model = model if model is not None else OPENAI_MODEL

    # --- Stronger, clean academic prompt ---
    SYSTEM_PROMPT = (
        "You are a scientific writing assistant specializing in self-sensing cementitious materials.\n"
        "Write a short, fluent, and informative paragraph (3–6 sentences) answering the question using ONLY the provided evidence.\n"
        "Rephrase and synthesize ideas; do not copy sentences verbatim.\n"
        "Include parenthetical citations exactly as given (e.g., '(Paper.pdf, p.4)')."
    )

    user_prompt = (
        f"Question: {question}\n\n"
        "Evidence:\n" +
        "\n".join(f"- {s}" for s in sentence_lines)
    )

    try:
        print("🔍 Calling GPT synthesis...")
        response = client.chat.completions.create(
            model=chosen_model,
            temperature=temperature,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt},
            ],
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"❌ LLM synthesis error: {e}")
        return None
514
-
515
-
516
- # ------------------------ RAG reply ----------------------------------------
517
def rag_reply(
    question: str,
    k: int = 8,
    n_sentences: int = 4,
    include_passages: bool = False,
    use_llm: bool = False,
    model: str = None,
    temperature: float = 0.2,
    strict_quotes_only: bool = False,
    w_tfidf: float = W_TFIDF_DEFAULT,
    w_bm25: float = W_BM25_DEFAULT,
    w_emb: float = W_EMB_DEFAULT
) -> str:
    """Answer *question* from the local PDF index as a Markdown string.

    Pipeline: hybrid retrieval → MMR sentence selection → one of three output
    modes: strict quotes, optional LLM paraphrase, or extractive fallback.
    Every mode carries "(doc, p.X)" citations and a coverage note when fewer
    than three distinct sources contributed.
    """
    hits = hybrid_search(question, k=k, w_tfidf=w_tfidf, w_bm25=w_bm25, w_emb=w_emb)
    if hits.empty:
        return "No relevant passages found. Add more PDFs in literature_pdfs/ or adjust your query."

    selected = mmr_select_sentences(question, hits, top_n=int(n_sentences), pool_per_chunk=6, lambda_div=0.7)
    header_cites = "; ".join(
        f"{Path(r['doc_path']).name} (p.{_extract_page(r['text'])})" for _, r in hits.head(6).iterrows()
    )

    # Coverage note (helps debugging thin answers)
    srcs = {Path(r['doc_path']).name for _, r in hits.iterrows()}
    coverage_note = ""
    if len(srcs) < 3:
        coverage_note = f"\n\n> Note: Only {len(srcs)} unique source(s) contributed. Add more PDFs or increase Top-K."

    # Top two raw chunks, appended whenever passages are requested or needed.
    passages_block = "\n\n".join(hits["text"].tolist()[:2])

    if strict_quotes_only:
        if not selected:
            return f"**Quoted Passages:**\n\n---\n" + passages_block + \
                   f"\n\n**Citations:** {header_cites}{coverage_note}"
        msg = "**Quoted Passages:**\n- " + "\n- ".join(f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected)
        msg += f"\n\n**Citations:** {header_cites}{coverage_note}"
        if include_passages:
            msg += "\n\n---\n" + passages_block
        return msg

    # Extractive baseline
    extractive = compose_extractive(selected)

    # Optional LLM synthesis
    if use_llm and selected:
        lines = [f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected]
        llm_text = synthesize_with_llm(question, lines, model=model, temperature=temperature)
        if llm_text:
            msg = f"**Answer (GPT-5 synthesis):** {llm_text}\n\n**Citations:** {header_cites}{coverage_note}"
            if include_passages:
                msg += "\n\n---\n" + passages_block
            return msg

    # Fallback: purely extractive
    if not extractive:
        return f"**Answer:** Here are relevant passages.\n\n**Citations:** {header_cites}{coverage_note}\n\n---\n" + passages_block

    msg = f"**Answer:** {extractive}\n\n**Citations:** {header_cites}{coverage_note}"
    if include_passages:
        msg += "\n\n---\n" + passages_block
    return msg
576
-
577
- # =========================== Gradio UI =====================================
578
- INPUT_COLS = [
579
- "Filler1_Type", "Filler1_Dimensions", "Filler1_Diameter_um", "Filler1_Length_mm",
580
- "Filler2_Type", "Filler2_Dimensions", "Filler2_Diameter_um", "Filler2_Length_mm",
581
- "AvgFiller_Density_g/cm3", "AvgFiller_weight_%", "AvgFiller_Volume_%",
582
- "Sample_Volume_mm3", "Electrode/Probe_Count", "Electrode/Probe_Material",
583
- "W/B", "S/B", "GaugeLength_mm", "Curing_Conditions", "Num_ConductiveFillers",
584
- "DryingTemperature_C", "DryingDuration_hrs", "LoadingRate_MPa/s",
585
- "ElasticModulus_Gpa", "Voltage_Type", "Applied_Voltage_V"
586
- ]
587
- NUMERIC_INPUTS = {
588
- "Filler1_Diameter_um","Filler1_Length_mm","Filler2_Diameter_um","Filler2_Length_mm",
589
- "AvgFiller_Density_g/cm3","AvgFiller_weight_%","AvgFiller_Volume_%","Sample_Volume_mm3",
590
- "Electrode/Probe_Count","W/B","S/B","GaugeLength_mm","Num_ConductiveFillers",
591
- "DryingTemperature_C","DryingDuration_hrs","LoadingRate_MPa/s","ElasticModulus_Gpa",
592
- "Applied_Voltage_V"
593
- }
594
- CAT_DIM_CHOICES = ["0D","1D","2D","3D","NA"]
595
-
596
def _coerce_row(args):
    """Map the ordered Gradio inputs onto a single-row DataFrame.

    Numeric fields are coerced to float; blank or unparseable values become
    None so the pipeline's imputer can fill them. All other fields become
    stripped strings (None → "").

    Fix: the numeric coercion used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; it now catches only the conversion errors
    float() can actually raise.
    """
    row = dict(zip(INPUT_COLS, args))
    clean = {}
    for key, value in row.items():
        if key in NUMERIC_INPUTS:
            if value in ("", None):
                clean[key] = None
            else:
                try:
                    clean[key] = float(value)
                except (TypeError, ValueError):
                    clean[key] = None
        else:
            clean[key] = "" if value is None else str(value).strip()
    return pd.DataFrame([clean], columns=INPUT_COLS)
608
-
609
def _load_model():
    """Load the persisted XGB pipeline, failing loudly if it is missing."""
    if os.path.exists(MODEL_OUT):
        return joblib.load(MODEL_OUT)
    raise FileNotFoundError(f"Model file not found at '{MODEL_OUT}'. Retrain above.")
613
-
614
def predict_fn(*args):
    """Gradio handler: predict Stress_GF (MPa) from the ordered form inputs.

    The pipeline predicts log1p(target), so expm1 maps it back; tiny negative
    round-off values are clamped to 0. On any failure a message string is
    returned in place of the number.
    """
    try:
        model = _load_model()
        frame = _coerce_row(args)
        prediction = float(np.expm1(model.predict(frame))[0])
        if -1e-8 < prediction < 0:
            prediction = 0.0
        return prediction
    except Exception as e:
        return f"Error during prediction: {e}"
624
-
625
def rag_chat_fn(message, history, top_k, n_sentences, include_passages,
                use_llm, model_name, temperature, strict_quotes_only,
                w_tfidf, w_bm25, w_emb):
    """ChatInterface adapter: coerce the UI widget values and delegate to
    rag_reply(); *history* is accepted but unused. Errors come back as text
    so the chat never crashes."""
    if not message or not message.strip():
        return "Ask a literature question (e.g., *How does CNT length affect gauge factor?*)"
    try:
        return rag_reply(
            question=message,
            k=int(top_k),
            n_sentences=int(n_sentences),
            include_passages=bool(include_passages),
            use_llm=bool(use_llm),
            model=(model_name or None),
            temperature=float(temperature),
            strict_quotes_only=bool(strict_quotes_only),
            w_tfidf=float(w_tfidf),
            w_bm25=float(w_bm25),
            w_emb=float(w_emb),
        )
    except Exception as e:
        return f"RAG error: {e}"
646
-
647
# =========================== Gradio UI layout ==============================
with gr.Blocks() as demo:
    gr.Markdown("# 🧪 Self-Sensing Concrete Assistant — Hybrid RAG (Accurate Q&A)")
    gr.Markdown(
        "- **Prediction**: XGBoost pipeline for **Stress Gauge Factor (MPa)**.\n"
        "- **Literature (Hybrid RAG)**: BM25 + TF-IDF + Dense embeddings with **MMR** sentence selection.\n"
        "- **Strict mode** shows only quoted sentences with citations; **GPT-5** can paraphrase strictly from those quotes.\n"
        "- **Local-only RAG**: drop PDFs into `literature_pdfs/` and the index will auto-refresh on restart."
    )

    with gr.Tabs():
        # --- Tab 1: prediction form; widget order must match INPUT_COLS ---
        with gr.Tab("🔮 Predict Gauge Factor (XGB)"):
            with gr.Row():
                with gr.Column():
                    inputs = [
                        gr.Textbox(label="Filler1_Type", placeholder="e.g., CNT, Graphite, Steel fiber"),
                        gr.Dropdown(CAT_DIM_CHOICES, label="Filler1_Dimensions", value="NA"),
                        gr.Number(label="Filler1_Diameter_um"),
                        gr.Number(label="Filler1_Length_mm"),
                        gr.Textbox(label="Filler2_Type", placeholder="Optional"),
                        gr.Dropdown(CAT_DIM_CHOICES, label="Filler2_Dimensions", value="NA"),
                        gr.Number(label="Filler2_Diameter_um"),
                        gr.Number(label="Filler2_Length_mm"),
                        gr.Number(label="AvgFiller_Density_g/cm3"),
                        gr.Number(label="AvgFiller_weight_%"),
                        gr.Number(label="AvgFiller_Volume_%"),
                        gr.Number(label="Sample_Volume_mm3"),
                        gr.Number(label="Electrode/Probe_Count"),
                        gr.Textbox(label="Electrode/Probe_Material", placeholder="e.g., Copper, Silver paste"),
                        gr.Number(label="W/B"),
                        gr.Number(label="S/B"),
                        gr.Number(label="GaugeLength_mm"),
                        gr.Textbox(label="Curing_Conditions", placeholder="e.g., 28d water, 20°C"),
                        gr.Number(label="Num_ConductiveFillers"),
                        gr.Number(label="DryingTemperature_C"),
                        gr.Number(label="DryingDuration_hrs"),
                        gr.Number(label="LoadingRate_MPa/s"),
                        gr.Number(label="ElasticModulus_Gpa"),
                        gr.Textbox(label="Voltage_Type", placeholder="AC / DC"),
                        gr.Number(label="Applied_Voltage_V"),
                    ]
                with gr.Column():
                    out_pred = gr.Number(label="Predicted Stress_GF (MPa)", precision=6)
                    predict_btn = gr.Button("Predict", variant="primary")
                    predict_btn.click(predict_fn, inputs, out_pred)

        # --- Tab 2: literature Q&A over the hybrid RAG index ---
        with gr.Tab("📚 Ask the Literature (Hybrid RAG + MMR)"):
            with gr.Row():
                top_k = gr.Slider(5, 12, value=8, step=1, label="Top-K chunks")
                n_sentences = gr.Slider(2, 6, value=4, step=1, label="Answer length (sentences)")
                include_passages = gr.Checkbox(value=False, label="Include supporting passages")
            with gr.Accordion("Retriever weights (advanced)", open=False):
                w_tfidf = gr.Slider(0.0, 1.0, value=W_TFIDF_DEFAULT, step=0.05, label="TF-IDF weight")
                w_bm25 = gr.Slider(0.0, 1.0, value=W_BM25_DEFAULT, step=0.05, label="BM25 weight")
                w_emb = gr.Slider(0.0, 1.0, value=W_EMB_DEFAULT, step=0.05, label="Dense weight (set 0 if disabled)")
            with gr.Accordion("LLM & Controls", open=False):
                strict_quotes_only = gr.Checkbox(value=False, label="Strict quotes only (no paraphrasing)")
                use_llm = gr.Checkbox(value=False, label="Use GPT-5 to paraphrase selected sentences")
                model_name = gr.Textbox(value=os.getenv("OPENAI_MODEL", OPENAI_MODEL), label="LLM model", placeholder="e.g., gpt-5 or gpt-5-mini")
                temperature = gr.Slider(0.0, 1.0, value=0.2, step=0.05, label="Temperature")
            gr.ChatInterface(
                fn=rag_chat_fn,
                additional_inputs=[top_k, n_sentences, include_passages, use_llm, model_name, temperature, strict_quotes_only, w_tfidf, w_bm25, w_emb],
                title="Literature Q&A",
                description="Hybrid retrieval with diversity. Answers carry inline (Doc, p.X) citations. Toggle strict/LLM modes."
            )

# Note: add share=True to expose publicly (for iframe embedding)
demo.queue().launch()
 
1
+ # ================================================================
2
+ # Self-Sensing Concrete Assistant — Hybrid RAG + XGB + (opt) GPT-5
3
+ # FIXED for Windows/Conda import issues (transformers/quantizers)
4
+ # - Pins compatible versions (transformers 4.44.2, sbert 2.7.0, torch 2.x)
5
+ # - Disables TF/Flax backends; safe fallbacks if dense fails
6
+ # - Hybrid retrieval (BM25 + TF-IDF + Dense*) + MMR sentence selection
7
+ # - Local folder only (RAG reads from ./literature_pdfs); no online indexing
8
+ # - Optional GPT-5 synthesis strictly from selected cited sentences
9
+ # - Gradio UI with Prediction + Literature Q&A tabs
10
+ # ================================================================
11
+
12
+ # ---------------------- MUST RUN THESE FLAGS FIRST ----------------------
13
+ import os
14
+ os.environ["TRANSFORMERS_NO_TF"] = "1" # don't import TensorFlow
15
+ os.environ["TRANSFORMERS_NO_FLAX"] = "1" # don't import Flax/JAX
16
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
17
+
18
+ # ------------------------------- Imports -----------------------------------
19
+ import re, json, time, joblib, warnings, math, hashlib
20
+ from pathlib import Path
21
+ from typing import List, Dict
22
+ import numpy as np
23
+ import pandas as pd
24
+
25
+ from sklearn.model_selection import train_test_split
26
+ from sklearn.impute import SimpleImputer
27
+ from sklearn.pipeline import Pipeline
28
+ from sklearn.compose import ColumnTransformer
29
+ from sklearn.preprocessing import RobustScaler, OneHotEncoder
30
+ from sklearn.preprocessing import normalize as sk_normalize
31
+ from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
32
+ from sklearn.feature_selection import VarianceThreshold
33
+ from sklearn.feature_extraction.text import TfidfVectorizer
34
+
35
+ from xgboost import XGBRegressor
36
+ from pypdf import PdfReader
37
+ import fitz # PyMuPDF
38
+ import gradio as gr
39
+
40
# Dense retrieval is optional: when sentence-transformers cannot be imported
# the app degrades to sparse-only retrieval (TF-IDF + BM25).
try:
    from sentence_transformers import SentenceTransformer
    USE_DENSE = True
except Exception as import_err:
    USE_DENSE = False
    print("⚠️ sentence-transformers unavailable; continuing with TF-IDF + BM25 only.\n", import_err)
46
+
47
+ from rank_bm25 import BM25Okapi
48
+ from openai import OpenAI
49
+
50
+ warnings.filterwarnings("ignore", category=UserWarning)
51
+
52
# ============================ Config =======================================
# --- Data & model paths ---
DATA_PATH = "july3.xlsx"  # <- update if needed

# --- Local PDF folder for RAG (no online indexing) ---
# BUGFIX: this was Path("/literature_pdfs") — an absolute path at the
# filesystem root — which contradicts the documented "./literature_pdfs"
# layout (see the header and UI help text) and fails with PermissionError on
# most machines. Use the relative folder next to app.py instead.
LOCAL_PDF_DIR = Path("literature_pdfs")  # <- your local folder
LOCAL_PDF_DIR.mkdir(exist_ok=True)

# --- RAG artifacts (kept in working dir) ---
ARTIFACT_DIR = Path("rag_artifacts"); ARTIFACT_DIR.mkdir(exist_ok=True)
MODEL_OUT = "stress_gf_xgb.joblib"                      # persisted XGB pipeline
TFIDF_VECT_PATH = ARTIFACT_DIR / "tfidf_vectorizer.joblib"
TFIDF_MAT_PATH = ARTIFACT_DIR / "tfidf_matrix.joblib"
BM25_TOK_PATH = ARTIFACT_DIR / "bm25_tokens.joblib"
EMB_NPY_PATH = ARTIFACT_DIR / "chunk_embeddings.npy"
RAG_META_PATH = ARTIFACT_DIR / "chunks.parquet"

# --- Embedding model (fast CPU) ---
EMB_MODEL_NAME = os.getenv("EMB_MODEL_NAME", "sentence-transformers/all-MiniLM-L6-v2")

# --- OpenAI (optional LLM synthesis) ---
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")  # e.g., "gpt-5-mini"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)       # set env var to enable LLM

# --- Retrieval weights (UI defaults adapt if dense disabled) ---
W_TFIDF_DEFAULT = 0.50 if not USE_DENSE else 0.30
W_BM25_DEFAULT = 0.50 if not USE_DENSE else 0.30
W_EMB_DEFAULT = 0.00 if not USE_DENSE else 0.40

RANDOM_SEED = 42  # single seed for the train/test split and the regressor
82
+
83
# ==================== XGB Pipeline (Prediction) ============================
def make_onehot():
    """Build a dense-output OneHotEncoder across sklearn versions.

    sklearn >= 1.2 renamed the ``sparse`` keyword to ``sparse_output``; try
    the modern spelling first and fall back for older releases.
    """
    common = {"handle_unknown": "ignore"}
    try:
        return OneHotEncoder(sparse_output=False, **common)
    except TypeError:
        # pre-1.2 scikit-learn still uses ``sparse``
        return OneHotEncoder(sparse=False, **common)
89
+
90
def rmse(y_true, y_pred):
    """Root-mean-squared error as a plain float.

    BUGFIX: the original body returned ``mean_squared_error`` (MSE) despite
    the function's name, so every "RMSE" printed by ``evaluate`` was actually
    the squared error. Computed directly with numpy, which also drops the
    sklearn dependency for this helper.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))
92
+
93
def evaluate(m, X, y_log, name="Model"):
    """Score model *m* on the original (back-transformed) target scale.

    The pipeline is trained on log1p(target), so predictions and ground
    truth are mapped back with expm1 before computing R², RMSE and MAE.
    Prints a one-line summary and returns the (r2, rmse, mae) triple.
    """
    preds = np.expm1(m.predict(X))
    truth = np.expm1(y_log)
    r2 = r2_score(truth, preds)
    r = rmse(truth, preds)
    mae = mean_absolute_error(truth, preds)
    print(f"{name}: R²={r2:.3f}, RMSE={r:.3f}, MAE={mae:.3f}")
    return r2, r, mae
102
+
103
# --- Load data
df = pd.read_excel(DATA_PATH)
df.columns = df.columns.str.strip()

# Legacy column spellings from earlier spreadsheet revisions — removed up front.
drop_cols = [
    'Loading rate (MPa/s)', 'Voltage (V) AC\\DC', 'Elastic Modulus (GPa)', 'Duration (hrs) of Dying Method'
]
df = df.drop(columns=[c for c in drop_cols if c in df.columns], errors='ignore')

# Model inputs; this order is also mirrored by the Gradio Predict tab widgets.
main_variables = [
    'Filler1_Type', 'Filler1_Diameter_um', 'Filler1_Length_mm',
    'AvgFiller_Density_g/cm3', 'AvgFiller_weight_%', 'AvgFiller_Volume_%',
    'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Diameter_um', 'Filler2_Length_mm',
    'Filler2_Dimensions', 'Sample_Volume_mm3', 'Electrode/Probe_Count', 'Electrode/Probe_Material',
    'W/B', 'S/B', 'GaugeLength_mm', 'Curing_Conditions', 'Num_ConductiveFillers',
    'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
    'ElasticModulus_Gpa', 'Voltage_Type', 'Applied_Voltage_V'
]
target_col = 'Stress_GF_Mpa'

# Keep only modeling columns; the target must be present and strictly
# positive because it is later log1p-transformed.
df = df[main_variables + [target_col]].copy()
df = df.dropna(subset=[target_col])
df = df[df[target_col] > 0]

numeric_cols = [
    'Filler1_Diameter_um', 'Filler1_Length_mm', 'AvgFiller_Density_g/cm3',
    'AvgFiller_weight_%', 'AvgFiller_Volume_%', 'Filler2_Diameter_um',
    'Filler2_Length_mm', 'Sample_Volume_mm3', 'Electrode/Probe_Count',
    'W/B', 'S/B', 'GaugeLength_mm', 'Num_ConductiveFillers',
    'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
    'ElasticModulus_Gpa', 'Applied_Voltage_V'
]
categorical_cols = [
    'Filler1_Type', 'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Dimensions',
    'Electrode/Probe_Material', 'Curing_Conditions', 'Voltage_Type'
]

# Coerce dtypes: unparseable numerics become NaN (imputed inside the pipeline).
for col in numeric_cols:
    df[col] = pd.to_numeric(df[col], errors='coerce')
for col in categorical_cols:
    df[col] = df[col].astype(str)

# Drop numeric features that are essentially constant (variance < 1e-3).
# NOTE(review): VarianceThreshold.fit assumes these columns contain no NaN —
# confirm against the spreadsheet if this ever raises.
vt = VarianceThreshold(threshold=1e-3)
vt.fit(df[numeric_cols])
numeric_cols = [c for c in numeric_cols if c not in df[numeric_cols].columns[vt.variances_ < 1e-3]]

# Drop one column from every highly collinear (|r| > 0.95) numeric pair.
corr = df[numeric_cols].corr().abs()
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [c for c in upper.columns if any(upper[c] > 0.95)]
numeric_cols = [c for c in numeric_cols if c not in to_drop]

# Feature selection above only trims the `numeric_cols` list handed to the
# ColumnTransformer; X itself keeps every main variable.
X = df[main_variables].copy()
y = np.log1p(df[target_col])  # log1p tames the heavy right skew of the target

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=RANDOM_SEED
)
160
+
161
# Hyper-parameters from prior offline tuning, applied verbatim to the pipeline.
BEST_PARAMS = {
    "regressor__learning_rate": 0.1,
    "regressor__n_estimators": 300,
    "regressor__max_depth": 6,
    "regressor__subsample": 1.0,
    "regressor__colsample_bytree": 1.0,
    "regressor__gamma": 0,
    "regressor__reg_alpha": 0.05,
    "regressor__reg_lambda": 5,
}
171
+
172
def train_and_save_model():
    """Fit the preprocessing + XGBoost pipeline and persist it to MODEL_OUT.

    Numeric branch: median-impute then RobustScaler (outlier tolerant).
    Categorical branch: mode-impute then one-hot (unknown categories ignored).
    Returns the fitted sklearn Pipeline.
    """
    numeric_branch = Pipeline([
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', RobustScaler()),
    ])
    categorical_branch = Pipeline([
        ('imputer', SimpleImputer(strategy='most_frequent')),
        ('onehot', make_onehot()),
    ])
    preprocessor = ColumnTransformer([
        ('num', numeric_branch, numeric_cols),
        ('cat', categorical_branch, categorical_cols),
    ])

    pipe = Pipeline([
        ('preprocessor', preprocessor),
        ('regressor', XGBRegressor(random_state=RANDOM_SEED, n_jobs=-1, verbosity=0)),
    ])
    pipe.set_params(**BEST_PARAMS).fit(X_train, y_train)

    joblib.dump(pipe, MODEL_OUT)
    print(f"✅ Trained new model and saved → {MODEL_OUT}")
    return pipe
192
+
193
def load_or_train_model():
    """Return the persisted pipeline when present, otherwise train a fresh one."""
    if not os.path.exists(MODEL_OUT):
        print("⚠️ No saved model found. Training a new one...")
        return train_and_save_model()
    print(f"📂 Loading existing model from {MODEL_OUT}")
    return joblib.load(MODEL_OUT)

# Module-level singleton pipeline used by the prediction tab.
xgb_pipe = load_or_train_model()
202
+
203
# ======================= Hybrid RAG Indexing ================================
# Sentence boundary: sentence-ending punctuation followed by whitespace, or
# any run of newlines.
_SENT_SPLIT_RE = re.compile(r"(?<=[.!?])\s+|\n+")
# Token: word characters plus symbols common in materials text (%, /, -, #, +, .).
TOKEN_RE = re.compile(r"[A-Za-z0-9_#+\-/\.%]+")

def sent_split(text: str) -> List[str]:
    """Split *text* into sentences, keeping only those with >= 5 words."""
    candidates = (piece.strip() for piece in _SENT_SPLIT_RE.split(text))
    return [s for s in candidates if s and len(s.split()) >= 5]

def tokenize(text: str) -> List[str]:
    """Lower-cased regex tokens of *text* (feeds BM25)."""
    return [match.lower() for match in TOKEN_RE.findall(text)]
213
+
214
def extract_text_pymupdf(pdf_path: Path) -> str:
    """Extract the text of *pdf_path*, tagging each page with [[PAGE=n]].

    PyMuPDF is tried first; pypdf is the fallback for files it rejects.
    Returns "" when both extractors fail (the caller skips empty docs).
    """
    try:
        pages = []
        doc = fitz.open(pdf_path)
        for num, page in enumerate(doc, start=1):
            body = page.get_text("text") or ""
            pages.append(f"[[PAGE={num}]]\n{body}")
        return "\n\n".join(pages)
    except Exception:
        pass

    # Fallback to PyPDF
    try:
        pages = []
        reader = PdfReader(str(pdf_path))
        for num, page in enumerate(reader.pages, start=1):
            body = page.extract_text() or ""
            pages.append(f"[[PAGE={num}]]\n{body}")
        return "\n\n".join(pages)
    except Exception as e:
        print(f"PDF read error ({pdf_path}): {e}")
        return ""
234
+
235
def chunk_by_sentence_windows(text: str, win_size=8, overlap=2) -> List[str]:
    """Chunk *text* into sliding sentence windows.

    Windows hold ``win_size`` sentences and consecutive windows share
    ``overlap`` sentences so chunk boundaries do not cut topics in half.
    """
    sents = sent_split(text)
    stride = max(1, win_size - overlap)
    out = []
    for start in range(0, len(sents), stride):
        window = sents[start:start + win_size]
        if not window:  # defensive guard; range() never yields an empty slice
            break
        out.append(" ".join(window))
    return out
244
+
245
def _safe_init_st_model(name: str):
    """Try to init SentenceTransformer; on failure, disable dense and return None."""
    global USE_DENSE
    if not USE_DENSE:
        return None
    try:
        return SentenceTransformer(name)
    except Exception as err:
        print("⚠️ Could not initialize SentenceTransformer; disabling dense embeddings.\n", err)
        USE_DENSE = False
        return None
257
+
258
+ def _collect_pdf_paths(pdf_dir: Path) -> List[Path]:
259
+ # Collect PDFs recursively from the local folder
260
+ return list(Path(pdf_dir).glob("**/*.pdf"))
261
+
262
def build_or_load_hybrid(pdf_dir: Path):
    """Load the cached hybrid index, or build it from the PDFs under *pdf_dir*.

    Returns ``(tfidf_vectorizer, tfidf_matrix, chunk_meta, bm25_tokens, emb)``
    where ``emb`` is None whenever dense retrieval is unavailable.
    """
    # Reuse cached artifacts only when the complete set is on disk.
    cache_ready = (TFIDF_VECT_PATH.exists() and TFIDF_MAT_PATH.exists()
                   and BM25_TOK_PATH.exists() and RAG_META_PATH.exists()
                   and (EMB_NPY_PATH.exists() or not USE_DENSE))
    if cache_ready:
        vec = joblib.load(TFIDF_VECT_PATH)
        tfidf_mat = joblib.load(TFIDF_MAT_PATH)
        chunk_meta = pd.read_parquet(RAG_META_PATH)
        token_lists = joblib.load(BM25_TOK_PATH)
        dense_mat = np.load(EMB_NPY_PATH) if (USE_DENSE and EMB_NPY_PATH.exists()) else None
        print("Loaded hybrid index.")
        return vec, tfidf_mat, chunk_meta, token_lists, dense_mat

    # Fresh index: extract, chunk and tokenize every local PDF.
    chunk_rows, token_lists = [], []
    pdf_files = _collect_pdf_paths(pdf_dir)
    print(f"Indexing PDFs from {pdf_dir}. Found {len(pdf_files)} files.")
    for pdf_file in pdf_files:
        raw_text = extract_text_pymupdf(pdf_file)
        if not raw_text.strip():
            continue
        for idx, chunk in enumerate(chunk_by_sentence_windows(raw_text, win_size=8, overlap=2)):
            chunk_rows.append({"doc_path": str(pdf_file), "chunk_id": idx, "text": chunk})
            token_lists.append(tokenize(chunk))

    if not chunk_rows:
        raise RuntimeError(f"No PDF text found under: {pdf_dir}")

    chunk_meta = pd.DataFrame(chunk_rows)

    # Sparse lexical index: uni+bi-grams with sublinear tf weighting.
    vec = TfidfVectorizer(
        ngram_range=(1,2),
        min_df=1, max_df=0.95,
        sublinear_tf=True, smooth_idf=True,
        lowercase=True,
        token_pattern=r"(?u)\b\w[\w\-\./%+#]*\b"
    )
    tfidf_mat = vec.fit_transform(chunk_meta["text"].tolist())

    # Dense semantic index (optional — any failure just disables it).
    dense_mat = None
    if USE_DENSE:
        try:
            encoder = _safe_init_st_model(EMB_MODEL_NAME)
            if encoder is not None:
                raw_emb = encoder.encode(chunk_meta["text"].tolist(), batch_size=64, show_progress_bar=False, convert_to_numpy=True)
                dense_mat = sk_normalize(raw_emb)
                np.save(EMB_NPY_PATH, dense_mat)
        except Exception as e:
            dense_mat = None
            print("⚠️ Dense embeddings failed; continuing without them.\n", e)

    # Persist everything for the next start-up.
    joblib.dump(vec, TFIDF_VECT_PATH)
    joblib.dump(tfidf_mat, TFIDF_MAT_PATH)
    joblib.dump(token_lists, BM25_TOK_PATH)
    chunk_meta.to_parquet(RAG_META_PATH, index=False)

    print(f"Indexed {len(chunk_meta)} chunks from {chunk_meta['doc_path'].nunique()} PDFs.")
    return vec, tfidf_mat, chunk_meta, token_lists, dense_mat
324
+
325
+ # ---------- Auto reindex if new/modified PDFs are detected ----------
326
+ from datetime import datetime
327
+
328
def auto_reindex_if_needed(pdf_dir: Path):
    """Rebuilds RAG index if new or modified PDFs are detected."""
    pdf_files = _collect_pdf_paths(pdf_dir)

    if not RAG_META_PATH.exists():
        print("No existing index found — indexing now...")
        # Remove stale artifacts if any partial set exists; the subsequent
        # build_or_load_hybrid() call creates the fresh index.
        for artifact in (TFIDF_VECT_PATH, TFIDF_MAT_PATH, BM25_TOK_PATH, EMB_NPY_PATH):
            try:
                if artifact.exists():
                    artifact.unlink()
            except Exception:
                pass
        return

    indexed_at = datetime.fromtimestamp(RAG_META_PATH.stat().st_mtime)
    changed = [p for p in pdf_files if datetime.fromtimestamp(p.stat().st_mtime) > indexed_at]
    if changed:
        print(f"Found {len(changed)} new/updated PDFs — rebuilding index...")
        # Clear every artifact so build_or_load_hybrid() starts from scratch.
        for artifact in (TFIDF_VECT_PATH, TFIDF_MAT_PATH, BM25_TOK_PATH, EMB_NPY_PATH, RAG_META_PATH):
            try:
                if artifact.exists():
                    artifact.unlink()
            except Exception:
                pass
351
+
352
# Build hybrid index (local only): refresh-check first, then load or build.
auto_reindex_if_needed(LOCAL_PDF_DIR)
tfidf_vectorizer, tfidf_matrix, rag_meta, bm25_tokens, emb_matrix = build_or_load_hybrid(LOCAL_PDF_DIR)
bm25 = BM25Okapi(bm25_tokens)
st_query_model = _safe_init_st_model(EMB_MODEL_NAME)  # safe init; may set USE_DENSE=False

# If dense retrieval failed at runtime, rebalance the UI default weights so
# the two sparse retrievers split the score evenly.
if not USE_DENSE:
    W_TFIDF_DEFAULT, W_BM25_DEFAULT, W_EMB_DEFAULT = 0.50, 0.50, 0.00
361
+
362
+ def _extract_page(text_chunk: str) -> str:
363
+ m = list(re.finditer(r"\[\[PAGE=(\d+)\]\]", text_chunk))
364
+ return (m[-1].group(1) if m else "?")
365
+
366
# ---------------------- Hybrid search --------------------------------------
def hybrid_search(query: str, k=8, w_tfidf=W_TFIDF_DEFAULT, w_bm25=W_BM25_DEFAULT, w_emb=W_EMB_DEFAULT):
    """Rank indexed chunks for *query* with a weighted blend of three retrievers.

    Each retriever's scores are min-max normalized, the weights are
    re-normalized to sum to one, and the top-*k* chunks come back as a
    DataFrame with per-retriever and combined scores attached.
    """
    n_chunks = len(rag_meta)

    # Dense retriever (optional; silently disabled for this call on failure).
    if USE_DENSE and st_query_model is not None and emb_matrix is not None and w_emb > 0:
        try:
            q_dense = sk_normalize(st_query_model.encode([query], convert_to_numpy=True))[0]
            dense_scores = emb_matrix @ q_dense
        except Exception as err:
            print("⚠️ Dense query encoding failed; ignoring dense this run.\n", err)
            dense_scores = np.zeros(n_chunks, dtype=float)
            w_emb = 0.0
    else:
        dense_scores = np.zeros(n_chunks, dtype=float)
        w_emb = 0.0  # force off

    # TF-IDF retriever
    q_sparse = tfidf_vectorizer.transform([query])
    tfidf_scores = (tfidf_matrix @ q_sparse.T).toarray().ravel()

    # BM25 retriever
    query_tokens = [t.lower() for t in TOKEN_RE.findall(query)]
    bm25_scores = np.array(bm25.get_scores(query_tokens), dtype=float)

    def _minmax(arr):
        arr = np.asarray(arr, dtype=float)
        if np.allclose(arr.max(), arr.min()):
            return np.zeros_like(arr)
        return (arr - arr.min()) / (arr.max() - arr.min())

    s_dense = _minmax(dense_scores)
    s_tfidf = _minmax(tfidf_scores)
    s_bm25 = _minmax(bm25_scores)

    # Re-normalize weights so they always sum to 1 (guard against all-zero).
    total = (w_tfidf + w_bm25 + w_emb) or 1.0
    w_tfidf, w_bm25, w_emb = w_tfidf / total, w_bm25 / total, w_emb / total

    combined = w_emb * s_dense + w_tfidf * s_tfidf + w_bm25 * s_bm25
    top_idx = np.argsort(-combined)[:k]
    hits = rag_meta.iloc[top_idx].copy()
    hits["score_dense"] = s_dense[top_idx]
    hits["score_tfidf"] = s_tfidf[top_idx]
    hits["score_bm25"] = s_bm25[top_idx]
    hits["score"] = combined[top_idx]
    return hits.reset_index(drop=True)
411
+
412
# -------------- Sentence selection with MMR (diversity) --------------------
def split_sentences(text: str) -> List[str]:
    """Sentences of *text* between 6 and 60 words (a quotable length)."""
    return [s for s in sent_split(text) if 6 <= len(s.split()) <= 60]
416
+
417
def mmr_select_sentences(question: str, hits: pd.DataFrame, top_n=4, pool_per_chunk=6, lambda_div=0.7):
    """Pick up to *top_n* quotable sentences from *hits* via Maximal Marginal Relevance.

    Relevance comes from dense embeddings when available, else TF-IDF.
    ``lambda_div`` trades relevance (→1.0) against diversity (→0.0).
    Returns dicts with keys ``sent``, ``doc`` and ``page``.
    """
    # Candidate pool: up to pool_per_chunk quotable sentences per retrieved chunk.
    pool = []
    for _, row in hits.iterrows():
        doc_name = Path(row["doc_path"]).name
        page_no = _extract_page(row["text"])
        for sent in split_sentences(row["text"])[:pool_per_chunk]:
            pool.append({"sent": sent, "doc": doc_name, "page": page_no})
    if not pool:
        return []

    sent_texts = [item["sent"] for item in pool]

    # Build relevance scores `rel` plus a pairwise similarity function.
    if USE_DENSE and st_query_model is not None:
        try:
            enc = st_query_model.encode([question] + sent_texts, convert_to_numpy=True)
            q_vec = sk_normalize(enc[:1])[0]
            S = sk_normalize(enc[1:])
            rel = (S @ q_vec)
            def sim_fn(i, j): return float(S[i] @ S[j])
        except Exception as e:
            print("⚠️ Dense sentence encoding failed; falling back to TF-IDF for MMR.\n", e)
            Q = tfidf_vectorizer.transform([question])
            S = tfidf_vectorizer.transform(sent_texts)
            rel = (S @ Q.T).toarray().ravel()
            def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])
    else:
        Q = tfidf_vectorizer.transform([question])
        S = tfidf_vectorizer.transform(sent_texts)
        rel = (S @ Q.T).toarray().ravel()
        def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])

    # Greedy MMR: seed with the most relevant sentence, then repeatedly add
    # the candidate maximizing  lambda*rel - (1-lambda)*max_sim_to_selected.
    selected, selected_idx = [], []
    remain = list(range(len(pool)))
    first = int(np.argmax(rel))
    selected.append(pool[first]); selected_idx.append(first); remain.remove(first)

    while len(selected) < top_n and remain:
        scored = []
        for i in remain:
            redundancy = max(sim_fn(i, j) for j in selected_idx) if selected_idx else 0.0
            scored.append((lambda_div * rel[i] - (1 - lambda_div) * redundancy, i))
        scored.sort(reverse=True)
        best_i = scored[0][1]
        selected.append(pool[best_i]); selected_idx.append(best_i); remain.remove(best_i)
    return selected
464
+
465
def compose_extractive(selected: List[Dict]) -> str:
    """Join the selected sentences into one cited paragraph; "" when empty."""
    if not selected:
        return ""
    return " ".join(f"{item['sent']} ({item['doc']}, p.{item['page']})" for item in selected)
470
+
471
# ------------------- Optional GPT-4o/GPT-5 synthesis ------------------------
def synthesize_with_llm(question: str, sentence_lines: List[str], model: str = None, temperature: float = 0.2) -> str:
    """Paraphrase the cited evidence sentences into a short answer via OpenAI.

    Returns None when no API key is configured or the request fails, so the
    caller can fall back to the purely extractive answer.
    """
    if OPENAI_API_KEY is None:
        print("Skipping ChatGPT")
        return None  # not configured → skip synthesis

    client = OpenAI(api_key=OPENAI_API_KEY)
    if model is None:
        model = OPENAI_MODEL

    # Keep the model grounded: answer only from the supplied evidence lines.
    SYSTEM_PROMPT = (
        "You are a scientific writing assistant specializing in self-sensing cementitious materials.\n"
        "Write a short, fluent, and informative paragraph (3–6 sentences) answering the question using ONLY the provided evidence.\n"
        "Rephrase and synthesize ideas; do not copy sentences verbatim.\n"
        "Include parenthetical citations exactly as given (e.g., '(Paper.pdf, p.4)')."
    )
    user_prompt = (
        f"Question: {question}\n\n"
        "Evidence:\n" +
        "\n".join(f"- {s}" for s in sentence_lines)
    )

    try:
        print("🔍 Calling GPT synthesis...")
        response = client.chat.completions.create(
            model=model,
            temperature=temperature,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt},
            ],
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"❌ LLM synthesis error: {e}")
        return None
514
+
515
+
516
# ------------------------ RAG reply ----------------------------------------
def rag_reply(
    question: str,
    k: int = 8,
    n_sentences: int = 4,
    include_passages: bool = False,
    use_llm: bool = False,
    model: str = None,
    temperature: float = 0.2,
    strict_quotes_only: bool = False,
    w_tfidf: float = W_TFIDF_DEFAULT,
    w_bm25: float = W_BM25_DEFAULT,
    w_emb: float = W_EMB_DEFAULT
) -> str:
    """Answer *question* from the indexed literature.

    Pipeline: hybrid retrieval → MMR sentence selection → optional LLM
    paraphrase. Every mode carries inline (Doc, p.X) citations and falls
    back to raw passages when no quotable sentences survive filtering.
    """
    hits = hybrid_search(question, k=k, w_tfidf=w_tfidf, w_bm25=w_bm25, w_emb=w_emb)
    if hits.empty:
        return "No relevant passages found. Add more PDFs in literature_pdfs/ or adjust your query."

    selected = mmr_select_sentences(question, hits, top_n=int(n_sentences), pool_per_chunk=6, lambda_div=0.7)
    header_cites = "; ".join(
        f"{Path(r['doc_path']).name} (p.{_extract_page(r['text'])})" for _, r in hits.head(6).iterrows()
    )
    # Coverage note (helps debugging thin answers)
    srcs = {Path(r['doc_path']).name for _, r in hits.iterrows()}
    coverage_note = ""
    if len(srcs) < 3:
        coverage_note = f"\n\n> Note: Only {len(srcs)} unique source(s) contributed. Add more PDFs or increase Top-K."

    # Mode 1: verbatim quotes only — no paraphrasing whatsoever.
    if strict_quotes_only:
        if not selected:
            return f"**Quoted Passages:**\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2]) + \
                   f"\n\n**Citations:** {header_cites}{coverage_note}"
        msg = "**Quoted Passages:**\n- " + "\n- ".join(f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected)
        msg += f"\n\n**Citations:** {header_cites}{coverage_note}"
        if include_passages:
            msg += "\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2])
        return msg

    # Mode 2: extractive baseline, optionally upgraded by LLM synthesis.
    extractive = compose_extractive(selected)

    if use_llm and selected:
        lines = [f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected]
        llm_text = synthesize_with_llm(question, lines, model=model, temperature=temperature)
        if llm_text:
            msg = f"**Answer (GPT-5 synthesis):** {llm_text}\n\n**Citations:** {header_cites}{coverage_note}"
            if include_passages:
                msg += "\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2])
            return msg

    # Fallback: purely extractive
    if not extractive:
        return f"**Answer:** Here are relevant passages.\n\n**Citations:** {header_cites}{coverage_note}\n\n---\n" + \
               "\n\n".join(hits["text"].tolist()[:2])

    msg = f"**Answer:** {extractive}\n\n**Citations:** {header_cites}{coverage_note}"
    if include_passages:
        msg += "\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2])
    return msg
576
+
577
+ # =========================== Gradio UI =====================================
578
+ INPUT_COLS = [
579
+ "Filler1_Type", "Filler1_Dimensions", "Filler1_Diameter_um", "Filler1_Length_mm",
580
+ "Filler2_Type", "Filler2_Dimensions", "Filler2_Diameter_um", "Filler2_Length_mm",
581
+ "AvgFiller_Density_g/cm3", "AvgFiller_weight_%", "AvgFiller_Volume_%",
582
+ "Sample_Volume_mm3", "Electrode/Probe_Count", "Electrode/Probe_Material",
583
+ "W/B", "S/B", "GaugeLength_mm", "Curing_Conditions", "Num_ConductiveFillers",
584
+ "DryingTemperature_C", "DryingDuration_hrs", "LoadingRate_MPa/s",
585
+ "ElasticModulus_Gpa", "Voltage_Type", "Applied_Voltage_V"
586
+ ]
587
+ NUMERIC_INPUTS = {
588
+ "Filler1_Diameter_um","Filler1_Length_mm","Filler2_Diameter_um","Filler2_Length_mm",
589
+ "AvgFiller_Density_g/cm3","AvgFiller_weight_%","AvgFiller_Volume_%","Sample_Volume_mm3",
590
+ "Electrode/Probe_Count","W/B","S/B","GaugeLength_mm","Num_ConductiveFillers",
591
+ "DryingTemperature_C","DryingDuration_hrs","LoadingRate_MPa/s","ElasticModulus_Gpa",
592
+ "Applied_Voltage_V"
593
+ }
594
+ CAT_DIM_CHOICES = ["0D","1D","2D","3D","NA"]
595
+
596
+ def _coerce_row(args):
597
+ row = {c: v for c, v in zip(INPUT_COLS, args)}
598
+ clean = {}
599
+ for k, v in row.items():
600
+ if k in NUMERIC_INPUTS:
601
+ if v in ("", None): clean[k] = None
602
+ else:
603
+ try: clean[k] = float(v)
604
+ except: clean[k] = None
605
+ else:
606
+ clean[k] = "" if v is None else str(v).strip()
607
+ return pd.DataFrame([clean], columns=INPUT_COLS)
608
+
609
def _load_model():
    """Load the persisted XGB pipeline; raise if it has not been trained yet."""
    if os.path.exists(MODEL_OUT):
        return joblib.load(MODEL_OUT)
    raise FileNotFoundError(f"Model file not found at '{MODEL_OUT}'. Retrain above.")
613
+
614
def predict_fn(*args):
    """Gradio callback: coerce widget values, predict, invert the log1p transform.

    Returns the predicted gauge factor in MPa, or an error string (Gradio
    renders either in the output widget).
    """
    try:
        pipeline = _load_model()
        features = _coerce_row(args)
        pred = float(np.expm1(pipeline.predict(features))[0])
        # Clamp tiny negative values produced by floating-point round-off.
        if -1e-8 < pred < 0:
            pred = 0.0
        return pred
    except Exception as e:
        return f"Error during prediction: {e}"
624
+
625
def rag_chat_fn(message, history, top_k, n_sentences, include_passages,
                use_llm, model_name, temperature, strict_quotes_only,
                w_tfidf, w_bm25, w_emb):
    """Gradio ChatInterface callback: validate the message, route to rag_reply().

    ``history`` is supplied by ChatInterface but unused; the remaining
    parameters mirror the UI controls and are coerced to their proper types.
    """
    if not message or not message.strip():
        return "Ask a literature question (e.g., *How does CNT length affect gauge factor?*)"
    try:
        return rag_reply(
            question=message,
            k=int(top_k),
            n_sentences=int(n_sentences),
            include_passages=bool(include_passages),
            use_llm=bool(use_llm),
            model=(model_name or None),
            temperature=float(temperature),
            strict_quotes_only=bool(strict_quotes_only),
            w_tfidf=float(w_tfidf),
            w_bm25=float(w_bm25),
            w_emb=float(w_emb),
        )
    except Exception as e:
        return f"RAG error: {e}"
646
+
647
with gr.Blocks() as demo:
    gr.Markdown("# 🧪 Self-Sensing Concrete Assistant — Hybrid RAG (Accurate Q&A)")
    gr.Markdown(
        "- **Prediction**: XGBoost pipeline for **Stress Gauge Factor (MPa)**.\n"
        "- **Literature (Hybrid RAG)**: BM25 + TF-IDF + Dense embeddings with **MMR** sentence selection.\n"
        "- **Strict mode** shows only quoted sentences with citations; **GPT-5** can paraphrase strictly from those quotes.\n"
        "- **Local-only RAG**: drop PDFs into `literature_pdfs/` and the index will auto-refresh on restart."
    )

    with gr.Tabs():
        # ---- Tab 1: point prediction from mix-design / test-setup inputs ----
        with gr.Tab("🔮 Predict Gauge Factor (XGB)"):
            with gr.Row():
                with gr.Column():
                    # Widget order MUST mirror INPUT_COLS — predict_fn zips positionally.
                    predict_inputs = [
                        gr.Textbox(label="Filler1_Type", placeholder="e.g., CNT, Graphite, Steel fiber"),
                        gr.Dropdown(CAT_DIM_CHOICES, label="Filler1_Dimensions", value="NA"),
                        gr.Number(label="Filler1_Diameter_um"),
                        gr.Number(label="Filler1_Length_mm"),
                        gr.Textbox(label="Filler2_Type", placeholder="Optional"),
                        gr.Dropdown(CAT_DIM_CHOICES, label="Filler2_Dimensions", value="NA"),
                        gr.Number(label="Filler2_Diameter_um"),
                        gr.Number(label="Filler2_Length_mm"),
                        gr.Number(label="AvgFiller_Density_g/cm3"),
                        gr.Number(label="AvgFiller_weight_%"),
                        gr.Number(label="AvgFiller_Volume_%"),
                        gr.Number(label="Sample_Volume_mm3"),
                        gr.Number(label="Electrode/Probe_Count"),
                        gr.Textbox(label="Electrode/Probe_Material", placeholder="e.g., Copper, Silver paste"),
                        gr.Number(label="W/B"),
                        gr.Number(label="S/B"),
                        gr.Number(label="GaugeLength_mm"),
                        gr.Textbox(label="Curing_Conditions", placeholder="e.g., 28d water, 20°C"),
                        gr.Number(label="Num_ConductiveFillers"),
                        gr.Number(label="DryingTemperature_C"),
                        gr.Number(label="DryingDuration_hrs"),
                        gr.Number(label="LoadingRate_MPa/s"),
                        gr.Number(label="ElasticModulus_Gpa"),
                        gr.Textbox(label="Voltage_Type", placeholder="AC / DC"),
                        gr.Number(label="Applied_Voltage_V"),
                    ]
                with gr.Column():
                    gf_output = gr.Number(label="Predicted Stress_GF (MPa)", precision=6)
                    gr.Button("Predict", variant="primary").click(predict_fn, predict_inputs, gf_output)

        # ---- Tab 2: literature Q&A over the local PDF corpus ----
        with gr.Tab("📚 Ask the Literature (Hybrid RAG + MMR)"):
            with gr.Row():
                top_k = gr.Slider(5, 12, value=8, step=1, label="Top-K chunks")
                n_sentences = gr.Slider(2, 6, value=4, step=1, label="Answer length (sentences)")
                include_passages = gr.Checkbox(value=False, label="Include supporting passages")
            with gr.Accordion("Retriever weights (advanced)", open=False):
                w_tfidf = gr.Slider(0.0, 1.0, value=W_TFIDF_DEFAULT, step=0.05, label="TF-IDF weight")
                w_bm25 = gr.Slider(0.0, 1.0, value=W_BM25_DEFAULT, step=0.05, label="BM25 weight")
                w_emb = gr.Slider(0.0, 1.0, value=W_EMB_DEFAULT, step=0.05, label="Dense weight (set 0 if disabled)")
            with gr.Accordion("LLM & Controls", open=False):
                strict_quotes_only = gr.Checkbox(value=False, label="Strict quotes only (no paraphrasing)")
                use_llm = gr.Checkbox(value=False, label="Use GPT-5 to paraphrase selected sentences")
                model_name = gr.Textbox(value=os.getenv("OPENAI_MODEL", OPENAI_MODEL), label="LLM model", placeholder="e.g., gpt-5 or gpt-5-mini")
                temperature = gr.Slider(0.0, 1.0, value=0.2, step=0.05, label="Temperature")
            gr.ChatInterface(
                fn=rag_chat_fn,
                additional_inputs=[top_k, n_sentences, include_passages, use_llm, model_name, temperature, strict_quotes_only, w_tfidf, w_bm25, w_emb],
                title="Literature Q&A",
                description="Hybrid retrieval with diversity. Answers carry inline (Doc, p.X) citations. Toggle strict/LLM modes."
            )

# Note: add share=True to expose publicly (for iframe embedding)
demo.queue().launch()