Cuervo-x committed
Commit 23ba6eb · verified · 1 Parent(s): 3c598db

Upload 4 files

Files changed (5)
  1. .gitattributes +1 -0
  2. july3.xlsx +3 -0
  3. main.py +760 -0
  4. requirements.txt +0 -0
  5. stress_gf_xgb.joblib +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ july3.xlsx filter=lfs diff=lfs merge=lfs -text
july3.xlsx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fc4f44468eaf6f73112a550fcc058c200f617394bdad9f3db45a09293c0eec1
+ size 100575
main.py ADDED
@@ -0,0 +1,760 @@
+ # ================================================================
+ # Self-Sensing Concrete Assistant — Hybrid RAG + XGB + (opt) GPT-5
+ # FIXED for Windows/Conda import issues (transformers/quantizers)
+ # - Pins compatible versions (transformers 4.44.2, sbert 2.7.0, torch 2.x)
+ # - Disables TF/Flax backends; safe fallbacks if dense fails
+ # - Hybrid retrieval (BM25 + TF-IDF + Dense*) + MMR sentence selection
+ # - Optional OA PDF harvest (Crossref + Unpaywall) OR local folder
+ # - Optional GPT-5 synthesis strictly from selected cited sentences
+ # - Gradio UI with Prediction + Literature Q&A tabs
+ # ================================================================
+
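+ # Assumed install set mirroring the pins above and requirements.txt (unverified):
+ #   pip install "transformers==4.44.2" "sentence-transformers==2.7.0" torch xgboost
+ #   pip install gradio rank-bm25 pypdf pymupdf requests openai scikit-learn pandas openpyxl
+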
+ # ---------------------- MUST RUN THESE FLAGS FIRST ----------------------
+ import os
+ os.environ["TRANSFORMERS_NO_TF"] = "1"    # don't import TensorFlow
+ os.environ["TRANSFORMERS_NO_FLAX"] = "1"  # don't import Flax/JAX
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+ # ------------------------------- Imports -----------------------------------
+ import re, json, time, joblib, warnings, math, hashlib, urllib.parse, requests
+ from pathlib import Path
+ from typing import List, Dict, Optional
+ import numpy as np
+ import pandas as pd
+
+ from sklearn.model_selection import train_test_split
+ from sklearn.impute import SimpleImputer
+ from sklearn.pipeline import Pipeline
+ from sklearn.compose import ColumnTransformer
+ from sklearn.preprocessing import RobustScaler, OneHotEncoder
+ from sklearn.preprocessing import normalize as sk_normalize
+ from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
+ from sklearn.feature_selection import VarianceThreshold
+ from sklearn.feature_extraction.text import TfidfVectorizer
+
+ from xgboost import XGBRegressor
+ from pypdf import PdfReader
+ import fitz  # PyMuPDF
+ import gradio as gr
+
+ USE_DENSE = True
+ try:
+     from sentence_transformers import SentenceTransformer
+ except Exception as e:
+     USE_DENSE = False
+     print("⚠️ sentence-transformers unavailable; continuing with TF-IDF + BM25 only.\n", e)
+
+ from rank_bm25 import BM25Okapi
+ from openai import OpenAI
+
+ warnings.filterwarnings("ignore", category=UserWarning)
+
+ # ============================ Config =======================================
+ # --- Data & model paths ---
+ DATA_PATH = r"C:\Users\nmoha13\OneDrive - Louisiana State University\School\Grad School\ChatBot\LLM\july3.xlsx"  # <- update if needed
+ LOCAL_PDF_DIR = Path(r"C:\Users\nmoha13\OneDrive - Louisiana State University\School\Grad School\ChatBot\LLM")  # <- optional local folder
+
+ # --- RAG artifacts (kept in working dir) ---
+ ARTIFACT_DIR = Path("rag_artifacts"); ARTIFACT_DIR.mkdir(exist_ok=True)
+ MODEL_OUT = "stress_gf_xgb.joblib"
+ TFIDF_VECT_PATH = ARTIFACT_DIR / "tfidf_vectorizer.joblib"
+ TFIDF_MAT_PATH = ARTIFACT_DIR / "tfidf_matrix.joblib"
+ BM25_TOK_PATH = ARTIFACT_DIR / "bm25_tokens.joblib"
+ EMB_NPY_PATH = ARTIFACT_DIR / "chunk_embeddings.npy"
+ RAG_META_PATH = ARTIFACT_DIR / "chunks.parquet"
+
+ # --- Online OA sourcing (set USE_ONLINE_SOURCES=False to rely only on local PDFs) ---
+ USE_ONLINE_SOURCES = True
+ DOWNLOAD_DIR = Path("oa_pdfs"); DOWNLOAD_DIR.mkdir(exist_ok=True)
+
+ KEYWORDS = [
+     "self-sensing concrete gauge factor",
+     "piezoresistive cementitious composite",
+     "carbon nanotube cement gauge factor",
+     "graphene nanoplatelet self-sensing mortar",
+ ]
+ YEAR_FROM, YEAR_TO = 2010, 2025
+ MAX_DOIS_PER_QUERY = 25
+
+ # Required identifiers for APIs (use institutional email)
+ UNPAYWALL_EMAIL = os.getenv("UNPAYWALL_EMAIL", "your_email@lsu.edu")
+ CROSSREF_MAILTO = os.getenv("CROSSREF_MAILTO", "your_email@lsu.edu")
+
+ # --- Embedding model (fast CPU) ---
+ EMB_MODEL_NAME = os.getenv("EMB_MODEL_NAME", "sentence-transformers/all-MiniLM-L6-v2")
+
+ # --- OpenAI (optional LLM synthesis) ---
+ OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-5")  # e.g., "gpt-5-mini"
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)  # set env var to enable LLM
+
+ # --- Retrieval weights (UI defaults adapt if dense disabled) ---
+ W_TFIDF_DEFAULT = 0.50 if not USE_DENSE else 0.30
+ W_BM25_DEFAULT = 0.50 if not USE_DENSE else 0.30
+ W_EMB_DEFAULT = 0.00 if not USE_DENSE else 0.40
+
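+ # With dense retrieval available, the W_*_DEFAULT weights split 0.30/0.30/0.40
+ # (TF-IDF/BM25/dense); if sentence-transformers failed to import, the dense
+ # weight is zeroed and the sparse retrievers share it (0.50/0.50/0.00).
+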
+ RANDOM_SEED = 42
+
+ # ====================== OA PDF Harvesting (Crossref + Unpaywall) ===========
+ def crossref_search_dois(query, year_from=YEAR_FROM, year_to=YEAR_TO, rows=MAX_DOIS_PER_QUERY):
+     base = "https://api.crossref.org/works"
+     params = {
+         "query": query,
+         "filter": f"type:journal-article,from-pub-date:{year_from}-01-01,until-pub-date:{year_to}-12-31",
+         "rows": rows,
+         "mailto": CROSSREF_MAILTO
+     }
+     r = requests.get(base, params=params, timeout=30)
+     r.raise_for_status()
+     items = r.json().get("message", {}).get("items", [])
+     dois = []
+     for it in items:
+         doi = it.get("DOI")
+         title = " ".join(it.get("title", [])[:1]).strip() if it.get("title") else ""
+         if doi:
+             dois.append((doi, title))
+     return dois
+
+ def unpaywall_pdf_url(doi, email=UNPAYWALL_EMAIL):
+     url = f"https://api.unpaywall.org/v2/{urllib.parse.quote(doi)}?email={urllib.parse.quote(email)}"
+     r = requests.get(url, timeout=30)
+     if r.status_code != 200:
+         return None
+     data = r.json()
+     loc = data.get("best_oa_location") or {}
+     return loc.get("url_for_pdf") or loc.get("url")
+
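+ # Hypothetical smoke test for unpaywall_pdf_url (placeholder DOI; needs network):
+ #   print(unpaywall_pdf_url("10.1000/xyz123"))
+ # Expected: a direct PDF URL, an OA landing-page URL, or None if no OA copy exists.
+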
+ def _sanitize_filename(s):
+     return "".join(c for c in s if c.isalnum() or c in "._- ()").strip()[:180] or "paper"
+
+ def fetch_oa_pdfs(keywords=KEYWORDS, out_dir=DOWNLOAD_DIR, throttle=1.2):
+     saved = []
+     for q in keywords:
+         try:
+             dois = crossref_search_dois(q)
+         except Exception as e:
+             print(f"[Crossref] {q}: {e}")
+             continue
+         for doi, title in dois:
+             try:
+                 pdf_url = unpaywall_pdf_url(doi)
+             except Exception as e:
+                 print(f"[Unpaywall] {doi}: {e}")
+                 pdf_url = None
+             time.sleep(throttle)
+             if not pdf_url:
+                 continue
+             fname = _sanitize_filename(f"{title or doi}.pdf")
+             fp = out_dir / fname
+             if fp.exists():
+                 saved.append(fp); continue
+             try:
+                 rr = requests.get(pdf_url, timeout=60)
+                 ctype = rr.headers.get("Content-Type", "")
+                 if rr.status_code == 200 and "pdf" in ctype.lower():
+                     with open(fp, "wb") as f:
+                         f.write(rr.content)
+                     print(f"Saved: {fp.name}")
+                     saved.append(fp)
+                 else:
+                     print(f"Skipped (not pdf): {pdf_url}")
+             except Exception as e:
+                 print(f"[Download] {pdf_url}: {e}")
+     print(f"Total OA PDFs cached: {len(saved)}")
+     return saved
+
+ def get_pdf_source_dir():
+     if USE_ONLINE_SOURCES:
+         fetch_oa_pdfs()  # idempotent; skips existing
+         return DOWNLOAD_DIR
+     else:
+         return LOCAL_PDF_DIR
+
+ # ==================== XGB Pipeline (Prediction) ============================
+ def make_onehot():
+     try:
+         return OneHotEncoder(handle_unknown="ignore", sparse_output=False)
+     except TypeError:
+         return OneHotEncoder(handle_unknown="ignore", sparse=False)
+
+ def rmse(y_true, y_pred):
+     return math.sqrt(mean_squared_error(y_true, y_pred))  # take the root so the reported value is RMSE, not MSE
+
+ def evaluate(m, X, y_log, name="Model"):
+     y_pred_log = m.predict(X)
+     y_pred = np.expm1(y_pred_log)
+     y_true = np.expm1(y_log)
+     r2 = r2_score(y_true, y_pred)
+     r = rmse(y_true, y_pred)
+     mae = mean_absolute_error(y_true, y_pred)
+     print(f"{name}: R²={r2:.3f}, RMSE={r:.3f}, MAE={mae:.3f}")
+     return r2, r, mae
+
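+ # evaluate() is defined for ad-hoc checks, e.g. run manually after training:
+ #   evaluate(xgb_pipe, X_test, y_test, name="XGB (test)")
+ # Metrics are reported in original MPa units after the expm1 back-transform.
+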
+ # --- Load data
+ df = pd.read_excel(DATA_PATH)
+ df.columns = df.columns.str.strip()
+
+ drop_cols = [
+     'Loading rate (MPa/s)', 'Voltage (V) AC\\DC', 'Elastic Modulus (GPa)', 'Duration (hrs) of Dying Method'
+ ]
+ df = df.drop(columns=[c for c in drop_cols if c in df.columns], errors='ignore')
+
+ main_variables = [
+     'Filler1_Type', 'Filler1_Diameter_um', 'Filler1_Length_mm',
+     'AvgFiller_Density_g/cm3', 'AvgFiller_weight_%', 'AvgFiller_Volume_%',
+     'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Diameter_um', 'Filler2_Length_mm',
+     'Filler2_Dimensions', 'Sample_Volume_mm3', 'Electrode/Probe_Count', 'Electrode/Probe_Material',
+     'W/B', 'S/B', 'GaugeLength_mm', 'Curing_Conditions', 'Num_ConductiveFillers',
+     'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
+     'ElasticModulus_Gpa', 'Voltage_Type', 'Applied_Voltage_V'
+ ]
+ target_col = 'Stress_GF_Mpa'
+
+ df = df[main_variables + [target_col]].copy()
+ df = df.dropna(subset=[target_col])
+ df = df[df[target_col] > 0]
+
+ numeric_cols = [
+     'Filler1_Diameter_um', 'Filler1_Length_mm', 'AvgFiller_Density_g/cm3',
+     'AvgFiller_weight_%', 'AvgFiller_Volume_%', 'Filler2_Diameter_um',
+     'Filler2_Length_mm', 'Sample_Volume_mm3', 'Electrode/Probe_Count',
+     'W/B', 'S/B', 'GaugeLength_mm', 'Num_ConductiveFillers',
+     'DryingTemperature_C', 'DryingDuration_hrs', 'LoadingRate_MPa/s',
+     'ElasticModulus_Gpa', 'Applied_Voltage_V'
+ ]
+ categorical_cols = [
+     'Filler1_Type', 'Filler1_Dimensions', 'Filler2_Type', 'Filler2_Dimensions',
+     'Electrode/Probe_Material', 'Curing_Conditions', 'Voltage_Type'
+ ]
+
+ for c in numeric_cols:
+     df[c] = pd.to_numeric(df[c], errors='coerce')
+ for c in categorical_cols:
+     df[c] = df[c].astype(str)
+
+ vt = VarianceThreshold(threshold=1e-3)
+ # VarianceThreshold rejects NaN, and to_numeric(errors='coerce') introduces them,
+ # so fit the variance screen on a median-imputed copy of the numeric columns.
+ vt.fit(df[numeric_cols].fillna(df[numeric_cols].median()))
+ numeric_cols = [c for c in numeric_cols if c not in df[numeric_cols].columns[vt.variances_ < 1e-3]]
+
+ corr = df[numeric_cols].corr().abs()
+ upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
+ to_drop = [c for c in upper.columns if any(upper[c] > 0.95)]
+ numeric_cols = [c for c in numeric_cols if c not in to_drop]
+
+ X = df[main_variables].copy()
+ y = np.log1p(df[target_col])
+
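+ # The target is modeled in log space: y = log1p(GF) = log(1 + GF), inverted at
+ # prediction time with expm1, so a model output of 2.0 maps back to
+ # exp(2.0) - 1 ≈ 6.39 MPa. This compresses the long right tail of gauge factors.
+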
+ X_train, X_test, y_train, y_test = train_test_split(
+     X, y, test_size=0.2, random_state=RANDOM_SEED
+ )
+
+ BEST_PARAMS = {
+     "regressor__subsample": 1.0,
+     "regressor__reg_lambda": 5,
+     "regressor__reg_alpha": 0.05,
+     "regressor__n_estimators": 300,
+     "regressor__max_depth": 6,
+     "regressor__learning_rate": 0.1,
+     "regressor__gamma": 0,
+     "regressor__colsample_bytree": 1.0
+ }
+
+ def train_and_save_model():
+     num_tf = Pipeline([('imputer', SimpleImputer(strategy='median')),
+                        ('scaler', RobustScaler())])
+     cat_tf = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')),
+                        ('onehot', make_onehot())])
+
+     preprocessor = ColumnTransformer([
+         ('num', num_tf, numeric_cols),
+         ('cat', cat_tf, categorical_cols)
+     ])
+
+     xgb_pipe = Pipeline([
+         ('preprocessor', preprocessor),
+         ('regressor', XGBRegressor(random_state=RANDOM_SEED, n_jobs=-1, verbosity=0))
+     ])
+     xgb_pipe.set_params(**BEST_PARAMS).fit(X_train, y_train)
+
+     joblib.dump(xgb_pipe, MODEL_OUT)
+     print(f"✅ Trained new model and saved → {MODEL_OUT}")
+     return xgb_pipe
+
+ def load_or_train_model():
+     if os.path.exists(MODEL_OUT):
+         print(f"📂 Loading existing model from {MODEL_OUT}")
+         return joblib.load(MODEL_OUT)
+     else:
+         print("⚠️ No saved model found. Training a new one...")
+         return train_and_save_model()
+
+ xgb_pipe = load_or_train_model()
+
+ # ======================= Hybrid RAG Indexing ================================
+ _SENT_SPLIT_RE = re.compile(r"(?<=[.!?])\s+|\n+")
+ TOKEN_RE = re.compile(r"[A-Za-z0-9_#+\-/\.%]+")
+
+ def sent_split(text: str) -> List[str]:
+     sents = [s.strip() for s in _SENT_SPLIT_RE.split(text) if s.strip()]
+     return [s for s in sents if len(s.split()) >= 5]
+
+ def tokenize(text: str) -> List[str]:
+     return [t.lower() for t in TOKEN_RE.findall(text)]
+
+ def extract_text_pymupdf(pdf_path: Path) -> str:
+     try:
+         doc = fitz.open(pdf_path)
+         buff = []
+         for i, page in enumerate(doc):
+             txt = page.get_text("text") or ""
+             buff.append(f"[[PAGE={i+1}]]\n{txt}")
+         return "\n\n".join(buff)
+     except Exception:
+         # Fallback to PyPDF
+         try:
+             reader = PdfReader(str(pdf_path))
+             buff = []
+             for i, p in enumerate(reader.pages):
+                 txt = p.extract_text() or ""
+                 buff.append(f"[[PAGE={i+1}]]\n{txt}")
+             return "\n\n".join(buff)
+         except Exception as e:
+             print(f"PDF read error ({pdf_path}): {e}")
+             return ""
+
+ def chunk_by_sentence_windows(text: str, win_size=8, overlap=2) -> List[str]:
+     sents = sent_split(text)
+     chunks = []
+     step = max(1, win_size - overlap)
+     for i in range(0, len(sents), step):
+         window = sents[i:i+win_size]
+         if not window:
+             break
+         chunks.append(" ".join(window))
+     return chunks
+
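+ # Worked example for chunk_by_sentence_windows: with win_size=8 and overlap=2
+ # the step is 6, so a 20-sentence text yields windows over sentence indices
+ # [0:8], [6:14], [12:20], [18:20]; consecutive chunks share two sentences.
+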
+ def _safe_init_st_model(name: str):
+     """Try to init SentenceTransformer; on failure, disable dense and return None."""
+     global USE_DENSE
+     if not USE_DENSE:
+         return None
+     try:
+         m = SentenceTransformer(name)
+         return m
+     except Exception as e:
+         print("⚠️ Could not initialize SentenceTransformer; disabling dense embeddings.\n", e)
+         USE_DENSE = False
+         return None
+
+ def build_or_load_hybrid(pdf_dir: Path):
+     have_cache = (TFIDF_VECT_PATH.exists() and TFIDF_MAT_PATH.exists()
+                   and BM25_TOK_PATH.exists() and RAG_META_PATH.exists()
+                   and (EMB_NPY_PATH.exists() or not USE_DENSE))
+     if have_cache:
+         vectorizer = joblib.load(TFIDF_VECT_PATH)
+         X_tfidf = joblib.load(TFIDF_MAT_PATH)
+         meta = pd.read_parquet(RAG_META_PATH)
+         bm25_toks = joblib.load(BM25_TOK_PATH)
+         emb = np.load(EMB_NPY_PATH) if (USE_DENSE and EMB_NPY_PATH.exists()) else None
+         print("Loaded hybrid index.")
+         return vectorizer, X_tfidf, meta, bm25_toks, emb
+
+     rows, all_tokens = [], []
+     pdf_paths = list(Path(pdf_dir).glob("**/*.pdf"))
+     if LOCAL_PDF_DIR.exists() and LOCAL_PDF_DIR != pdf_dir:
+         pdf_paths += list(LOCAL_PDF_DIR.glob("**/*.pdf"))
+
+     print(f"Indexing PDFs from {pdf_dir} (+ local merge if present). Found {len(pdf_paths)} files.")
+     for pdf in pdf_paths:
+         raw = extract_text_pymupdf(pdf)
+         if not raw.strip():
+             continue
+         for i, ch in enumerate(chunk_by_sentence_windows(raw, win_size=8, overlap=2)):
+             rows.append({"doc_path": str(pdf), "chunk_id": i, "text": ch})
+             all_tokens.append(tokenize(ch))
+
+     if not rows:
+         raise RuntimeError(f"No PDF text found under: {pdf_dir}")
+
+     meta = pd.DataFrame(rows)
+
+     # TF-IDF
+     vectorizer = TfidfVectorizer(
+         ngram_range=(1, 2),
+         min_df=1, max_df=0.95,
+         sublinear_tf=True, smooth_idf=True,
+         lowercase=True,
+         token_pattern=r"(?u)\b\w[\w\-\./%+#]*\b"
+     )
+     X_tfidf = vectorizer.fit_transform(meta["text"].tolist())
+
+     # Dense (optional)
+     emb = None
+     if USE_DENSE:
+         try:
+             st_model_tmp = _safe_init_st_model(EMB_MODEL_NAME)
+             if st_model_tmp is not None:
+                 em = st_model_tmp.encode(meta["text"].tolist(), batch_size=64, show_progress_bar=False, convert_to_numpy=True)
+                 emb = sk_normalize(em)
+                 np.save(EMB_NPY_PATH, emb)
+         except Exception as e:
+             emb = None
+             print("⚠️ Dense embeddings failed; continuing without them.\n", e)
+
+     # Save artifacts
+     joblib.dump(vectorizer, TFIDF_VECT_PATH)
+     joblib.dump(X_tfidf, TFIDF_MAT_PATH)
+     joblib.dump(all_tokens, BM25_TOK_PATH)
+     meta.to_parquet(RAG_META_PATH, index=False)
+
+     print(f"Indexed {len(meta)} chunks from {meta['doc_path'].nunique()} PDFs.")
+     return vectorizer, X_tfidf, meta, all_tokens, emb
+
+ # Build hybrid index
+ pdf_source_dir = get_pdf_source_dir()
+ tfidf_vectorizer, tfidf_matrix, rag_meta, bm25_tokens, emb_matrix = build_or_load_hybrid(pdf_source_dir)
+ bm25 = BM25Okapi(bm25_tokens)
+ st_query_model = _safe_init_st_model(EMB_MODEL_NAME)  # safe init; may set USE_DENSE=False
+
+ # If dense failed at runtime, update default weights in case UI uses them
+ if not USE_DENSE:
+     W_TFIDF_DEFAULT, W_BM25_DEFAULT, W_EMB_DEFAULT = 0.50, 0.50, 0.00
+
+ def _extract_page(text_chunk: str) -> str:
+     m = list(re.finditer(r"\[\[PAGE=(\d+)\]\]", text_chunk))
+     return (m[-1].group(1) if m else "?")
+
+ # ---------------------- Hybrid search --------------------------------------
+ def hybrid_search(query: str, k=8, w_tfidf=W_TFIDF_DEFAULT, w_bm25=W_BM25_DEFAULT, w_emb=W_EMB_DEFAULT):
+     # Dense (optional)
+     if USE_DENSE and st_query_model is not None and emb_matrix is not None and w_emb > 0:
+         try:
+             q_emb = st_query_model.encode([query], convert_to_numpy=True)
+             q_emb = sk_normalize(q_emb)[0]
+             dense_scores = emb_matrix @ q_emb
+         except Exception as e:
+             print("⚠️ Dense query encoding failed; ignoring dense this run.\n", e)
+             dense_scores = np.zeros(len(rag_meta), dtype=float)
+             w_emb = 0.0
+     else:
+         dense_scores = np.zeros(len(rag_meta), dtype=float)
+         w_emb = 0.0  # force off
+
+     # TF-IDF
+     q_vec = tfidf_vectorizer.transform([query])
+     tfidf_scores = (tfidf_matrix @ q_vec.T).toarray().ravel()
+
+     # BM25
+     q_tokens = [t.lower() for t in TOKEN_RE.findall(query)]
+     bm25_scores = np.array(bm25.get_scores(q_tokens), dtype=float)
+
+     def _norm(x):
+         x = np.asarray(x, dtype=float)
+         if np.allclose(x.max(), x.min()):
+             return np.zeros_like(x)
+         return (x - x.min()) / (x.max() - x.min())
+
+     s_dense = _norm(dense_scores)
+     s_tfidf = _norm(tfidf_scores)
+     s_bm25 = _norm(bm25_scores)
+
+     total_w = (w_tfidf + w_bm25 + w_emb) or 1.0
+     w_tfidf, w_bm25, w_emb = w_tfidf/total_w, w_bm25/total_w, w_emb/total_w
+
+     combo = w_emb * s_dense + w_tfidf * s_tfidf + w_bm25 * s_bm25
+     idx = np.argsort(-combo)[:k]
+     hits = rag_meta.iloc[idx].copy()
+     hits["score_dense"] = s_dense[idx]
+     hits["score_tfidf"] = s_tfidf[idx]
+     hits["score_bm25"] = s_bm25[idx]
+     hits["score"] = combo[idx]
+     return hits.reset_index(drop=True)
+
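+ # Fusion used by hybrid_search, on min-max-normalized per-retriever scores:
+ #   combo = w_emb * s_dense + w_tfidf * s_tfidf + w_bm25 * s_bm25
+ # Weights are renormalized to sum to 1, so (0.3, 0.3, 0.4) and (3, 3, 4) give
+ # identical rankings; only the relative proportions matter.
+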
+ # -------------- Sentence selection with MMR (diversity) --------------------
+ def split_sentences(text: str) -> List[str]:
+     sents = sent_split(text)
+     return [s for s in sents if 6 <= len(s.split()) <= 60]
+
+ def mmr_select_sentences(question: str, hits: pd.DataFrame, top_n=4, pool_per_chunk=6, lambda_div=0.7):
+     pool = []
+     for _, row in hits.iterrows():
+         doc = Path(row["doc_path"]).name
+         page = _extract_page(row["text"])
+         for s in split_sentences(row["text"])[:pool_per_chunk]:
+             pool.append({"sent": s, "doc": doc, "page": page})
+     if not pool:
+         return []
+
+     sent_texts = [p["sent"] for p in pool]
+
+     if USE_DENSE and st_query_model is not None:
+         try:
+             texts = [question] + sent_texts
+             enc = st_query_model.encode(texts, convert_to_numpy=True)
+             q_vec = sk_normalize(enc[:1])[0]
+             S = sk_normalize(enc[1:])
+             rel = (S @ q_vec)
+             def sim_fn(i, j): return float(S[i] @ S[j])
+         except Exception as e:
+             print("⚠️ Dense sentence encoding failed; falling back to TF-IDF for MMR.\n", e)
+             Q = tfidf_vectorizer.transform([question])
+             S = tfidf_vectorizer.transform(sent_texts)
+             rel = (S @ Q.T).toarray().ravel()
+             def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])
+     else:
+         Q = tfidf_vectorizer.transform([question])
+         S = tfidf_vectorizer.transform(sent_texts)
+         rel = (S @ Q.T).toarray().ravel()
+         def sim_fn(i, j): return float((S[i] @ S[j].T).toarray()[0, 0])
+
+     selected, selected_idx = [], []
+     remain = list(range(len(pool)))
+     first = int(np.argmax(rel))
+     selected.append(pool[first]); selected_idx.append(first); remain.remove(first)
+
+     while len(selected) < top_n and remain:
+         cand_scores = []
+         for i in remain:
+             sim_to_sel = max(sim_fn(i, j) for j in selected_idx) if selected_idx else 0.0
+             score = lambda_div * rel[i] - (1 - lambda_div) * sim_to_sel
+             cand_scores.append((score, i))
+         cand_scores.sort(reverse=True)
+         best_i = cand_scores[0][1]
+         selected.append(pool[best_i]); selected_idx.append(best_i); remain.remove(best_i)
+     return selected
+
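+ # MMR criterion used above: each step picks the sentence i maximizing
+ #   lambda_div * rel(i) - (1 - lambda_div) * max_{j in selected} sim(i, j)
+ # lambda_div=0.7 leans toward relevance; lower values trade it for diversity.
+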
+ def compose_extractive(selected: List[Dict]) -> str:
+     if not selected:
+         return ""
+     lines = [f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected]
+     return " ".join(lines)
+
+ # ------------------- Optional GPT-5 synthesis ------------------------------
+ def synthesize_with_llm(question: str, sentence_lines: List[str], model: str = None, temperature: float = 0.2) -> Optional[str]:
+     if OPENAI_API_KEY is None:
+         return None  # not configured → skip synthesis
+     client = OpenAI(api_key=OPENAI_API_KEY)
+     if model is None:
+         model = OPENAI_MODEL
+
+     SYSTEM_PROMPT = (
+         "You are a scientific assistant for self-sensing cementitious materials.\n"
+         "Answer STRICTLY using the provided sentences.\n"
+         "Do not invent facts. Keep it concise (3–6 sentences).\n"
+         "Retain inline citations like (Doc.pdf, p.X) exactly as given."
+     )
+     user_prompt = (
+         f"Question: {question}\n\n"
+         "Use ONLY these sentences to answer; keep their inline citations:\n" +
+         "\n".join(f"- {s}" for s in sentence_lines)
+     )
+     try:
+         resp = client.responses.create(
+             model=model,
+             input=[
+                 {"role": "system", "content": SYSTEM_PROMPT},
+                 {"role": "user", "content": user_prompt},
+             ],
+             temperature=temperature,
+         )
+         return getattr(resp, "output_text", None) or str(resp)
+     except Exception:
+         return None
+
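+ # Any API failure above yields None, so rag_reply quietly falls back to the
+ # purely extractive answer instead of surfacing an OpenAI error in the UI.
+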
+ # ------------------------ RAG reply ----------------------------------------
+ def rag_reply(
+     question: str,
+     k: int = 8,
+     n_sentences: int = 4,
+     include_passages: bool = False,
+     use_llm: bool = False,
+     model: str = None,
+     temperature: float = 0.2,
+     strict_quotes_only: bool = False,
+     w_tfidf: float = W_TFIDF_DEFAULT,
+     w_bm25: float = W_BM25_DEFAULT,
+     w_emb: float = W_EMB_DEFAULT
+ ) -> str:
+     hits = hybrid_search(question, k=k, w_tfidf=w_tfidf, w_bm25=w_bm25, w_emb=w_emb)
+     if hits.empty:
+         return "No relevant passages found. Try rephrasing or adding more PDFs."
+
+     selected = mmr_select_sentences(question, hits, top_n=int(n_sentences), pool_per_chunk=6, lambda_div=0.7)
+     header_cites = "; ".join(
+         f"{Path(r['doc_path']).name} (p.{_extract_page(r['text'])})" for _, r in hits.head(6).iterrows()
+     )
+     # Coverage note (helps debugging thin answers)
+     srcs = {Path(r['doc_path']).name for _, r in hits.iterrows()}
+     coverage_note = ""
+     if len(srcs) < 3:
+         coverage_note = f"\n\n> Note: Only {len(srcs)} unique source(s) contributed. Add more PDFs or increase Top-K."
+
+     if strict_quotes_only:
+         if not selected:
+             return "**Quoted Passages:**\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2]) + \
+                    f"\n\n**Citations:** {header_cites}{coverage_note}"
+         msg = "**Quoted Passages:**\n- " + "\n- ".join(f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected)
+         msg += f"\n\n**Citations:** {header_cites}{coverage_note}"
+         if include_passages:
+             msg += "\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2])
+         return msg
+
+     # Extractive baseline
+     extractive = compose_extractive(selected)
+
+     # Optional LLM synthesis
+     if use_llm and selected:
+         lines = [f"{s['sent']} ({s['doc']}, p.{s['page']})" for s in selected]
+         llm_text = synthesize_with_llm(question, lines, model=model, temperature=temperature)
+         if llm_text:
+             msg = f"**Answer (GPT-5 synthesis):** {llm_text}\n\n**Citations:** {header_cites}{coverage_note}"
+             if include_passages:
+                 msg += "\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2])
+             return msg
+
+     # Fallback: purely extractive
+     if not extractive:
+         return f"**Answer:** Here are relevant passages.\n\n**Citations:** {header_cites}{coverage_note}\n\n---\n" + \
+                "\n\n".join(hits["text"].tolist()[:2])
+
+     msg = f"**Answer:** {extractive}\n\n**Citations:** {header_cites}{coverage_note}"
+     if include_passages:
+         msg += "\n\n---\n" + "\n\n".join(hits["text"].tolist()[:2])
+     return msg
+
+ # =========================== Gradio UI =====================================
+ INPUT_COLS = [
+     "Filler1_Type", "Filler1_Dimensions", "Filler1_Diameter_um", "Filler1_Length_mm",
+     "Filler2_Type", "Filler2_Dimensions", "Filler2_Diameter_um", "Filler2_Length_mm",
+     "AvgFiller_Density_g/cm3", "AvgFiller_weight_%", "AvgFiller_Volume_%",
+     "Sample_Volume_mm3", "Electrode/Probe_Count", "Electrode/Probe_Material",
+     "W/B", "S/B", "GaugeLength_mm", "Curing_Conditions", "Num_ConductiveFillers",
+     "DryingTemperature_C", "DryingDuration_hrs", "LoadingRate_MPa/s",
+     "ElasticModulus_Gpa", "Voltage_Type", "Applied_Voltage_V"
+ ]
+ NUMERIC_INPUTS = {
+     "Filler1_Diameter_um", "Filler1_Length_mm", "Filler2_Diameter_um", "Filler2_Length_mm",
+     "AvgFiller_Density_g/cm3", "AvgFiller_weight_%", "AvgFiller_Volume_%", "Sample_Volume_mm3",
+     "Electrode/Probe_Count", "W/B", "S/B", "GaugeLength_mm", "Num_ConductiveFillers",
+     "DryingTemperature_C", "DryingDuration_hrs", "LoadingRate_MPa/s", "ElasticModulus_Gpa",
+     "Applied_Voltage_V"
+ }
+ CAT_DIM_CHOICES = ["0D", "1D", "2D", "3D", "NA"]
+
+ def _coerce_row(args):
+     row = {c: v for c, v in zip(INPUT_COLS, args)}
+     clean = {}
+     for k, v in row.items():
+         if k in NUMERIC_INPUTS:
+             if v in ("", None):
+                 clean[k] = None
+             else:
+                 try:
+                     clean[k] = float(v)
+                 except (TypeError, ValueError):  # no bare except: unparseable input becomes missing
+                     clean[k] = None
+         else:
+             clean[k] = "" if v is None else str(v).strip()
+     return pd.DataFrame([clean], columns=INPUT_COLS)
+
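+ # Example with hypothetical inputs: ("CNT", "1D", 20, 1.5, ...) in INPUT_COLS
+ # order becomes a one-row DataFrame whose numeric fields are floats (or None
+ # when blank/unparseable) and whose categoricals are stripped strings, matching
+ # the training-time schema expected by the saved pipeline.
+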
+ def _load_model():
+     if not os.path.exists(MODEL_OUT):
+         raise FileNotFoundError(f"Model file not found at '{MODEL_OUT}'. Retrain above.")
+     return joblib.load(MODEL_OUT)
+
+ def predict_fn(*args):
+     try:
+         mdl = _load_model()
+         X_new = _coerce_row(args)
+         y_log = mdl.predict(X_new)
+         y = float(np.expm1(y_log)[0])
+         if -1e-8 < y < 0:
+             y = 0.0
+         return y
+     except Exception as e:
+         return f"Error during prediction: {e}"
+
+ def rag_chat_fn(message, history, top_k, n_sentences, include_passages,
+                 use_llm, model_name, temperature, strict_quotes_only,
+                 w_tfidf, w_bm25, w_emb):
+     if not message or not message.strip():
+         return "Ask a literature question (e.g., *How does CNT length affect gauge factor?*)"
+     try:
+         return rag_reply(
+             question=message,
+             k=int(top_k),
+             n_sentences=int(n_sentences),
+             include_passages=bool(include_passages),
+             use_llm=bool(use_llm),
+             model=(model_name or None),
+             temperature=float(temperature),
+             strict_quotes_only=bool(strict_quotes_only),
+             w_tfidf=float(w_tfidf),
+             w_bm25=float(w_bm25),
+             w_emb=float(w_emb),
+         )
+     except Exception as e:
+         return f"RAG error: {e}"
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🧪 Self-Sensing Concrete Assistant — Hybrid RAG (Accurate Q&A)")
+     gr.Markdown(
+         "- **Prediction**: XGBoost pipeline for **Stress Gauge Factor (MPa)**.\n"
+         "- **Literature (Hybrid RAG)**: BM25 + TF-IDF + Dense embeddings with **MMR** sentence selection.\n"
+         "- **Strict mode** shows only quoted sentences with citations; **GPT-5** can paraphrase strictly from those quotes.\n"
+         "- Toggle **online OA harvesting** in config (Crossref + Unpaywall) or use a local PDF folder."
+     )
+
+     with gr.Tabs():
+         with gr.Tab("🔮 Predict Gauge Factor (XGB)"):
+             with gr.Row():
+                 with gr.Column():
+                     inputs = [
+                         gr.Textbox(label="Filler1_Type", placeholder="e.g., CNT, Graphite, Steel fiber"),
+                         gr.Dropdown(CAT_DIM_CHOICES, label="Filler1_Dimensions", value="NA"),
+                         gr.Number(label="Filler1_Diameter_um"),
+                         gr.Number(label="Filler1_Length_mm"),
+                         gr.Textbox(label="Filler2_Type", placeholder="Optional"),
+                         gr.Dropdown(CAT_DIM_CHOICES, label="Filler2_Dimensions", value="NA"),
+                         gr.Number(label="Filler2_Diameter_um"),
+                         gr.Number(label="Filler2_Length_mm"),
+                         gr.Number(label="AvgFiller_Density_g/cm3"),
+                         gr.Number(label="AvgFiller_weight_%"),
+                         gr.Number(label="AvgFiller_Volume_%"),
+                         gr.Number(label="Sample_Volume_mm3"),
+                         gr.Number(label="Electrode/Probe_Count"),
+                         gr.Textbox(label="Electrode/Probe_Material", placeholder="e.g., Copper, Silver paste"),
+                         gr.Number(label="W/B"),
+                         gr.Number(label="S/B"),
+                         gr.Number(label="GaugeLength_mm"),
+                         gr.Textbox(label="Curing_Conditions", placeholder="e.g., 28d water, 20°C"),
+                         gr.Number(label="Num_ConductiveFillers"),
+                         gr.Number(label="DryingTemperature_C"),
+                         gr.Number(label="DryingDuration_hrs"),
+                         gr.Number(label="LoadingRate_MPa/s"),
+                         gr.Number(label="ElasticModulus_Gpa"),
+                         gr.Textbox(label="Voltage_Type", placeholder="AC / DC"),
+                         gr.Number(label="Applied_Voltage_V"),
+                     ]
+                 with gr.Column():
+                     out_pred = gr.Number(label="Predicted Stress_GF (MPa)", precision=6)
+                     gr.Button("Predict", variant="primary").click(predict_fn, inputs, out_pred)
+
+         with gr.Tab("📚 Ask the Literature (Hybrid RAG + MMR)"):
+             with gr.Row():
+                 top_k = gr.Slider(5, 12, value=8, step=1, label="Top-K chunks")
+                 n_sentences = gr.Slider(2, 6, value=4, step=1, label="Answer length (sentences)")
+                 include_passages = gr.Checkbox(value=False, label="Include supporting passages")
+             with gr.Accordion("Retriever weights (advanced)", open=False):
+                 w_tfidf = gr.Slider(0.0, 1.0, value=W_TFIDF_DEFAULT, step=0.05, label="TF-IDF weight")
+                 w_bm25 = gr.Slider(0.0, 1.0, value=W_BM25_DEFAULT, step=0.05, label="BM25 weight")
+                 w_emb = gr.Slider(0.0, 1.0, value=W_EMB_DEFAULT, step=0.05, label="Dense weight (set 0 if disabled)")
+             with gr.Accordion("LLM & Controls", open=False):
+                 strict_quotes_only = gr.Checkbox(value=False, label="Strict quotes only (no paraphrasing)")
+                 use_llm = gr.Checkbox(value=False, label="Use GPT-5 to paraphrase selected sentences")
+                 model_name = gr.Textbox(value=os.getenv("OPENAI_MODEL", OPENAI_MODEL), label="LLM model", placeholder="e.g., gpt-5 or gpt-5-mini")
+                 temperature = gr.Slider(0.0, 1.0, value=0.2, step=0.05, label="Temperature")
+             gr.ChatInterface(
+                 fn=rag_chat_fn,
+                 additional_inputs=[top_k, n_sentences, include_passages, use_llm, model_name, temperature, strict_quotes_only, w_tfidf, w_bm25, w_emb],
+                 title="Literature Q&A",
+                 description="Hybrid retrieval with diversity. Answers carry inline (Doc, p.X) citations. Toggle strict/LLM modes."
+             )
+
+ # Note: add share=True to expose publicly (for iframe embedding)
+ demo.queue().launch()
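+ # e.g., demo.queue().launch(share=True) to get a public gradio.live link
+ # (assumption: suitable for the iframe embedding mentioned above)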
requirements.txt ADDED
Binary file (2.89 kB)
stress_gf_xgb.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bb87e629c6d09fcd4a0465ce7584fdecf4f90ac8363bad40458a4b19e855afa
+ size 416235