Spaces:
Running
Running
Upload app.py with huggingface_hub
Browse files
app.py
CHANGED
|
@@ -28,12 +28,56 @@ KNOWHOW = ("MCL: Sylgard 184 PDMS 10:1 ratio 48hr cure green laser PIV 70bpm 5L/
|
|
| 28 |
"TGT: Arduino Uno Stepper Motor 150mL blood sampled at 0 20 40 60 minutes. "
|
| 29 |
"NORMAL RANGES: TAT below 8 ng/mL. PF1.2 below 2.0 nmol/L. Free hemoglobin below 20 mg/L. Platelets above 150 thousand per uL. "
|
| 30 |
"HIGH RISK: TAT above 15. PF1.2 above 3.0. Hemoglobin above 50. Platelets below 100. "
|
| 31 |
-
"uPAD: Jaffe reaction creatinine picric acid orange-red. Normal creatinine 0.6-1.2 mg/dL. Borderline 1.2-1.5. CKD above 1.5.
|
| 32 |
-
"
|
|
|
|
| 33 |
"PIV: green laser 532nm time-resolved. Normal velocity 0.5-2.0 m/s. Normal shear below 5 Pa. Risk above 10 Pa. "
|
| 34 |
"Equipment: Heska Element HT5 hematology analyzer time-resolved PIV Tygon tubing Arduino Uno stepper motor.")
|
| 35 |
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
CHUNKS = []
|
| 38 |
METADATA = []
|
| 39 |
EMBEDDINGS = None
|
|
@@ -55,10 +99,10 @@ def load_papers():
|
|
| 55 |
EMBEDDINGS = np.load(emb_path)
|
| 56 |
EMBEDDER = SentenceTransformer("all-MiniLM-L6-v2")
|
| 57 |
PAPERS_LOADED = True
|
| 58 |
-
print(
|
| 59 |
return True
|
| 60 |
except Exception as e:
|
| 61 |
-
print(
|
| 62 |
return False
|
| 63 |
|
| 64 |
def load_cardiolab_model():
|
|
@@ -66,29 +110,26 @@ def load_cardiolab_model():
|
|
| 66 |
try:
|
| 67 |
import torch
|
| 68 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 69 |
-
from peft import PeftModel
|
| 70 |
print("Loading CardioLab fine-tuned model...")
|
| 71 |
-
base_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
|
| 72 |
CARDIOLAB_TOKENIZER = AutoTokenizer.from_pretrained(CARDIOLAB_MODEL, token=HF_TOKEN)
|
| 73 |
CARDIOLAB_TOKENIZER.pad_token = CARDIOLAB_TOKENIZER.eos_token
|
| 74 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 75 |
CARDIOLAB_LLM = AutoModelForCausalLM.from_pretrained(
|
| 76 |
CARDIOLAB_MODEL, token=HF_TOKEN,
|
| 77 |
-
torch_dtype=torch.float16 if device=="cuda" else torch.float32,
|
| 78 |
-
device_map="auto" if device=="cuda" else None,
|
| 79 |
low_cpu_mem_usage=True
|
| 80 |
)
|
| 81 |
CARDIOLAB_MODEL_LOADED = True
|
| 82 |
-
print(
|
| 83 |
return True
|
| 84 |
except Exception as e:
|
| 85 |
-
print(
|
| 86 |
return False
|
| 87 |
|
| 88 |
load_papers()
|
| 89 |
load_cardiolab_model()
|
| 90 |
|
| 91 |
-
# ββ SEMANTIC SEARCH ββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 92 |
def search_papers(query, n=4):
|
| 93 |
if not PAPERS_LOADED or EMBEDDINGS is None or EMBEDDER is None:
|
| 94 |
return "", []
|
|
@@ -107,85 +148,16 @@ def search_papers(query, n=4):
|
|
| 107 |
meta = METADATA[idx]
|
| 108 |
score = float(scores[idx])
|
| 109 |
if score > 0.25:
|
| 110 |
-
results.append({"chunk":chunk,"paper":meta["paper"],"
|
| 111 |
if meta["paper"] not in seen:
|
| 112 |
-
context += chr(10)+"=== FROM: "+meta["paper"]+" ==="+chr(10)
|
| 113 |
seen.add(meta["paper"])
|
| 114 |
-
context += chunk[:500]+chr(10)
|
| 115 |
return context, results
|
| 116 |
except Exception as e:
|
| 117 |
return "", []
|
| 118 |
|
| 119 |
-
|
| 120 |
-
if not CARDIOLAB_MODEL_LOADED:
|
| 121 |
-
return None
|
| 122 |
-
try:
|
| 123 |
-
import torch
|
| 124 |
-
system = "You are CardioLab AI for SJSU Biomedical Engineering."
|
| 125 |
-
if paper_context:
|
| 126 |
-
system += " Use these SJSU research papers: "+paper_context[:500]
|
| 127 |
-
prompt = f"<|system|>{system}</s><|user|>{question}</s><|assistant|>"
|
| 128 |
-
inputs = CARDIOLAB_TOKENIZER(prompt, return_tensors="pt", truncation=True, max_length=512)
|
| 129 |
-
device = next(CARDIOLAB_LLM.parameters()).device
|
| 130 |
-
inputs = {k:v.to(device) for k,v in inputs.items()}
|
| 131 |
-
with torch.no_grad():
|
| 132 |
-
outputs = CARDIOLAB_LLM.generate(
|
| 133 |
-
**inputs, max_new_tokens=200, do_sample=True,
|
| 134 |
-
temperature=0.3, pad_token_id=CARDIOLAB_TOKENIZER.eos_token_id
|
| 135 |
-
)
|
| 136 |
-
response = CARDIOLAB_TOKENIZER.decode(outputs[0], skip_special_tokens=True)
|
| 137 |
-
if "<|assistant|>" in response:
|
| 138 |
-
answer = response.split("<|assistant|>")[-1].strip()
|
| 139 |
-
else:
|
| 140 |
-
answer = response[len(prompt):].strip() if len(response) > len(prompt) else response
|
| 141 |
-
return answer if len(answer) > 20 else None
|
| 142 |
-
except Exception as e:
|
| 143 |
-
print(f"CardioLab model error: {e}")
|
| 144 |
-
return None
|
| 145 |
-
|
| 146 |
-
CSS = """
|
| 147 |
-
body, .gradio-container { background: #f7f7f8 !important; font-family: -apple-system, BlinkMacSystemFont, Segoe UI, sans-serif !important; }
|
| 148 |
-
.tab-nav { background: #ffffff !important; border-bottom: 1px solid #e5e7eb !important; padding: 0 16px !important; display: flex !important; flex-wrap: wrap !important; }
|
| 149 |
-
.tab-nav button { background: transparent !important; color: #6b7280 !important; border: none !important; border-bottom: 2px solid transparent !important; padding: 10px 12px !important; font-weight: 500 !important; font-size: 0.8em !important; white-space: nowrap !important; border-radius: 0 !important; }
|
| 150 |
-
.tab-nav button:hover { color: #111827 !important; background: #f9fafb !important; }
|
| 151 |
-
.tab-nav button.selected { color: #c1121f !important; border-bottom: 2px solid #c1121f !important; font-weight: 700 !important; background: transparent !important; }
|
| 152 |
-
.message.user { background: #f3f4f6 !important; color: #1a202c !important; border-radius: 12px !important; }
|
| 153 |
-
.message.bot { background: #ffffff !important; color: #1a202c !important; border-left: 3px solid #c1121f !important; }
|
| 154 |
-
textarea { background: #ffffff !important; color: #1a202c !important; border: 1px solid #d1d5db !important; border-radius: 10px !important; }
|
| 155 |
-
button.primary { background: #c1121f !important; color: white !important; border: none !important; border-radius: 8px !important; font-weight: 600 !important; }
|
| 156 |
-
button.secondary { background: #f3f4f6 !important; color: #374151 !important; border: 1px solid #d1d5db !important; border-radius: 8px !important; }
|
| 157 |
-
input[type=number] { background: #f9fafb !important; color: #1a202c !important; border: 1px solid #d1d5db !important; border-radius: 8px !important; }
|
| 158 |
-
"""
|
| 159 |
-
|
| 160 |
-
HEADER = """<div style="background:linear-gradient(135deg,#0a0f2e 0%,#1a0a0a 100%);padding:0;border-bottom:3px solid #c1121f;overflow:hidden;">
|
| 161 |
-
<svg style="position:absolute;opacity:0.07;width:100%;height:100%;" viewBox="0 0 1200 120" preserveAspectRatio="none">
|
| 162 |
-
<polyline points="0,60 100,60 130,20 150,100 170,10 200,90 220,60 400,60 430,20 450,100 470,10 500,90 520,60 700,60 730,20 750,100 770,10 800,90 820,60 1000,60 1030,20 1050,100 1070,10 1100,90 1120,60 1200,60" fill="none" stroke="#c1121f" stroke-width="3"/>
|
| 163 |
-
</svg>
|
| 164 |
-
<div style="max-width:1200px;margin:0 auto;padding:16px 24px;display:flex;align-items:center;justify-content:space-between;position:relative;z-index:1;">
|
| 165 |
-
<div style="display:flex;align-items:center;gap:14px;">
|
| 166 |
-
<svg width="55" height="55" viewBox="0 0 100 100"><circle cx="50" cy="35" r="28" fill="#0057a8" opacity="0.9"/><ellipse cx="50" cy="14" rx="22" ry="10" fill="#0057a8"/>
|
| 167 |
-
<polygon points="30,14 33,4 36,14" fill="#e8a020"/><polygon points="36,12 39,2 42,12" fill="#e8a020"/>
|
| 168 |
-
<polygon points="42,11 45,1 48,11" fill="#e8a020"/><polygon points="48,11 51,1 54,11" fill="#e8a020"/>
|
| 169 |
-
<polygon points="54,12 57,2 60,12" fill="#e8a020"/><polygon points="60,14 63,4 66,14" fill="#e8a020"/>
|
| 170 |
-
<rect x="36" y="30" width="28" height="22" rx="4" fill="#0057a8"/><rect x="40" y="35" width="8" height="12" rx="2" fill="#e8a020"/>
|
| 171 |
-
<rect x="34" y="50" width="32" height="8" rx="4" fill="#0057a8"/></svg>
|
| 172 |
-
<div><div style="color:#9ca3af;font-size:0.7em;letter-spacing:2px;text-transform:uppercase;">San Jose State University</div>
|
| 173 |
-
<div style="color:#e8a020;font-size:0.82em;font-weight:700;">Biomedical Engineering</div></div></div>
|
| 174 |
-
<div style="text-align:center;flex:1;padding:0 20px;">
|
| 175 |
-
<div style="display:flex;align-items:center;justify-content:center;gap:10px;margin-bottom:3px;">
|
| 176 |
-
<svg width="100" height="28" viewBox="0 0 120 32"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg>
|
| 177 |
-
<div style="font-size:2em;font-weight:900;letter-spacing:2px;"><span style="color:#ffffff;">Cardio</span><span style="color:#c1121f;">Lab</span><span style="color:#ffffff;"> AI</span></div>
|
| 178 |
-
<svg width="100" height="28" viewBox="0 0 120 32" style="transform:scaleX(-1);"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg></div>
|
| 179 |
-
<div style="color:#9ca3af;font-size:0.68em;letter-spacing:2px;text-transform:uppercase;">RAG + Fine-tuned | BioGPT | ClinicalTrials | Weekly Updates | 5 AI Models</div></div>
|
| 180 |
-
<div style="display:flex;align-items:center;gap:14px;">
|
| 181 |
-
<div style="text-align:right;"><div style="color:#9ca3af;font-size:0.68em;text-transform:uppercase;">Research Pillars</div>
|
| 182 |
-
<div style="color:#ffffff;font-size:0.72em;margin-top:3px;">MHV CKD FSI</div>
|
| 183 |
-
<div style="color:#9ca3af;font-size:0.62em;margin-top:2px;">MCL PIV TGT uPAD COMSOL</div></div>
|
| 184 |
-
<svg width="48" height="48" viewBox="0 0 100 90">
|
| 185 |
-
<path d="M50 85 C50 85 5 55 5 30 C5 15 18 5 30 5 C38 5 45 9 50 15 C55 9 62 5 70 5 C82 5 95 15 95 30 C95 55 50 85 50 85Z" fill="#c1121f" opacity="0.9"/>
|
| 186 |
-
<polyline points="25,45 32,45 35,35 38,55 41,30 44,50 50,45 75,45" fill="none" stroke="white" stroke-width="2.5" stroke-linecap="round" opacity="0.9"/></svg></div></div>
|
| 187 |
-
<div style="height:3px;background:linear-gradient(90deg,#0057a8,#c1121f,#e8a020,#c1121f,#0057a8);"></div></div>"""
|
| 188 |
-
|
| 189 |
def load_all_sessions():
|
| 190 |
if not HF_TOKEN: return {}
|
| 191 |
try:
|
|
@@ -209,17 +181,17 @@ def get_session_list():
|
|
| 209 |
|
| 210 |
def save_session(history, name):
|
| 211 |
if not history: return "Nothing to save", gr.update()
|
| 212 |
-
if not name or not name.strip(): name = "Chat "+datetime.now().strftime("%b %d %H:%M")
|
| 213 |
sessions = load_all_sessions()
|
| 214 |
-
sessions[name] = {"messages":history,"saved_at":datetime.now().isoformat()}
|
| 215 |
ok = save_all_sessions(sessions)
|
| 216 |
choices = get_session_list()
|
| 217 |
-
return ("Saved: "+name if ok else "Save failed"), gr.update(choices=choices, value=name)
|
| 218 |
|
| 219 |
def load_session(name):
|
| 220 |
if not name or "No saved" in name: return [], "Select a session"
|
| 221 |
sessions = load_all_sessions()
|
| 222 |
-
return (sessions[name]["messages"], "Loaded: "+name) if name in sessions else ([], "Not found")
|
| 223 |
|
| 224 |
def delete_session(name):
|
| 225 |
if not name or "No saved" in name: return "Select a session", gr.update()
|
|
@@ -227,11 +199,12 @@ def delete_session(name):
|
|
| 227 |
if name in sessions:
|
| 228 |
del sessions[name]; save_all_sessions(sessions)
|
| 229 |
choices = get_session_list()
|
| 230 |
-
return "Deleted: "+name, gr.update(choices=choices, value=choices[0] if choices else None)
|
| 231 |
return "Not found", gr.update()
|
| 232 |
|
| 233 |
def new_chat(): return [], "", "New chat started"
|
| 234 |
|
|
|
|
| 235 |
def get_pubmed_chat(query, n=3):
|
| 236 |
try:
|
| 237 |
r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
|
|
@@ -240,190 +213,104 @@ def get_pubmed_chat(query, n=3):
|
|
| 240 |
return chr(10).join(["https://pubmed.ncbi.nlm.nih.gov/"+i for i in ids]) if ids else ""
|
| 241 |
except: return ""
|
| 242 |
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
def search_biogpt(query):
|
| 247 |
-
"""Search BioGPT β trained on 15M PubMed papers via HuggingFace API"""
|
| 248 |
-
if not HF_TOKEN: return ""
|
| 249 |
try:
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
)
|
| 257 |
-
if r.status_code == 200:
|
| 258 |
-
result = r.json()
|
| 259 |
-
if isinstance(result, list) and len(result) > 0:
|
| 260 |
-
text = result[0].get("generated_text","")
|
| 261 |
-
# Extract just the answer part
|
| 262 |
-
if "[SEP]" in text:
|
| 263 |
-
text = text.split("[SEP]")[-1].strip()
|
| 264 |
-
return text[:400] if text else ""
|
| 265 |
-
return ""
|
| 266 |
-
except: return ""
|
| 267 |
|
| 268 |
-
def
|
| 269 |
-
|
|
|
|
|
|
|
| 270 |
try:
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
params={
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
"
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
nct = ident.get("nctId","")
|
| 290 |
-
title = ident.get("briefTitle","")
|
| 291 |
-
phase = status.get("phase","")
|
| 292 |
-
overall = status.get("overallStatus","")
|
| 293 |
-
if nct and title:
|
| 294 |
-
results.append({
|
| 295 |
-
"nct": nct,
|
| 296 |
-
"title": title,
|
| 297 |
-
"status": overall,
|
| 298 |
-
"phase": phase,
|
| 299 |
-
"url": "https://clinicaltrials.gov/study/"+nct
|
| 300 |
-
})
|
| 301 |
-
return results
|
| 302 |
-
except: return []
|
| 303 |
-
|
| 304 |
-
def get_weekly_pubmed_update(topics=None):
|
| 305 |
-
"""Get papers published in last 7 days on CardioLab topics"""
|
| 306 |
-
if topics is None:
|
| 307 |
-
topics = [
|
| 308 |
-
"mechanical heart valve thrombogenicity",
|
| 309 |
-
"microfluidic creatinine CKD diagnosis",
|
| 310 |
-
"PIV hemodynamics prosthetic valve",
|
| 311 |
-
"Mock Circulatory Loop cardiac",
|
| 312 |
-
"bileaflet valve fluid structure interaction"
|
| 313 |
-
]
|
| 314 |
-
all_new = []
|
| 315 |
try:
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
for
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
})
|
| 339 |
-
return all_new
|
| 340 |
-
except: return []
|
| 341 |
-
|
| 342 |
-
def full_research_search(query, search_model="Llama 3.3 70B (Best)"):
|
| 343 |
-
"""Complete search across ALL sources including Phase C additions"""
|
| 344 |
-
if not query.strip(): return "Please enter a research topic."
|
| 345 |
-
|
| 346 |
-
model_id = CHAT_MODELS.get(search_model, "llama-3.3-70b-versatile")
|
| 347 |
-
expanded = expand_query_ai(query, model_id) if GROQ_KEY else query
|
| 348 |
-
|
| 349 |
-
# All search sources
|
| 350 |
-
pubmed = fetch_pubmed(expanded, n=6)
|
| 351 |
-
scholar = fetch_scholar(expanded, n=5)
|
| 352 |
-
europe = fetch_europe_pmc(expanded, n=4)
|
| 353 |
-
trials = search_clinical_trials(query, n=4)
|
| 354 |
-
weekly = get_weekly_pubmed_update()
|
| 355 |
-
biogpt_answer = search_biogpt(query)
|
| 356 |
-
|
| 357 |
-
# Format output
|
| 358 |
-
out = "QUERY: "+query+chr(10)
|
| 359 |
-
out += "AI EXPANDED: "+expanded+chr(10)
|
| 360 |
-
out += "SOURCES: PubMed + Scholar + EuropePMC + ClinicalTrials + SJSU + BioGPT"+chr(10)
|
| 361 |
-
out += "="*50+chr(10)+chr(10)
|
| 362 |
-
|
| 363 |
-
# BioGPT answer first
|
| 364 |
-
if biogpt_answer:
|
| 365 |
-
out += "BIOGPT ANSWER (trained on 15M PubMed papers):"+chr(10)
|
| 366 |
-
out += biogpt_answer+chr(10)+chr(10)
|
| 367 |
-
out += "="*50+chr(10)+chr(10)
|
| 368 |
-
|
| 369 |
-
# PubMed results
|
| 370 |
-
if pubmed:
|
| 371 |
-
out += "PUBMED ("+str(len(pubmed))+" papers):"+chr(10)
|
| 372 |
-
for p in pubmed[:6]:
|
| 373 |
-
out += p["title"][:85]+" ("+p["year"]+")"+chr(10)
|
| 374 |
-
out += " "+p["url"]+chr(10)+chr(10)
|
| 375 |
-
|
| 376 |
-
# Scholar results
|
| 377 |
-
if scholar:
|
| 378 |
-
out += "SEMANTIC SCHOLAR ("+str(len(scholar))+" papers):"+chr(10)
|
| 379 |
-
for p in scholar[:5]:
|
| 380 |
-
out += p["title"][:85]+" ("+p["year"]+")"
|
| 381 |
-
if p["citations"] not in ("N/A","","0"): out += " | "+p["citations"]+" citations"
|
| 382 |
-
out += chr(10)+" "+p["url"]+chr(10)+chr(10)
|
| 383 |
-
|
| 384 |
-
# Clinical trials
|
| 385 |
-
if trials:
|
| 386 |
-
out += "CLINICALTRIALS.GOV ("+str(len(trials))+" trials):"+chr(10)
|
| 387 |
-
for t in trials:
|
| 388 |
-
out += t["title"][:80]+" | "+t["status"]+" | "+t.get("phase","")+" "+chr(10)
|
| 389 |
-
out += " "+t["url"]+chr(10)+chr(10)
|
| 390 |
-
|
| 391 |
-
# Weekly updates
|
| 392 |
-
weekly_relevant = [w for w in weekly if any(
|
| 393 |
-
kw in query.lower() for kw in ["valve","heart","ckd","creatinine","piv","tgt","mcl"]
|
| 394 |
-
)]
|
| 395 |
-
if weekly_relevant:
|
| 396 |
-
out += "NEW THIS WEEK (last 7 days):"+chr(10)
|
| 397 |
-
for w in weekly_relevant[:5]:
|
| 398 |
-
out += " "+w["url"]+" ["+w["topic"][:40]+"]"+chr(10)
|
| 399 |
-
|
| 400 |
-
# SJSU ScholarWorks
|
| 401 |
-
out += chr(10)+"SJSU SCHOLARWORKS:"+chr(10)
|
| 402 |
-
out += " https://scholarworks.sjsu.edu/do/search/?q="+requests.utils.quote(query)+"&context=6781027"
|
| 403 |
-
|
| 404 |
return out
|
| 405 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 406 |
|
| 407 |
def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
|
| 408 |
if not message.strip(): return "", history
|
| 409 |
paper_context, paper_results = search_papers(message, n=4)
|
| 410 |
-
|
| 411 |
-
# Use fine-tuned CardioLab model if selected
|
| 412 |
if chat_model == "CardioLab Fine-tuned (SJSU)" and CARDIOLAB_MODEL_LOADED:
|
| 413 |
answer = answer_with_cardiolab_model(message, paper_context)
|
| 414 |
if answer:
|
| 415 |
if paper_results:
|
| 416 |
unique_papers = list(dict.fromkeys([r["paper"] for r in paper_results]))
|
| 417 |
-
answer += chr(10)+chr(10)+"Sources from SJSU CardioLab papers:"
|
| 418 |
for p in unique_papers[:3]:
|
| 419 |
-
answer += chr(10)+" - "+p.replace(
|
| 420 |
pubmed = get_pubmed_chat(message, n=2)
|
| 421 |
-
if pubmed: answer += chr(10)+"PubMed: "+pubmed
|
| 422 |
history.append({"role":"user","content":message})
|
| 423 |
-
history.append({"role":"assistant","content":"[CardioLab Fine-tuned
|
| 424 |
return "", history
|
| 425 |
-
|
| 426 |
-
# Fall back to Groq models
|
| 427 |
if not GROQ_KEY:
|
| 428 |
history.append({"role":"user","content":message})
|
| 429 |
history.append({"role":"assistant","content":"Error: Add GROQ_API_KEY to Space Settings."})
|
|
@@ -433,12 +320,11 @@ def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
|
|
| 433 |
client = Groq(api_key=GROQ_KEY)
|
| 434 |
if paper_context:
|
| 435 |
system_prompt = ("You are CardioLab AI for SJSU Biomedical Engineering. "
|
| 436 |
-
"Answer using SJSU CardioLab research papers below. "
|
| 437 |
-
|
| 438 |
-
|
| 439 |
-
"ADDITIONAL KNOWLEDGE: "+KNOWHOW)
|
| 440 |
else:
|
| 441 |
-
system_prompt = "You are CardioLab AI for SJSU Biomedical Engineering. Expert in MHV MCL PIV TGT uPAD CKD FSI. "+KNOWHOW
|
| 442 |
msgs = [{"role":"system","content":system_prompt}]
|
| 443 |
for item in history:
|
| 444 |
if isinstance(item, dict): msgs.append({"role":item["role"],"content":item["content"]})
|
|
@@ -447,17 +333,17 @@ def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
|
|
| 447 |
answer = resp.choices[0].message.content
|
| 448 |
if paper_results:
|
| 449 |
unique_papers = list(dict.fromkeys([r["paper"] for r in paper_results]))
|
| 450 |
-
answer += chr(10)+chr(10)+"Sources from SJSU CardioLab papers:"
|
| 451 |
for p in unique_papers[:3]:
|
| 452 |
-
answer += chr(10)+" - "+p.replace(
|
| 453 |
pubmed = get_pubmed_chat(message, n=2)
|
| 454 |
-
if pubmed: answer += chr(10)+"PubMed: "+pubmed
|
| 455 |
history.append({"role":"user","content":message})
|
| 456 |
history.append({"role":"assistant","content":answer})
|
| 457 |
return "", history
|
| 458 |
except Exception as e:
|
| 459 |
history.append({"role":"user","content":message})
|
| 460 |
-
history.append({"role":"assistant","content":"Error: "+str(e)})
|
| 461 |
return "", history
|
| 462 |
|
| 463 |
def voice_chat(audio, history):
|
|
@@ -469,79 +355,120 @@ def voice_chat(audio, history):
|
|
| 469 |
with open(audio, "rb") as f:
|
| 470 |
tx = client.audio.transcriptions.create(file=("audio.wav", f, "audio/wav"), model="whisper-large-v3")
|
| 471 |
paper_context, _ = search_papers(tx.text, n=3)
|
| 472 |
-
system = "You are CardioLab AI. "+KNOWHOW
|
| 473 |
-
if paper_context: system = "You are CardioLab AI. Use these SJSU papers:"+chr(10)+paper_context+chr(10)+KNOWHOW
|
| 474 |
msgs = [{"role":"system","content":system}]
|
| 475 |
for item in history:
|
| 476 |
if isinstance(item, dict): msgs.append({"role":item["role"],"content":item["content"]})
|
| 477 |
msgs.append({"role":"user","content":tx.text})
|
| 478 |
-
resp = client.chat.completions.create(model="llama-3.3-70b-versatile",messages=msgs,max_tokens=500)
|
| 479 |
-
history.append({"role":"user","content":"Voice: "+tx.text})
|
| 480 |
history.append({"role":"assistant","content":resp.choices[0].message.content})
|
| 481 |
return history
|
| 482 |
except Exception as e:
|
| 483 |
-
history.append({"role":"assistant","content":"Voice error: "+str(e)})
|
| 484 |
return history
|
| 485 |
|
| 486 |
-
|
| 487 |
-
|
|
|
|
|
|
|
| 488 |
try:
|
| 489 |
client = Groq(api_key=GROQ_KEY)
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 495 |
|
| 496 |
-
def
|
| 497 |
-
if not
|
| 498 |
-
|
| 499 |
-
results = []
|
| 500 |
try:
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 519 |
try:
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
for
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
|
| 529 |
-
|
| 530 |
-
|
| 531 |
-
|
| 532 |
-
|
| 533 |
-
|
| 534 |
-
|
| 535 |
-
|
| 536 |
-
|
| 537 |
-
|
| 538 |
-
|
| 539 |
-
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
out += "https://scholarworks.sjsu.edu/do/search/?q="+requests.utils.quote(query)+"&context=6781027"
|
| 543 |
-
return out
|
| 544 |
|
|
|
|
| 545 |
def analyze_upad_photo(image):
|
| 546 |
if image is None: return None, "Upload a uPAD photo first."
|
| 547 |
try:
|
|
@@ -587,7 +514,7 @@ def analyze_piv_csv(file,theme="White"):
|
|
| 587 |
if vc:
|
| 588 |
ax.plot(xv,df[vc],color="#c1121f",linewidth=2.5,marker="o",markersize=5)
|
| 589 |
ax.fill_between(xv,df[vc],alpha=0.15,color="#c1121f")
|
| 590 |
-
ax.axhline(y=2.0,color="#f59e0b",linestyle="--",linewidth=2,label="Risk
|
| 591 |
ax.set_ylabel("Velocity (m/s)",color=ac); ax.legend(fontsize=9,labelcolor=fg,facecolor=pb)
|
| 592 |
def ps(ax):
|
| 593 |
if sc2:
|
|
@@ -595,7 +522,7 @@ def analyze_piv_csv(file,theme="White"):
|
|
| 595 |
ax.plot(xp,df[sc2],color="#0057a8",linewidth=2.5,marker="s",markersize=5)
|
| 596 |
ax.fill_between(xp,df[sc2],alpha=0.15,color="#0057a8")
|
| 597 |
ax.axhline(y=5,color="#f59e0b",linestyle="--",linewidth=2,label="Caution 5 Pa")
|
| 598 |
-
ax.axhline(y=10,color="#c1121f",linestyle="--",linewidth=2,label="
|
| 599 |
ax.set_ylabel("Shear (Pa)",color=ac); ax.legend(fontsize=9,labelcolor=fg,facecolor=pb)
|
| 600 |
def psc(ax):
|
| 601 |
if vc and sc2:
|
|
@@ -665,7 +592,7 @@ def analyze_tgt_csv(file,theme="White"):
|
|
| 665 |
try:
|
| 666 |
client=Groq(api_key=GROQ_KEY)
|
| 667 |
resp=client.chat.completions.create(model="llama-3.3-70b-versatile",
|
| 668 |
-
messages=[{"role":"system","content":"Hematology expert.
|
| 669 |
{"role":"user","content":"TGT:"+chr(10)+df.describe().to_string()[:500]}],max_tokens=250)
|
| 670 |
ai=chr(10)+"AI: "+resp.choices[0].message.content
|
| 671 |
except: pass
|
|
@@ -701,118 +628,27 @@ def generate_image(prompt):
|
|
| 701 |
def piv_manual(v,s,h):
|
| 702 |
vr="HIGH-stenosis" if float(v)>2.0 else "NORMAL"
|
| 703 |
sr="HIGH-thrombosis" if float(s)>10 else "ELEVATED" if float(s)>5 else "NORMAL"
|
| 704 |
-
return "Velocity: "+str(v)+" m/s
|
| 705 |
|
| 706 |
def tgt_manual(t,p,h,pl,tm):
|
| 707 |
risk=sum([float(t)>15,float(p)>2.0,float(h)>50,float(pl)<150])
|
| 708 |
return "TAT:"+str(t)+" PF1.2:"+str(p)+chr(10)+"Hemo:"+str(h)+" Plt:"+str(pl)+chr(10)+"RESULT: "+("HIGH RISK" if risk>=3 else "MODERATE" if risk>=2 else "LOW RISK")
|
| 709 |
|
| 710 |
-
|
| 711 |
-
# ββ PHASE D: PROTOCOL GENERATOR + REPORT WRITER + HYPOTHESIS ββββββ
|
| 712 |
-
|
| 713 |
-
def generate_protocol(experiment_type, specific_params=""):
|
| 714 |
-
if not GROQ_KEY: return "Error: Add GROQ_API_KEY to Space Settings."
|
| 715 |
-
try:
|
| 716 |
-
client = Groq(api_key=GROQ_KEY)
|
| 717 |
-
paper_context, _ = search_papers(experiment_type, n=4)
|
| 718 |
-
lab_context = {
|
| 719 |
-
"MCL": "Sylgard 184 PDMS 10:1 ratio 48hr cure. Tygon tubing. 70bpm 5L/min 80-120mmHg. Blood analog glycerin water.",
|
| 720 |
-
"PIV": "Green laser 532nm time-resolved. Seeding particles. Velocity 0.5-2.0 m/s normal. Shear below 5 Pa normal.",
|
| 721 |
-
"TGT": "Arduino Uno stepper motor 48V. 150mL fresh blood. Sample 0 20 40 60 min. Heska HT5 analyzer.",
|
| 722 |
-
"uPAD": "Whatman filter paper. Wax printer 120C baking. Picric acid alkaline solution. Jaffe reaction orange-red color.",
|
| 723 |
-
"FSI": "COMSOL Multiphysics ALE mesh. Blood 1060 kg/m3 0.0035 Pa.s. SJM bileaflet geometry.",
|
| 724 |
-
}
|
| 725 |
-
extra = next((v for k,v in lab_context.items() if k.lower() in experiment_type.lower()), "")
|
| 726 |
-
system = """You are CardioLab AI protocol generator for SJSU Biomedical Engineering.
|
| 727 |
-
Generate a COMPLETE detailed lab protocol. Include these sections:
|
| 728 |
-
1. OBJECTIVE
|
| 729 |
-
2. MATERIALS AND EQUIPMENT (with exact quantities)
|
| 730 |
-
3. SAFETY CONSIDERATIONS
|
| 731 |
-
4. STEP-BY-STEP PROCEDURE (numbered, detailed)
|
| 732 |
-
5. DATA COLLECTION
|
| 733 |
-
6. ANALYSIS METHOD
|
| 734 |
-
7. EXPECTED RESULTS (with normal ranges)
|
| 735 |
-
8. TROUBLESHOOTING
|
| 736 |
-
Use exact SJSU CardioLab values and equipment."""
|
| 737 |
-
user_msg = f"Generate complete protocol for: {experiment_type}"
|
| 738 |
-
if specific_params: user_msg += f"
|
| 739 |
-
Parameters: {specific_params}"
|
| 740 |
-
if extra: user_msg += f"
|
| 741 |
-
CardioLab context: {extra}"
|
| 742 |
-
if paper_context: user_msg += f"
|
| 743 |
-
From SJSU papers: {paper_context[:600]}"
|
| 744 |
-
resp = client.chat.completions.create(
|
| 745 |
-
model="llama-3.3-70b-versatile",
|
| 746 |
-
messages=[{"role":"system","content":system},{"role":"user","content":user_msg}],
|
| 747 |
-
max_tokens=1200)
|
| 748 |
-
return resp.choices[0].message.content
|
| 749 |
-
except Exception as e: return "Error: "+str(e)
|
| 750 |
-
|
| 751 |
-
def generate_report(data_description, experiment_type, results=""):
|
| 752 |
-
if not GROQ_KEY: return "Error: Add GROQ_API_KEY to Space Settings."
|
| 753 |
-
try:
|
| 754 |
-
client = Groq(api_key=GROQ_KEY)
|
| 755 |
-
paper_context, _ = search_papers(experiment_type, n=3)
|
| 756 |
-
system = """You are CardioLab AI report writer for SJSU Biomedical Engineering.
|
| 757 |
-
Generate a professional research report with these sections:
|
| 758 |
-
1. ABSTRACT (150 words)
|
| 759 |
-
2. INTRODUCTION (background and objectives)
|
| 760 |
-
3. MATERIALS AND METHODS
|
| 761 |
-
4. RESULTS AND DISCUSSION
|
| 762 |
-
5. CONCLUSION
|
| 763 |
-
6. RECOMMENDATIONS
|
| 764 |
-
7. REFERENCES (cite SJSU CardioLab papers)
|
| 765 |
-
Use specific values. Write in professional academic style."""
|
| 766 |
-
user_msg = f"Write research report for: {experiment_type}"+chr(10)+f"Description: {data_description}"
|
| 767 |
-
if results: user_msg += chr(10)+f"Results: {results}"
|
| 768 |
-
if paper_context: user_msg += chr(10)+f"SJSU papers: {paper_context[:600]}"
|
| 769 |
-
resp = client.chat.completions.create(
|
| 770 |
-
model="llama-3.3-70b-versatile",
|
| 771 |
-
messages=[{"role":"system","content":system},{"role":"user","content":user_msg}],
|
| 772 |
-
max_tokens=1500)
|
| 773 |
-
return resp.choices[0].message.content
|
| 774 |
-
except Exception as e: return "Error: "+str(e)
|
| 775 |
-
|
| 776 |
-
def generate_hypothesis(research_area, current_findings=""):
|
| 777 |
-
if not GROQ_KEY: return "Error: Add GROQ_API_KEY to Space Settings."
|
| 778 |
-
try:
|
| 779 |
-
client = Groq(api_key=GROQ_KEY)
|
| 780 |
-
paper_context, _ = search_papers(research_area, n=3)
|
| 781 |
-
system = """You are CardioLab AI research assistant for SJSU Biomedical Engineering.
|
| 782 |
-
Generate 3 specific testable research hypotheses. For each:
|
| 783 |
-
- H0 (null hypothesis)
|
| 784 |
-
- H1 (alternative hypothesis)
|
| 785 |
-
- Scientific rationale
|
| 786 |
-
- Suggested experiment
|
| 787 |
-
- Expected outcome and measurable metrics
|
| 788 |
-
Base on SJSU CardioLab research."""
|
| 789 |
-
user_msg = f"Generate hypotheses for: {research_area}"
|
| 790 |
-
if current_findings: user_msg += chr(10)+f"Current findings: {current_findings}"
|
| 791 |
-
if paper_context: user_msg += chr(10)+f"SJSU papers: {paper_context[:500]}"
|
| 792 |
-
resp = client.chat.completions.create(
|
| 793 |
-
model="llama-3.3-70b-versatile",
|
| 794 |
-
messages=[{"role":"system","content":system},{"role":"user","content":user_msg}],
|
| 795 |
-
max_tokens=1000)
|
| 796 |
-
return resp.choices[0].message.content
|
| 797 |
-
except Exception as e: return "Error: "+str(e)
|
| 798 |
-
|
| 799 |
-
|
| 800 |
with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
| 801 |
gr.HTML(HEADER)
|
| 802 |
|
| 803 |
papers_count = len(set(m["paper"] for m in METADATA)) if PAPERS_LOADED else 0
|
| 804 |
-
model_status = "
|
| 805 |
-
rag_status =
|
| 806 |
-
gr.HTML(
|
| 807 |
-
{rag_status} | {model_status} | Select "CardioLab Fine-tuned (SJSU)" in Model dropdown to use your custom model!</div>''')
|
| 808 |
|
| 809 |
with gr.Tabs():
|
|
|
|
| 810 |
with gr.Tab("Chat"):
|
| 811 |
with gr.Row():
|
| 812 |
with gr.Column(scale=1, min_width=200):
|
| 813 |
-
gr.HTML(
|
| 814 |
-
<div style="color:#e8a020;font-weight:700;font-size:0.85em;">SJSU CARDIOLAB</div>
|
| 815 |
-
<div style="color:#9ca3af;font-size:0.7em;">Conversations</div></div>''')
|
| 816 |
new_chat_btn = gr.Button("New Chat", variant="secondary")
|
| 817 |
session_dropdown = gr.Dropdown(choices=get_session_list(), label="Saved Sessions", interactive=True)
|
| 818 |
load_btn = gr.Button("Load Session", variant="primary")
|
|
@@ -847,14 +683,14 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 847 |
voice_clear.click(lambda: [], outputs=voice_chatbot)
|
| 848 |
|
| 849 |
with gr.Tab("Papers"):
|
| 850 |
-
gr.Markdown("### Search PubMed + Scholar +
|
| 851 |
with gr.Row():
|
| 852 |
search_input = gr.Textbox(placeholder="e.g. bileaflet mechanical heart valve thrombogenicity hemodynamics", label="Research Topic", scale=3)
|
| 853 |
search_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="AI Model", scale=1)
|
| 854 |
search_btn = gr.Button("Search", variant="primary", scale=1)
|
| 855 |
search_output = gr.Textbox(label="Results", lines=22)
|
| 856 |
-
search_btn.click(
|
| 857 |
-
search_input.submit(
|
| 858 |
|
| 859 |
with gr.Tab("PIV CSV"):
|
| 860 |
with gr.Row():
|
|
@@ -930,8 +766,7 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 930 |
"uPAD Fabrication","uPAD Creatinine Test",
|
| 931 |
"FSI COMSOL Simulation","Valve Testing"],
|
| 932 |
value="TGT Blood Testing", label="Experiment Type")
|
| 933 |
-
proto_params = gr.Textbox(
|
| 934 |
-
placeholder="e.g. 27mm SJM valve 70bpm porcine blood",
|
| 935 |
label="Specific Parameters", lines=2)
|
| 936 |
proto_btn = gr.Button("Generate Protocol", variant="primary")
|
| 937 |
with gr.Column(scale=2):
|
|
@@ -948,7 +783,7 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 948 |
"Heart Valve Comparison"],
|
| 949 |
value="TGT Thrombogenicity Study", label="Study Type")
|
| 950 |
report_desc = gr.Textbox(
|
| 951 |
-
placeholder="
|
| 952 |
label="Experiment Description", lines=3)
|
| 953 |
report_results = gr.Textbox(
|
| 954 |
placeholder="e.g. TAT=12.3 ng/mL PF1.2=2.8 Hemo=45 Plt=142",
|
|
@@ -978,6 +813,6 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 978 |
hyp_btn.click(generate_hypothesis, inputs=[hyp_area, hyp_findings], outputs=hyp_output)
|
| 979 |
|
| 980 |
gr.HTML("""<div style="text-align:center;padding:10px;border-top:1px solid #e5e7eb;background:#f9fafb;">
|
| 981 |
-
<span style="color:#9ca3af;font-size:0.75em;">CardioLab AI v38 | SJSU Biomedical Engineering | Fine-tuned
|
| 982 |
|
| 983 |
demo.launch()
|
|
|
|
| 28 |
"TGT: Arduino Uno Stepper Motor 150mL blood sampled at 0 20 40 60 minutes. "
|
| 29 |
"NORMAL RANGES: TAT below 8 ng/mL. PF1.2 below 2.0 nmol/L. Free hemoglobin below 20 mg/L. Platelets above 150 thousand per uL. "
|
| 30 |
"HIGH RISK: TAT above 15. PF1.2 above 3.0. Hemoglobin above 50. Platelets below 100. "
|
| 31 |
+
"uPAD: Jaffe reaction creatinine picric acid orange-red. Normal creatinine 0.6-1.2 mg/dL. Borderline 1.2-1.5. CKD above 1.5. "
|
| 32 |
+
"Stage2 1.5-3.0. Stage3-4 3.0-6.0. Stage5 above 6.0. "
|
| 33 |
+
"MHV: 27mm SJM Regent bileaflet also trileaflet monoleaflet pediatric. "
|
| 34 |
"PIV: green laser 532nm time-resolved. Normal velocity 0.5-2.0 m/s. Normal shear below 5 Pa. Risk above 10 Pa. "
|
| 35 |
"Equipment: Heska Element HT5 hematology analyzer time-resolved PIV Tygon tubing Arduino Uno stepper motor.")
|
| 36 |
|
| 37 |
+
CSS = """
|
| 38 |
+
body, .gradio-container { background: #f7f7f8 !important; font-family: -apple-system, BlinkMacSystemFont, Segoe UI, sans-serif !important; }
|
| 39 |
+
.tab-nav { background: #ffffff !important; border-bottom: 1px solid #e5e7eb !important; padding: 0 16px !important; display: flex !important; flex-wrap: wrap !important; }
|
| 40 |
+
.tab-nav button { background: transparent !important; color: #6b7280 !important; border: none !important; border-bottom: 2px solid transparent !important; padding: 10px 12px !important; font-weight: 500 !important; font-size: 0.8em !important; white-space: nowrap !important; border-radius: 0 !important; }
|
| 41 |
+
.tab-nav button:hover { color: #111827 !important; background: #f9fafb !important; }
|
| 42 |
+
.tab-nav button.selected { color: #c1121f !important; border-bottom: 2px solid #c1121f !important; font-weight: 700 !important; background: transparent !important; }
|
| 43 |
+
.message.user { background: #f3f4f6 !important; color: #1a202c !important; border-radius: 12px !important; }
|
| 44 |
+
.message.bot { background: #ffffff !important; color: #1a202c !important; border-left: 3px solid #c1121f !important; }
|
| 45 |
+
textarea { background: #ffffff !important; color: #1a202c !important; border: 1px solid #d1d5db !important; border-radius: 10px !important; }
|
| 46 |
+
button.primary { background: #c1121f !important; color: white !important; border: none !important; border-radius: 8px !important; font-weight: 600 !important; }
|
| 47 |
+
button.secondary { background: #f3f4f6 !important; color: #374151 !important; border: 1px solid #d1d5db !important; border-radius: 8px !important; }
|
| 48 |
+
input[type=number] { background: #f9fafb !important; color: #1a202c !important; border: 1px solid #d1d5db !important; border-radius: 8px !important; }
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
HEADER = """<div style="background:linear-gradient(135deg,#0a0f2e 0%,#1a0a0a 100%);padding:0;border-bottom:3px solid #c1121f;overflow:hidden;">
|
| 52 |
+
<svg style="position:absolute;opacity:0.07;width:100%;height:100%;" viewBox="0 0 1200 120" preserveAspectRatio="none">
|
| 53 |
+
<polyline points="0,60 100,60 130,20 150,100 170,10 200,90 220,60 400,60 430,20 450,100 470,10 500,90 520,60 700,60 730,20 750,100 770,10 800,90 820,60 1000,60 1030,20 1050,100 1070,10 1100,90 1120,60 1200,60" fill="none" stroke="#c1121f" stroke-width="3"/>
|
| 54 |
+
</svg>
|
| 55 |
+
<div style="max-width:1200px;margin:0 auto;padding:16px 24px;display:flex;align-items:center;justify-content:space-between;position:relative;z-index:1;">
|
| 56 |
+
<div style="display:flex;align-items:center;gap:14px;">
|
| 57 |
+
<svg width="55" height="55" viewBox="0 0 100 100"><circle cx="50" cy="35" r="28" fill="#0057a8" opacity="0.9"/><ellipse cx="50" cy="14" rx="22" ry="10" fill="#0057a8"/>
|
| 58 |
+
<polygon points="30,14 33,4 36,14" fill="#e8a020"/><polygon points="36,12 39,2 42,12" fill="#e8a020"/>
|
| 59 |
+
<polygon points="42,11 45,1 48,11" fill="#e8a020"/><polygon points="48,11 51,1 54,11" fill="#e8a020"/>
|
| 60 |
+
<polygon points="54,12 57,2 60,12" fill="#e8a020"/><polygon points="60,14 63,4 66,14" fill="#e8a020"/>
|
| 61 |
+
<rect x="36" y="30" width="28" height="22" rx="4" fill="#0057a8"/><rect x="40" y="35" width="8" height="12" rx="2" fill="#e8a020"/>
|
| 62 |
+
<rect x="34" y="50" width="32" height="8" rx="4" fill="#0057a8"/></svg>
|
| 63 |
+
<div><div style="color:#9ca3af;font-size:0.7em;letter-spacing:2px;text-transform:uppercase;">San Jose State University</div>
|
| 64 |
+
<div style="color:#e8a020;font-size:0.82em;font-weight:700;">Biomedical Engineering</div></div></div>
|
| 65 |
+
<div style="text-align:center;flex:1;padding:0 20px;">
|
| 66 |
+
<div style="display:flex;align-items:center;justify-content:center;gap:10px;margin-bottom:3px;">
|
| 67 |
+
<svg width="100" height="28" viewBox="0 0 120 32"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg>
|
| 68 |
+
<div style="font-size:2em;font-weight:900;letter-spacing:2px;"><span style="color:#ffffff;">Cardio</span><span style="color:#c1121f;">Lab</span><span style="color:#ffffff;"> AI</span></div>
|
| 69 |
+
<svg width="100" height="28" viewBox="0 0 120 32" style="transform:scaleX(-1);"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg></div>
|
| 70 |
+
<div style="color:#9ca3af;font-size:0.68em;letter-spacing:2px;text-transform:uppercase;">RAG + Fine-tuned | Protocol Generator | Report Writer | BioGPT | 5 AI Models</div></div>
|
| 71 |
+
<div style="display:flex;align-items:center;gap:14px;">
|
| 72 |
+
<div style="text-align:right;"><div style="color:#9ca3af;font-size:0.68em;text-transform:uppercase;">Research Pillars</div>
|
| 73 |
+
<div style="color:#ffffff;font-size:0.72em;margin-top:3px;">MHV CKD FSI</div>
|
| 74 |
+
<div style="color:#9ca3af;font-size:0.62em;margin-top:2px;">MCL PIV TGT uPAD COMSOL</div></div>
|
| 75 |
+
<svg width="48" height="48" viewBox="0 0 100 90">
|
| 76 |
+
<path d="M50 85 C50 85 5 55 5 30 C5 15 18 5 30 5 C38 5 45 9 50 15 C55 9 62 5 70 5 C82 5 95 15 95 30 C95 55 50 85 50 85Z" fill="#c1121f" opacity="0.9"/>
|
| 77 |
+
<polyline points="25,45 32,45 35,35 38,55 41,30 44,50 50,45 75,45" fill="none" stroke="white" stroke-width="2.5" stroke-linecap="round" opacity="0.9"/></svg></div></div>
|
| 78 |
+
<div style="height:3px;background:linear-gradient(90deg,#0057a8,#c1121f,#e8a020,#c1121f,#0057a8);"></div></div>"""
|
| 79 |
+
|
| 80 |
+
# ββ PAPER DATABASE βββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 81 |
CHUNKS = []
|
| 82 |
METADATA = []
|
| 83 |
EMBEDDINGS = None
|
|
|
|
| 99 |
EMBEDDINGS = np.load(emb_path)
|
| 100 |
EMBEDDER = SentenceTransformer("all-MiniLM-L6-v2")
|
| 101 |
PAPERS_LOADED = True
|
| 102 |
+
print("Papers loaded: " + str(len(CHUNKS)) + " chunks")
|
| 103 |
return True
|
| 104 |
except Exception as e:
|
| 105 |
+
print("Paper load error: " + str(e))
|
| 106 |
return False
|
| 107 |
|
| 108 |
def load_cardiolab_model():
|
|
|
|
| 110 |
try:
|
| 111 |
import torch
|
| 112 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
| 113 |
print("Loading CardioLab fine-tuned model...")
|
|
|
|
| 114 |
CARDIOLAB_TOKENIZER = AutoTokenizer.from_pretrained(CARDIOLAB_MODEL, token=HF_TOKEN)
|
| 115 |
CARDIOLAB_TOKENIZER.pad_token = CARDIOLAB_TOKENIZER.eos_token
|
| 116 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 117 |
CARDIOLAB_LLM = AutoModelForCausalLM.from_pretrained(
|
| 118 |
CARDIOLAB_MODEL, token=HF_TOKEN,
|
| 119 |
+
torch_dtype=torch.float16 if device == "cuda" else torch.float32,
|
| 120 |
+
device_map="auto" if device == "cuda" else None,
|
| 121 |
low_cpu_mem_usage=True
|
| 122 |
)
|
| 123 |
CARDIOLAB_MODEL_LOADED = True
|
| 124 |
+
print("CardioLab model loaded!")
|
| 125 |
return True
|
| 126 |
except Exception as e:
|
| 127 |
+
print("CardioLab model error: " + str(e))
|
| 128 |
return False
|
| 129 |
|
| 130 |
load_papers()
|
| 131 |
load_cardiolab_model()
|
| 132 |
|
|
|
|
| 133 |
def search_papers(query, n=4):
|
| 134 |
if not PAPERS_LOADED or EMBEDDINGS is None or EMBEDDER is None:
|
| 135 |
return "", []
|
|
|
|
| 148 |
meta = METADATA[idx]
|
| 149 |
score = float(scores[idx])
|
| 150 |
if score > 0.25:
|
| 151 |
+
results.append({"chunk": chunk, "paper": meta["paper"], "score": score})
|
| 152 |
if meta["paper"] not in seen:
|
| 153 |
+
context += chr(10) + "=== FROM: " + meta["paper"] + " ===" + chr(10)
|
| 154 |
seen.add(meta["paper"])
|
| 155 |
+
context += chunk[:500] + chr(10)
|
| 156 |
return context, results
|
| 157 |
except Exception as e:
|
| 158 |
return "", []
|
| 159 |
|
| 160 |
+
# ββ SESSION MANAGEMENT βββββββββββββββββββββββββββββββββββββββββββββ
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 161 |
def load_all_sessions():
|
| 162 |
if not HF_TOKEN: return {}
|
| 163 |
try:
|
|
|
|
| 181 |
|
| 182 |
def save_session(history, name):
|
| 183 |
if not history: return "Nothing to save", gr.update()
|
| 184 |
+
if not name or not name.strip(): name = "Chat " + datetime.now().strftime("%b %d %H:%M")
|
| 185 |
sessions = load_all_sessions()
|
| 186 |
+
sessions[name] = {"messages": history, "saved_at": datetime.now().isoformat()}
|
| 187 |
ok = save_all_sessions(sessions)
|
| 188 |
choices = get_session_list()
|
| 189 |
+
return ("Saved: " + name if ok else "Save failed"), gr.update(choices=choices, value=name)
|
| 190 |
|
| 191 |
def load_session(name):
|
| 192 |
if not name or "No saved" in name: return [], "Select a session"
|
| 193 |
sessions = load_all_sessions()
|
| 194 |
+
return (sessions[name]["messages"], "Loaded: " + name) if name in sessions else ([], "Not found")
|
| 195 |
|
| 196 |
def delete_session(name):
|
| 197 |
if not name or "No saved" in name: return "Select a session", gr.update()
|
|
|
|
| 199 |
if name in sessions:
|
| 200 |
del sessions[name]; save_all_sessions(sessions)
|
| 201 |
choices = get_session_list()
|
| 202 |
+
return "Deleted: " + name, gr.update(choices=choices, value=choices[0] if choices else None)
|
| 203 |
return "Not found", gr.update()
|
| 204 |
|
| 205 |
def new_chat(): return [], "", "New chat started"
|
| 206 |
|
| 207 |
+
# ββ SEARCH βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 208 |
def get_pubmed_chat(query, n=3):
|
| 209 |
try:
|
| 210 |
r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
|
|
|
|
| 213 |
return chr(10).join(["https://pubmed.ncbi.nlm.nih.gov/"+i for i in ids]) if ids else ""
|
| 214 |
except: return ""
|
| 215 |
|
| 216 |
+
def expand_query_ai(query):
|
| 217 |
+
if not GROQ_KEY: return query
|
|
|
|
|
|
|
|
|
|
|
|
|
| 218 |
try:
|
| 219 |
+
client = Groq(api_key=GROQ_KEY)
|
| 220 |
+
resp = client.chat.completions.create(model="llama-3.1-8b-instant",
|
| 221 |
+
messages=[{"role":"system","content":"Biomedical PubMed expert. Convert to MeSH terms for heart valves hemodynamics PIV thrombogenicity FSI microfluidics CKD. Return ONLY terms."},
|
| 222 |
+
{"role":"user","content":"Optimize: " + query}], max_tokens=80)
|
| 223 |
+
return resp.choices[0].message.content.strip() or query
|
| 224 |
+
except: return query
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 225 |
|
| 226 |
+
def quick_search(query, search_model="Llama 3.3 70B (Best)"):
|
| 227 |
+
if not query.strip(): return "Please enter a topic."
|
| 228 |
+
expanded = expand_query_ai(query)
|
| 229 |
+
results = []
|
| 230 |
try:
|
| 231 |
+
forced = expanded + " AND (heart valve OR hemodynamics OR microfluidic OR thrombogen OR creatinine OR PIV OR CFD OR CKD)"
|
| 232 |
+
r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
|
| 233 |
+
params={"db":"pubmed","term":forced,"retmax":8,"retmode":"json","sort":"date","field":"tiab"},timeout=12)
|
| 234 |
+
ids = r.json()["esearchresult"]["idlist"]
|
| 235 |
+
if ids:
|
| 236 |
+
r2 = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi",
|
| 237 |
+
params={"db":"pubmed","id":",".join(ids),"retmode":"xml","rettype":"abstract"},timeout=12)
|
| 238 |
+
import xml.etree.ElementTree as ET
|
| 239 |
+
root = ET.fromstring(r2.content)
|
| 240 |
+
for article in root.findall(".//PubmedArticle"):
|
| 241 |
+
try:
|
| 242 |
+
title = article.find(".//ArticleTitle").text or "No title"
|
| 243 |
+
pmid = article.find(".//PMID").text or ""
|
| 244 |
+
year_el = article.find(".//PubDate/Year")
|
| 245 |
+
year = year_el.text if year_el is not None else ""
|
| 246 |
+
results.append({"source":"PubMed","title":str(title),"year":year,"url":"https://pubmed.ncbi.nlm.nih.gov/"+pmid})
|
| 247 |
+
except: continue
|
| 248 |
+
except: pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 249 |
try:
|
| 250 |
+
r = requests.get("https://api.semanticscholar.org/graph/v1/paper/search",
|
| 251 |
+
params={"query":expanded,"limit":6,"fields":"title,year,url,citationCount"},timeout=12)
|
| 252 |
+
for p in r.json().get("data",[]):
|
| 253 |
+
year = p.get("year",0) or 0
|
| 254 |
+
if int(year) >= 2015:
|
| 255 |
+
results.append({"source":"Scholar","title":p.get("title",""),"year":str(year),"url":p.get("url",""),"citations":str(p.get("citationCount",0))})
|
| 256 |
+
except: pass
|
| 257 |
+
out = "QUERY: " + query + chr(10) + "AI EXPANDED: " + expanded + chr(10) + "="*45 + chr(10) + chr(10)
|
| 258 |
+
groups = {"PubMed":[],"Scholar":[]}
|
| 259 |
+
seen = set()
|
| 260 |
+
for r in results:
|
| 261 |
+
key = r["title"][:50].lower()
|
| 262 |
+
if key not in seen and r["url"]:
|
| 263 |
+
seen.add(key); groups[r["source"]].append(r)
|
| 264 |
+
for source, papers in groups.items():
|
| 265 |
+
if not papers: continue
|
| 266 |
+
out += "--- " + source + " ---" + chr(10)
|
| 267 |
+
for p in papers[:8]:
|
| 268 |
+
out += p["title"][:85] + " (" + p["year"] + ")" + chr(10)
|
| 269 |
+
out += " " + p["url"] + chr(10) + chr(10)
|
| 270 |
+
out += "--- SJSU ScholarWorks ---" + chr(10)
|
| 271 |
+
out += "https://scholarworks.sjsu.edu/do/search/?q=" + requests.utils.quote(query) + "&context=6781027"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 272 |
return out
|
| 273 |
|
| 274 |
+
# ββ CHAT βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 275 |
+
def answer_with_cardiolab_model(question, paper_context=""):
|
| 276 |
+
if not CARDIOLAB_MODEL_LOADED: return None
|
| 277 |
+
try:
|
| 278 |
+
import torch
|
| 279 |
+
system = "You are CardioLab AI for SJSU Biomedical Engineering."
|
| 280 |
+
if paper_context: system += " Use these SJSU research papers: " + paper_context[:400]
|
| 281 |
+
prompt = "<|system|>" + system + "</s><|user|>" + question + "</s><|assistant|>"
|
| 282 |
+
inputs = CARDIOLAB_TOKENIZER(prompt, return_tensors="pt", truncation=True, max_length=512)
|
| 283 |
+
device = next(CARDIOLAB_LLM.parameters()).device
|
| 284 |
+
inputs = {k: v.to(device) for k, v in inputs.items()}
|
| 285 |
+
with torch.no_grad():
|
| 286 |
+
outputs = CARDIOLAB_LLM.generate(**inputs, max_new_tokens=200, do_sample=True,
|
| 287 |
+
temperature=0.3, pad_token_id=CARDIOLAB_TOKENIZER.eos_token_id)
|
| 288 |
+
response = CARDIOLAB_TOKENIZER.decode(outputs[0], skip_special_tokens=True)
|
| 289 |
+
if "<|assistant|>" in response:
|
| 290 |
+
answer = response.split("<|assistant|>")[-1].strip()
|
| 291 |
+
else:
|
| 292 |
+
answer = response[-300:].strip()
|
| 293 |
+
return answer if len(answer) > 20 else None
|
| 294 |
+
except Exception as e:
|
| 295 |
+
print("CardioLab model error: " + str(e))
|
| 296 |
+
return None
|
| 297 |
|
| 298 |
def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
|
| 299 |
if not message.strip(): return "", history
|
| 300 |
paper_context, paper_results = search_papers(message, n=4)
|
|
|
|
|
|
|
| 301 |
if chat_model == "CardioLab Fine-tuned (SJSU)" and CARDIOLAB_MODEL_LOADED:
|
| 302 |
answer = answer_with_cardiolab_model(message, paper_context)
|
| 303 |
if answer:
|
| 304 |
if paper_results:
|
| 305 |
unique_papers = list(dict.fromkeys([r["paper"] for r in paper_results]))
|
| 306 |
+
answer += chr(10) + chr(10) + "Sources from SJSU CardioLab papers:"
|
| 307 |
for p in unique_papers[:3]:
|
| 308 |
+
answer += chr(10) + " - " + p.replace(".pdf","").replace("_"," ")
|
| 309 |
pubmed = get_pubmed_chat(message, n=2)
|
| 310 |
+
if pubmed: answer += chr(10) + "PubMed: " + pubmed
|
| 311 |
history.append({"role":"user","content":message})
|
| 312 |
+
history.append({"role":"assistant","content":"[CardioLab Fine-tuned] " + answer})
|
| 313 |
return "", history
|
|
|
|
|
|
|
| 314 |
if not GROQ_KEY:
|
| 315 |
history.append({"role":"user","content":message})
|
| 316 |
history.append({"role":"assistant","content":"Error: Add GROQ_API_KEY to Space Settings."})
|
|
|
|
| 320 |
client = Groq(api_key=GROQ_KEY)
|
| 321 |
if paper_context:
|
| 322 |
system_prompt = ("You are CardioLab AI for SJSU Biomedical Engineering. "
|
| 323 |
+
"Answer using SJSU CardioLab research papers below. Cite paper names with specific data." +
|
| 324 |
+
chr(10) + chr(10) + "SJSU CARDIOLAB PAPERS:" + chr(10) + paper_context +
|
| 325 |
+
chr(10) + chr(10) + "ADDITIONAL KNOWLEDGE: " + KNOWHOW)
|
|
|
|
| 326 |
else:
|
| 327 |
+
system_prompt = "You are CardioLab AI for SJSU Biomedical Engineering. Expert in MHV MCL PIV TGT uPAD CKD FSI. " + KNOWHOW
|
| 328 |
msgs = [{"role":"system","content":system_prompt}]
|
| 329 |
for item in history:
|
| 330 |
if isinstance(item, dict): msgs.append({"role":item["role"],"content":item["content"]})
|
|
|
|
| 333 |
answer = resp.choices[0].message.content
|
| 334 |
if paper_results:
|
| 335 |
unique_papers = list(dict.fromkeys([r["paper"] for r in paper_results]))
|
| 336 |
+
answer += chr(10) + chr(10) + "Sources from SJSU CardioLab papers:"
|
| 337 |
for p in unique_papers[:3]:
|
| 338 |
+
answer += chr(10) + " - " + p.replace(".pdf","").replace("_"," ")
|
| 339 |
pubmed = get_pubmed_chat(message, n=2)
|
| 340 |
+
if pubmed: answer += chr(10) + "PubMed: " + pubmed
|
| 341 |
history.append({"role":"user","content":message})
|
| 342 |
history.append({"role":"assistant","content":answer})
|
| 343 |
return "", history
|
| 344 |
except Exception as e:
|
| 345 |
history.append({"role":"user","content":message})
|
| 346 |
+
history.append({"role":"assistant","content":"Error: " + str(e)})
|
| 347 |
return "", history
|
| 348 |
|
| 349 |
def voice_chat(audio, history):
|
|
|
|
| 355 |
with open(audio, "rb") as f:
|
| 356 |
tx = client.audio.transcriptions.create(file=("audio.wav", f, "audio/wav"), model="whisper-large-v3")
|
| 357 |
paper_context, _ = search_papers(tx.text, n=3)
|
| 358 |
+
system = "You are CardioLab AI. " + KNOWHOW
|
| 359 |
+
if paper_context: system = "You are CardioLab AI. Use these SJSU papers:" + chr(10) + paper_context + chr(10) + KNOWHOW
|
| 360 |
msgs = [{"role":"system","content":system}]
|
| 361 |
for item in history:
|
| 362 |
if isinstance(item, dict): msgs.append({"role":item["role"],"content":item["content"]})
|
| 363 |
msgs.append({"role":"user","content":tx.text})
|
| 364 |
+
resp = client.chat.completions.create(model="llama-3.3-70b-versatile", messages=msgs, max_tokens=500)
|
| 365 |
+
history.append({"role":"user","content":"Voice: " + tx.text})
|
| 366 |
history.append({"role":"assistant","content":resp.choices[0].message.content})
|
| 367 |
return history
|
| 368 |
except Exception as e:
|
| 369 |
+
history.append({"role":"assistant","content":"Voice error: " + str(e)})
|
| 370 |
return history
|
| 371 |
|
| 372 |
+
# ββ PHASE D: PROTOCOL GENERATOR + REPORT WRITER + HYPOTHESIS ββββββ
|
| 373 |
+
def generate_protocol(experiment_type, specific_params):
|
| 374 |
+
if not GROQ_KEY: return "Error: Add GROQ_API_KEY to Space Settings."
|
| 375 |
+
if not experiment_type: return "Please select an experiment type."
|
| 376 |
try:
|
| 377 |
client = Groq(api_key=GROQ_KEY)
|
| 378 |
+
paper_context, _ = search_papers(experiment_type, n=4)
|
| 379 |
+
lab_context = {
|
| 380 |
+
"MCL": "Sylgard 184 PDMS 10:1 ratio 48hr cure. Tygon tubing. 70bpm 5L/min 80-120mmHg.",
|
| 381 |
+
"PIV": "Green laser 532nm time-resolved. Normal velocity 0.5-2.0 m/s. Shear below 5 Pa.",
|
| 382 |
+
"TGT": "Arduino Uno stepper motor 48V. 150mL fresh blood. Sample at 0 20 40 60 min. Heska HT5.",
|
| 383 |
+
"uPAD": "Whatman filter paper. Wax printer 120C. Picric acid alkaline solution. Jaffe reaction.",
|
| 384 |
+
"FSI": "COMSOL Multiphysics ALE mesh. Blood 1060 kg/m3 0.0035 Pa.s. SJM bileaflet geometry.",
|
| 385 |
+
}
|
| 386 |
+
extra = next((v for k, v in lab_context.items() if k.lower() in experiment_type.lower()), "")
|
| 387 |
+
system_msg = ("You are CardioLab AI protocol generator for SJSU Biomedical Engineering. "
|
| 388 |
+
"Generate a COMPLETE detailed lab protocol with these sections: "
|
| 389 |
+
"1. OBJECTIVE "
|
| 390 |
+
"2. MATERIALS AND EQUIPMENT with exact quantities "
|
| 391 |
+
"3. SAFETY CONSIDERATIONS "
|
| 392 |
+
"4. STEP-BY-STEP PROCEDURE numbered and detailed "
|
| 393 |
+
"5. DATA COLLECTION "
|
| 394 |
+
"6. ANALYSIS METHOD "
|
| 395 |
+
"7. EXPECTED RESULTS with normal ranges "
|
| 396 |
+
"8. TROUBLESHOOTING "
|
| 397 |
+
"Use exact SJSU CardioLab values and equipment.")
|
| 398 |
+
user_msg = "Generate complete protocol for: " + experiment_type
|
| 399 |
+
if specific_params and specific_params.strip():
|
| 400 |
+
user_msg += chr(10) + "Parameters: " + specific_params
|
| 401 |
+
if extra:
|
| 402 |
+
user_msg += chr(10) + "CardioLab context: " + extra
|
| 403 |
+
if paper_context:
|
| 404 |
+
user_msg += chr(10) + "From SJSU papers: " + paper_context[:600]
|
| 405 |
+
resp = client.chat.completions.create(
|
| 406 |
+
model="llama-3.3-70b-versatile",
|
| 407 |
+
messages=[{"role":"system","content":system_msg},{"role":"user","content":user_msg}],
|
| 408 |
+
max_tokens=1200)
|
| 409 |
+
return resp.choices[0].message.content
|
| 410 |
+
except Exception as e:
|
| 411 |
+
return "Error generating protocol: " + str(e)
|
| 412 |
|
| 413 |
+
def generate_report(data_description, experiment_type, results):
|
| 414 |
+
if not GROQ_KEY: return "Error: Add GROQ_API_KEY to Space Settings."
|
| 415 |
+
if not experiment_type: return "Please select a study type."
|
|
|
|
| 416 |
try:
|
| 417 |
+
client = Groq(api_key=GROQ_KEY)
|
| 418 |
+
paper_context, _ = search_papers(experiment_type, n=3)
|
| 419 |
+
system_msg = ("You are CardioLab AI report writer for SJSU Biomedical Engineering. "
|
| 420 |
+
"Generate a professional research report with these sections: "
|
| 421 |
+
"1. ABSTRACT 150 words "
|
| 422 |
+
"2. INTRODUCTION background and objectives "
|
| 423 |
+
"3. MATERIALS AND METHODS "
|
| 424 |
+
"4. RESULTS AND DISCUSSION "
|
| 425 |
+
"5. CONCLUSION "
|
| 426 |
+
"6. RECOMMENDATIONS "
|
| 427 |
+
"7. REFERENCES cite SJSU CardioLab papers "
|
| 428 |
+
"Use specific values. Write in professional academic style.")
|
| 429 |
+
user_msg = "Write research report for: " + experiment_type
|
| 430 |
+
if data_description and data_description.strip():
|
| 431 |
+
user_msg += chr(10) + "Description: " + data_description
|
| 432 |
+
if results and results.strip():
|
| 433 |
+
user_msg += chr(10) + "Results: " + results
|
| 434 |
+
if paper_context:
|
| 435 |
+
user_msg += chr(10) + "SJSU papers: " + paper_context[:600]
|
| 436 |
+
resp = client.chat.completions.create(
|
| 437 |
+
model="llama-3.3-70b-versatile",
|
| 438 |
+
messages=[{"role":"system","content":system_msg},{"role":"user","content":user_msg}],
|
| 439 |
+
max_tokens=1500)
|
| 440 |
+
return resp.choices[0].message.content
|
| 441 |
+
except Exception as e:
|
| 442 |
+
return "Error generating report: " + str(e)
|
| 443 |
+
|
| 444 |
+
def generate_hypothesis(research_area, current_findings):
|
| 445 |
+
if not GROQ_KEY: return "Error: Add GROQ_API_KEY to Space Settings."
|
| 446 |
+
if not research_area: return "Please select a research area."
|
| 447 |
try:
|
| 448 |
+
client = Groq(api_key=GROQ_KEY)
|
| 449 |
+
paper_context, _ = search_papers(research_area, n=3)
|
| 450 |
+
system_msg = ("You are CardioLab AI research assistant for SJSU Biomedical Engineering. "
|
| 451 |
+
"Generate 3 specific testable research hypotheses. For each provide: "
|
| 452 |
+
"H0 null hypothesis, "
|
| 453 |
+
"H1 alternative hypothesis, "
|
| 454 |
+
"Scientific rationale, "
|
| 455 |
+
"Suggested experiment, "
|
| 456 |
+
"Expected outcome and measurable metrics. "
|
| 457 |
+
"Base on SJSU CardioLab research.")
|
| 458 |
+
user_msg = "Generate hypotheses for: " + research_area
|
| 459 |
+
if current_findings and current_findings.strip():
|
| 460 |
+
user_msg += chr(10) + "Current findings: " + current_findings
|
| 461 |
+
if paper_context:
|
| 462 |
+
user_msg += chr(10) + "SJSU papers: " + paper_context[:500]
|
| 463 |
+
resp = client.chat.completions.create(
|
| 464 |
+
model="llama-3.3-70b-versatile",
|
| 465 |
+
messages=[{"role":"system","content":system_msg},{"role":"user","content":user_msg}],
|
| 466 |
+
max_tokens=1000)
|
| 467 |
+
return resp.choices[0].message.content
|
| 468 |
+
except Exception as e:
|
| 469 |
+
return "Error: " + str(e)
|
|
|
|
|
|
|
| 470 |
|
| 471 |
+
# ββ ANALYSIS TOOLS βββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 472 |
def analyze_upad_photo(image):
|
| 473 |
if image is None: return None, "Upload a uPAD photo first."
|
| 474 |
try:
|
|
|
|
| 514 |
if vc:
|
| 515 |
ax.plot(xv,df[vc],color="#c1121f",linewidth=2.5,marker="o",markersize=5)
|
| 516 |
ax.fill_between(xv,df[vc],alpha=0.15,color="#c1121f")
|
| 517 |
+
ax.axhline(y=2.0,color="#f59e0b",linestyle="--",linewidth=2,label="Risk 2.0 m/s")
|
| 518 |
ax.set_ylabel("Velocity (m/s)",color=ac); ax.legend(fontsize=9,labelcolor=fg,facecolor=pb)
|
| 519 |
def ps(ax):
|
| 520 |
if sc2:
|
|
|
|
| 522 |
ax.plot(xp,df[sc2],color="#0057a8",linewidth=2.5,marker="s",markersize=5)
|
| 523 |
ax.fill_between(xp,df[sc2],alpha=0.15,color="#0057a8")
|
| 524 |
ax.axhline(y=5,color="#f59e0b",linestyle="--",linewidth=2,label="Caution 5 Pa")
|
| 525 |
+
ax.axhline(y=10,color="#c1121f",linestyle="--",linewidth=2,label="Risk 10 Pa")
|
| 526 |
ax.set_ylabel("Shear (Pa)",color=ac); ax.legend(fontsize=9,labelcolor=fg,facecolor=pb)
|
| 527 |
def psc(ax):
|
| 528 |
if vc and sc2:
|
|
|
|
| 592 |
try:
|
| 593 |
client=Groq(api_key=GROQ_KEY)
|
| 594 |
resp=client.chat.completions.create(model="llama-3.3-70b-versatile",
|
| 595 |
+
messages=[{"role":"system","content":"Hematology expert. Thrombogenicity risk."},
|
| 596 |
{"role":"user","content":"TGT:"+chr(10)+df.describe().to_string()[:500]}],max_tokens=250)
|
| 597 |
ai=chr(10)+"AI: "+resp.choices[0].message.content
|
| 598 |
except: pass
|
|
|
|
| 628 |
def piv_manual(v,s,h):
|
| 629 |
vr="HIGH-stenosis" if float(v)>2.0 else "NORMAL"
|
| 630 |
sr="HIGH-thrombosis" if float(s)>10 else "ELEVATED" if float(s)>5 else "NORMAL"
|
| 631 |
+
return "Velocity: "+str(v)+" m/s - "+vr+chr(10)+"Shear: "+str(s)+" Pa - "+sr+chr(10)+"HR: "+str(h)+" bpm"
|
| 632 |
|
| 633 |
def tgt_manual(t, p, h, pl, tm):
    """Summarize manually entered TGT (thrombogenicity) blood-marker values.

    Parameters:
        t: TAT in ng/mL.
        p: PF1.2 in nmol/L.
        h: free hemoglobin in mg/L.
        pl: platelet count (thousand/uL).
        tm: sampling time point in minutes — currently unused; kept so the
            UI wiring that passes five inputs keeps working.

    Returns:
        A summary string ending in a LOW/MODERATE/HIGH RISK verdict based on
        how many of the four markers fall outside range.
    """
    # Count out-of-range markers. NOTE(review): these cutoffs match the
    # normal-range boundaries (PF1.2 > 2.0, platelets < 150), not the
    # HIGH-RISK cutoffs (3.0 / 100) listed elsewhere in the file — confirm
    # that counting "abnormal" rather than "high-risk" markers is intended.
    risk = sum([float(t) > 15, float(p) > 2.0, float(h) > 50, float(pl) < 150])
    verdict = "HIGH RISK" if risk >= 3 else "MODERATE" if risk >= 2 else "LOW RISK"
    # f-string with literal "\n" replaces the original chr(10) + concatenation chain.
    return f"TAT:{t} PF1.2:{p}\nHemo:{h} Plt:{pl}\nRESULT: {verdict}"
|
| 636 |
|
| 637 |
+
# ββ UI βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 638 |
with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
| 639 |
gr.HTML(HEADER)
|
| 640 |
|
| 641 |
papers_count = len(set(m["paper"] for m in METADATA)) if PAPERS_LOADED else 0
|
| 642 |
+
model_status = "Fine-tuned Model LOADED" if CARDIOLAB_MODEL_LOADED else "Fine-tuned model loading..."
|
| 643 |
+
rag_status = "RAG: " + str(len(CHUNKS)) + " chunks from " + str(papers_count) + " SJSU papers" if PAPERS_LOADED else "RAG: loading..."
|
| 644 |
+
gr.HTML("<div style='background:#1a7340;color:white;text-align:center;padding:7px;font-size:0.82em;font-weight:700;'>" + rag_status + " | " + model_status + " | Select CardioLab Fine-tuned in Model dropdown!</div>")
|
|
|
|
| 645 |
|
| 646 |
with gr.Tabs():
|
| 647 |
+
|
| 648 |
with gr.Tab("Chat"):
|
| 649 |
with gr.Row():
|
| 650 |
with gr.Column(scale=1, min_width=200):
|
| 651 |
+
gr.HTML("<div style='background:#202123;padding:10px;border-radius:8px;margin-bottom:6px;'><div style='color:#e8a020;font-weight:700;font-size:0.85em;'>SJSU CARDIOLAB</div><div style='color:#9ca3af;font-size:0.7em;'>Conversations</div></div>")
|
|
|
|
|
|
|
| 652 |
new_chat_btn = gr.Button("New Chat", variant="secondary")
|
| 653 |
session_dropdown = gr.Dropdown(choices=get_session_list(), label="Saved Sessions", interactive=True)
|
| 654 |
load_btn = gr.Button("Load Session", variant="primary")
|
|
|
|
| 683 |
voice_clear.click(lambda: [], outputs=voice_chatbot)
|
| 684 |
|
| 685 |
with gr.Tab("Papers"):
|
| 686 |
+
gr.Markdown("### Search PubMed + Semantic Scholar + SJSU ScholarWorks")
|
| 687 |
with gr.Row():
|
| 688 |
search_input = gr.Textbox(placeholder="e.g. bileaflet mechanical heart valve thrombogenicity hemodynamics", label="Research Topic", scale=3)
|
| 689 |
search_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="AI Model", scale=1)
|
| 690 |
search_btn = gr.Button("Search", variant="primary", scale=1)
|
| 691 |
search_output = gr.Textbox(label="Results", lines=22)
|
| 692 |
+
search_btn.click(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)
|
| 693 |
+
search_input.submit(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)
|
| 694 |
|
| 695 |
with gr.Tab("PIV CSV"):
|
| 696 |
with gr.Row():
|
|
|
|
| 766 |
"uPAD Fabrication","uPAD Creatinine Test",
|
| 767 |
"FSI COMSOL Simulation","Valve Testing"],
|
| 768 |
value="TGT Blood Testing", label="Experiment Type")
|
| 769 |
+
proto_params = gr.Textbox(placeholder="e.g. 27mm SJM valve 70bpm porcine blood",
|
|
|
|
| 770 |
label="Specific Parameters", lines=2)
|
| 771 |
proto_btn = gr.Button("Generate Protocol", variant="primary")
|
| 772 |
with gr.Column(scale=2):
|
|
|
|
| 783 |
"Heart Valve Comparison"],
|
| 784 |
value="TGT Thrombogenicity Study", label="Study Type")
|
| 785 |
report_desc = gr.Textbox(
|
| 786 |
+
placeholder="e.g. TGT with 27mm SJM bileaflet at 70bpm 150mL porcine blood",
|
| 787 |
label="Experiment Description", lines=3)
|
| 788 |
report_results = gr.Textbox(
|
| 789 |
placeholder="e.g. TAT=12.3 ng/mL PF1.2=2.8 Hemo=45 Plt=142",
|
|
|
|
| 813 |
hyp_btn.click(generate_hypothesis, inputs=[hyp_area, hyp_findings], outputs=hyp_output)
|
| 814 |
|
| 815 |
gr.HTML("""<div style="text-align:center;padding:10px;border-top:1px solid #e5e7eb;background:#f9fafb;">
|
| 816 |
+
<span style="color:#9ca3af;font-size:0.75em;">CardioLab AI v38 | SJSU Biomedical Engineering | RAG + Fine-tuned + Phase D | Inspired by <a href="https://github.com/snap-stanford/Biomni" style="color:#c1121f;">Biomni Stanford</a> | Apache 2.0 | $0 Cost</span></div>""")
|
| 817 |
|
| 818 |
demo.launch()
|