Saicharan21 commited on
Commit
5a4d833
·
verified ·
1 Parent(s): 99eedec

Upload versions/app_v38_final.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. versions/app_v38_final.py +831 -0
versions/app_v38_final.py ADDED
@@ -0,0 +1,831 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os, requests, io, json
3
+ import numpy as np
4
+ import pandas as pd
5
+ import matplotlib
6
+ matplotlib.use("Agg")
7
+ import matplotlib.pyplot as plt
8
+ from groq import Groq
9
+ from PIL import Image
10
+ from datetime import datetime
11
+ from huggingface_hub import HfApi, hf_hub_download
12
+
13
# API credentials are injected via the Space's environment settings; empty
# strings disable the corresponding features rather than crashing at import.
GROQ_KEY = os.environ.get("GROQ_API_KEY", "")
HF_TOKEN = os.environ.get("HF_TOKEN", "")
# Hugging Face repo ids used for persistence and retrieval.
HISTORY_REPO = "Saicharan21/cardiolab-chat-history"  # dataset: saved chat sessions
PAPERS_DB_REPO = "Saicharan21/cardiolab-papers-db"   # dataset: paper chunks + embeddings
CARDIOLAB_MODEL = "Saicharan21/CardioLab-AI-Model"   # fine-tuned causal LM repo

# UI label -> backend model id. "cardiolab" is served locally by the
# fine-tuned model; every other id is passed through to the Groq API.
CHAT_MODELS = {
    "CardioLab Fine-tuned (SJSU)": "cardiolab",
    "Llama 3.3 70B (Best)": "llama-3.3-70b-versatile",
    "Llama 3.1 8B (Fast)": "llama-3.1-8b-instant",
    "Llama 4 Scout (New)": "meta-llama/llama-4-scout-17b-16e-instruct",
    "Llama 4 Maverick": "meta-llama/llama-4-maverick-17b-128e-instruct",
}

# Compact lab knowledge base injected into system prompts: equipment,
# protocol parameters, and clinical reference ranges used across the app.
KNOWHOW = ("MCL: Sylgard 184 PDMS 10:1 ratio 48hr cure green laser PIV 70bpm 5L/min cardiac output 80-120mmHg. "
           "TGT: Arduino Uno Stepper Motor 150mL blood sampled at 0 20 40 60 minutes. "
           "NORMAL RANGES: TAT below 8 ng/mL. PF1.2 below 2.0 nmol/L. Free hemoglobin below 20 mg/L. Platelets above 150 thousand per uL. "
           "HIGH RISK: TAT above 15. PF1.2 above 3.0. Hemoglobin above 50. Platelets below 100. "
           "uPAD: Jaffe reaction creatinine picric acid orange-red. Normal creatinine 0.6-1.2 mg/dL. Borderline 1.2-1.5. CKD above 1.5. "
           "Stage2 1.5-3.0. Stage3-4 3.0-6.0. Stage5 above 6.0. "
           "MHV: 27mm SJM Regent bileaflet also trileaflet monoleaflet pediatric. "
           "PIV: green laser 532nm time-resolved. Normal velocity 0.5-2.0 m/s. Normal shear below 5 Pa. Risk above 10 Pa. "
           "Equipment: Heska Element HT5 hematology analyzer time-resolved PIV Tygon tubing Arduino Uno stepper motor.")
36
+
37
# Custom Gradio stylesheet: light theme with SJSU red (#c1121f) accents.
CSS = """
body, .gradio-container { background: #f7f7f8 !important; font-family: -apple-system, BlinkMacSystemFont, Segoe UI, sans-serif !important; }
.tab-nav { background: #ffffff !important; border-bottom: 1px solid #e5e7eb !important; padding: 0 16px !important; display: flex !important; flex-wrap: wrap !important; }
.tab-nav button { background: transparent !important; color: #6b7280 !important; border: none !important; border-bottom: 2px solid transparent !important; padding: 10px 12px !important; font-weight: 500 !important; font-size: 0.8em !important; white-space: nowrap !important; border-radius: 0 !important; }
.tab-nav button:hover { color: #111827 !important; background: #f9fafb !important; }
.tab-nav button.selected { color: #c1121f !important; border-bottom: 2px solid #c1121f !important; font-weight: 700 !important; background: transparent !important; }
.message.user { background: #f3f4f6 !important; color: #1a202c !important; border-radius: 12px !important; }
.message.bot { background: #ffffff !important; color: #1a202c !important; border-left: 3px solid #c1121f !important; }
textarea { background: #ffffff !important; color: #1a202c !important; border: 1px solid #d1d5db !important; border-radius: 10px !important; }
button.primary { background: #c1121f !important; color: white !important; border: none !important; border-radius: 8px !important; font-weight: 600 !important; }
button.secondary { background: #f3f4f6 !important; color: #374151 !important; border: 1px solid #d1d5db !important; border-radius: 8px !important; }
input[type=number] { background: #f9fafb !important; color: #1a202c !important; border: 1px solid #d1d5db !important; border-radius: 8px !important; }
"""

# Static HTML banner rendered at the top of the app: SJSU branding, ECG-trace
# SVG motifs, and the research-pillar summary. Pure presentation markup —
# nothing here is read back by the Python code.
HEADER = """<div style="background:linear-gradient(135deg,#0a0f2e 0%,#1a0a0a 100%);padding:0;border-bottom:3px solid #c1121f;overflow:hidden;">
<svg style="position:absolute;opacity:0.07;width:100%;height:100%;" viewBox="0 0 1200 120" preserveAspectRatio="none">
<polyline points="0,60 100,60 130,20 150,100 170,10 200,90 220,60 400,60 430,20 450,100 470,10 500,90 520,60 700,60 730,20 750,100 770,10 800,90 820,60 1000,60 1030,20 1050,100 1070,10 1100,90 1120,60 1200,60" fill="none" stroke="#c1121f" stroke-width="3"/>
</svg>
<div style="max-width:1200px;margin:0 auto;padding:16px 24px;display:flex;align-items:center;justify-content:space-between;position:relative;z-index:1;">
<div style="display:flex;align-items:center;gap:14px;">
<svg width="55" height="55" viewBox="0 0 100 100"><circle cx="50" cy="35" r="28" fill="#0057a8" opacity="0.9"/><ellipse cx="50" cy="14" rx="22" ry="10" fill="#0057a8"/>
<polygon points="30,14 33,4 36,14" fill="#e8a020"/><polygon points="36,12 39,2 42,12" fill="#e8a020"/>
<polygon points="42,11 45,1 48,11" fill="#e8a020"/><polygon points="48,11 51,1 54,11" fill="#e8a020"/>
<polygon points="54,12 57,2 60,12" fill="#e8a020"/><polygon points="60,14 63,4 66,14" fill="#e8a020"/>
<rect x="36" y="30" width="28" height="22" rx="4" fill="#0057a8"/><rect x="40" y="35" width="8" height="12" rx="2" fill="#e8a020"/>
<rect x="34" y="50" width="32" height="8" rx="4" fill="#0057a8"/></svg>
<div><div style="color:#9ca3af;font-size:0.7em;letter-spacing:2px;text-transform:uppercase;">San Jose State University</div>
<div style="color:#e8a020;font-size:0.82em;font-weight:700;">Biomedical Engineering</div></div></div>
<div style="text-align:center;flex:1;padding:0 20px;">
<div style="display:flex;align-items:center;justify-content:center;gap:10px;margin-bottom:3px;">
<svg width="100" height="28" viewBox="0 0 120 32"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg>
<div style="font-size:2em;font-weight:900;letter-spacing:2px;"><span style="color:#ffffff;">Cardio</span><span style="color:#c1121f;">Lab</span><span style="color:#ffffff;"> AI</span></div>
<svg width="100" height="28" viewBox="0 0 120 32" style="transform:scaleX(-1);"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg></div>
<div style="color:#9ca3af;font-size:0.68em;letter-spacing:2px;text-transform:uppercase;">RAG + Fine-tuned | Protocol Generator | Report Writer | BioGPT | 5 AI Models</div></div>
<div style="display:flex;align-items:center;gap:14px;">
<div style="text-align:right;"><div style="color:#9ca3af;font-size:0.68em;text-transform:uppercase;">Research Pillars</div>
<div style="color:#ffffff;font-size:0.72em;margin-top:3px;">MHV CKD FSI</div>
<div style="color:#9ca3af;font-size:0.62em;margin-top:2px;">MCL PIV TGT uPAD COMSOL</div></div>
<svg width="48" height="48" viewBox="0 0 100 90">
<path d="M50 85 C50 85 5 55 5 30 C5 15 18 5 30 5 C38 5 45 9 50 15 C55 9 62 5 70 5 C82 5 95 15 95 30 C95 55 50 85 50 85Z" fill="#c1121f" opacity="0.9"/>
<polyline points="25,45 32,45 35,35 38,55 41,30 44,50 50,45 75,45" fill="none" stroke="white" stroke-width="2.5" stroke-linecap="round" opacity="0.9"/></svg></div></div>
<div style="height:3px;background:linear-gradient(90deg,#0057a8,#c1121f,#e8a020,#c1121f,#0057a8);"></div></div>"""
79
+
80
# ── PAPER DATABASE ─────────────────────────────────────────────────
# Module-level state, populated by load_papers() / load_cardiolab_model()
# at startup. The *_LOADED flags gate every use of the heavy objects.
CHUNKS = []          # text chunks extracted from the lab's papers
METADATA = []        # per-chunk metadata, parallel to CHUNKS (has "paper" key)
EMBEDDINGS = None    # numpy array of chunk embeddings, rows parallel to CHUNKS
PAPERS_LOADED = False
EMBEDDER = None      # SentenceTransformer used to embed incoming queries
CARDIOLAB_TOKENIZER = None
CARDIOLAB_LLM = None
CARDIOLAB_MODEL_LOADED = False
89
+
90
def load_papers():
    """Download the paper database (chunks, metadata, embeddings) from the
    Hub dataset repo and initialise the query embedder.

    Mutates the CHUNKS/METADATA/EMBEDDINGS/EMBEDDER globals and sets
    PAPERS_LOADED on success. Returns True on success, False on any error
    (the app then runs without RAG context rather than crashing).
    """
    global CHUNKS, METADATA, EMBEDDINGS, PAPERS_LOADED, EMBEDDER
    try:
        # Imported lazily so the app still starts when sentence-transformers
        # is unavailable in the environment.
        from sentence_transformers import SentenceTransformer
        chunks_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="chunks.json", repo_type="dataset", token=HF_TOKEN)
        meta_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="metadata.json", repo_type="dataset", token=HF_TOKEN)
        emb_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="embeddings.npy", repo_type="dataset", token=HF_TOKEN)
        with open(chunks_path) as f: CHUNKS = json.load(f)
        with open(meta_path) as f: METADATA = json.load(f)
        EMBEDDINGS = np.load(emb_path)
        # Same model family the embeddings were presumably built with — TODO
        # confirm against the papers-db build script.
        EMBEDDER = SentenceTransformer("all-MiniLM-L6-v2")
        PAPERS_LOADED = True
        print("Papers loaded: " + str(len(CHUNKS)) + " chunks")
        return True
    except Exception as e:
        print("Paper load error: " + str(e))
        return False
107
+
108
def load_cardiolab_model():
    """Load the fine-tuned CardioLab causal LM and its tokenizer from the Hub.

    Uses fp16 + device_map="auto" when CUDA is available, fp32 on CPU.
    Mutates the CARDIOLAB_* globals and sets CARDIOLAB_MODEL_LOADED on
    success. Returns True on success, False on any error.
    """
    global CARDIOLAB_TOKENIZER, CARDIOLAB_LLM, CARDIOLAB_MODEL_LOADED
    try:
        # Heavy imports kept local so the app can start without torch/transformers.
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer
        print("Loading CardioLab fine-tuned model...")
        CARDIOLAB_TOKENIZER = AutoTokenizer.from_pretrained(CARDIOLAB_MODEL, token=HF_TOKEN)
        # The base model defines no pad token; reuse EOS so batching/truncation work.
        CARDIOLAB_TOKENIZER.pad_token = CARDIOLAB_TOKENIZER.eos_token
        device = "cuda" if torch.cuda.is_available() else "cpu"
        CARDIOLAB_LLM = AutoModelForCausalLM.from_pretrained(
            CARDIOLAB_MODEL, token=HF_TOKEN,
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            device_map="auto" if device == "cuda" else None,
            low_cpu_mem_usage=True
        )
        CARDIOLAB_MODEL_LOADED = True
        print("CardioLab model loaded!")
        return True
    except Exception as e:
        print("CardioLab model error: " + str(e))
        return False
129
+
130
+ load_papers()
131
+ load_cardiolab_model()
132
+
133
def search_papers(query, n=4):
    """Semantic search over the loaded SJSU paper chunks.

    Parameters
    ----------
    query : str
        Free-text search query.
    n : int
        Number of top-scoring chunks to consider.

    Returns
    -------
    tuple[str, list[dict]]
        (prompt context string, result dicts with "chunk"/"paper"/"score").
        Returns ("", []) when the database is not loaded or on any error,
        so callers can always treat the context as optional.
    """
    if not PAPERS_LOADED or EMBEDDINGS is None or EMBEDDER is None:
        return "", []
    try:
        # Cosine similarity: normalise both sides, then one matrix-vector product.
        q_emb = EMBEDDER.encode([query])
        norms = np.linalg.norm(EMBEDDINGS, axis=1, keepdims=True)
        emb_norm = EMBEDDINGS / (norms + 1e-10)  # epsilon guards zero rows
        q_norm = q_emb / (np.linalg.norm(q_emb) + 1e-10)
        scores = (emb_norm @ q_norm.T).flatten()
        top_idx = np.argsort(scores)[::-1][:n]
        context = ""
        results = []
        seen = set()
        for idx in top_idx:
            chunk = CHUNKS[idx]
            meta = METADATA[idx]
            score = float(scores[idx])
            if score > 0.25:  # similarity threshold: drop weak matches
                results.append({"chunk": chunk, "paper": meta["paper"], "score": score})
                # Emit each paper's header only once, in rank order.
                if meta["paper"] not in seen:
                    context += "\n=== FROM: " + meta["paper"] + " ===\n"
                    seen.add(meta["paper"])
                context += chunk[:500] + "\n"
        return context, results
    except Exception:
        # Best-effort retrieval: a failed search degrades to "no context"
        # instead of crashing the chat handler.
        return "", []
159
+
160
# ── SESSION MANAGEMENT ─────────────────────────────────────────────
def load_all_sessions():
    """Fetch the saved-sessions dict from the history dataset repo.

    Returns {} when no HF token is configured, the file does not exist yet,
    or the download/parse fails — persistence is strictly best-effort.
    """
    if not HF_TOKEN: return {}
    try:
        path = hf_hub_download(repo_id=HISTORY_REPO, filename="chat_history.json", repo_type="dataset", token=HF_TOKEN)
        with open(path) as f: return json.load(f)
    except Exception:  # narrowed from bare except: keep KeyboardInterrupt/SystemExit catchable
        return {}
167
+
168
def save_all_sessions(sessions):
    """Upload the full sessions dict to the history repo as chat_history.json.

    Returns True on success, False when no token is configured or the
    upload fails (callers surface "Save failed" to the user).
    """
    if not HF_TOKEN: return False
    try:
        api2 = HfApi(token=HF_TOKEN)
        api2.upload_file(path_or_fileobj=json.dumps(sessions, indent=2).encode(),
                         path_in_repo="chat_history.json", repo_id=HISTORY_REPO,
                         repo_type="dataset", token=HF_TOKEN, commit_message="Update")
        return True
    except Exception:  # narrowed from bare except
        return False
177
+
178
def get_session_list():
    """Return saved session names, newest first, or a placeholder entry."""
    sessions = load_all_sessions()
    if not sessions:
        return ["No saved sessions"]
    names = list(sessions.keys())
    names.reverse()  # dict preserves insertion order; newest was added last
    return names
181
+
182
def save_session(history, name):
    """Persist the current chat under *name*.

    Returns (status message, gr.update refreshing the session dropdown).
    An empty/blank name is replaced with a timestamped default label.
    """
    if not history:
        return "Nothing to save", gr.update()
    label = name.strip() if name else ""
    if not label:
        label = "Chat " + datetime.now().strftime("%b %d %H:%M")
    sessions = load_all_sessions()
    sessions[label] = {"messages": history, "saved_at": datetime.now().isoformat()}
    saved = save_all_sessions(sessions)
    status = "Saved: " + label if saved else "Save failed"
    return status, gr.update(choices=get_session_list(), value=label)
190
+
191
def load_session(name):
    """Load a named session; returns (messages, status message)."""
    if not name or "No saved" in name:
        return [], "Select a session"
    stored = load_all_sessions()
    if name not in stored:
        return [], "Not found"
    return stored[name]["messages"], "Loaded: " + name
195
+
196
def delete_session(name):
    """Delete a saved session and refresh the dropdown choices.

    Returns (status message, gr.update for the session dropdown).
    """
    if not name or "No saved" in name:
        return "Select a session", gr.update()
    sessions = load_all_sessions()
    if name not in sessions:
        return "Not found", gr.update()
    del sessions[name]
    save_all_sessions(sessions)
    remaining = get_session_list()
    selected = remaining[0] if remaining else None
    return "Deleted: " + name, gr.update(choices=remaining, value=selected)
204
+
205
def new_chat():
    """Reset the chat UI: empty history, cleared textbox, and a status note."""
    return ([], "", "New chat started")
206
+
207
# ── SEARCH ─────────────────────────────────────────────────────────
def get_pubmed_chat(query, n=3):
    """Return up to *n* recent PubMed article URLs for *query*, newline-joined.

    The term is AND-ed with the lab's domain keywords to keep results on
    topic. Returns "" when nothing is found or the request fails — callers
    treat these links as optional extras appended to an answer.
    """
    try:
        r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
                         params={"db":"pubmed","term":query+" AND (heart valve OR hemodynamics OR microfluidic OR thrombogen OR creatinine OR CKD)","retmax":n,"retmode":"json","sort":"date","field":"tiab"},timeout=10)
        ids = r.json()["esearchresult"]["idlist"]
        return "\n".join("https://pubmed.ncbi.nlm.nih.gov/" + i for i in ids) if ids else ""
    except Exception:  # narrowed from bare except; network errors degrade to ""
        return ""
215
+
216
def expand_query_ai(query):
    """Rewrite *query* into MeSH-style PubMed search terms via a fast Groq model.

    Falls back to the original query when no API key is configured, the
    call fails, or the model returns an empty string.
    """
    if not GROQ_KEY: return query
    try:
        client = Groq(api_key=GROQ_KEY)
        resp = client.chat.completions.create(model="llama-3.1-8b-instant",
            messages=[{"role":"system","content":"Biomedical PubMed expert. Convert to MeSH terms for heart valves hemodynamics PIV thrombogenicity FSI microfluidics CKD. Return ONLY terms."},
                      {"role":"user","content":"Optimize: " + query}], max_tokens=80)
        # `or query` guards against the model returning an empty string.
        return resp.choices[0].message.content.strip() or query
    except Exception:  # narrowed from bare except
        return query
225
+
226
def quick_search(query, search_model="Llama 3.3 70B (Best)"):
    """Literature search across PubMed and Semantic Scholar.

    Expands *query* with expand_query_ai, fetches recent results from both
    sources (each best-effort), de-duplicates by title prefix, and returns a
    formatted plain-text report ending with an SJSU ScholarWorks link.

    *search_model* is accepted for UI signature compatibility but is not
    used here — expansion always uses the fast model inside expand_query_ai.
    """
    if not query.strip(): return "Please enter a topic."
    expanded = expand_query_ai(query)
    results = []
    # --- PubMed: esearch for ids, efetch for titles/years (best-effort) ---
    try:
        forced = expanded + " AND (heart valve OR hemodynamics OR microfluidic OR thrombogen OR creatinine OR PIV OR CFD OR CKD)"
        r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
                         params={"db":"pubmed","term":forced,"retmax":8,"retmode":"json","sort":"date","field":"tiab"},timeout=12)
        ids = r.json()["esearchresult"]["idlist"]
        if ids:
            r2 = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi",
                              params={"db":"pubmed","id":",".join(ids),"retmode":"xml","rettype":"abstract"},timeout=12)
            import xml.etree.ElementTree as ET
            root = ET.fromstring(r2.content)
            for article in root.findall(".//PubmedArticle"):
                try:
                    title = article.find(".//ArticleTitle").text or "No title"
                    pmid = article.find(".//PMID").text or ""
                    year_el = article.find(".//PubDate/Year")
                    year = year_el.text if year_el is not None else ""
                    results.append({"source":"PubMed","title":str(title),"year":year,"url":"https://pubmed.ncbi.nlm.nih.gov/"+pmid})
                except Exception:  # malformed record: skip it, keep the rest
                    continue
    except Exception:  # narrowed from bare except; PubMed outage is non-fatal
        pass
    # --- Semantic Scholar (best-effort, 2015+ only) ---
    try:
        r = requests.get("https://api.semanticscholar.org/graph/v1/paper/search",
                         params={"query":expanded,"limit":6,"fields":"title,year,url,citationCount"},timeout=12)
        for p in r.json().get("data",[]):
            year = p.get("year",0) or 0
            if int(year) >= 2015:
                results.append({"source":"Scholar","title":p.get("title",""),"year":str(year),"url":p.get("url",""),"citations":str(p.get("citationCount",0))})
    except Exception:  # narrowed from bare except
        pass
    # --- Format report, de-duplicating near-identical titles ---
    out = "QUERY: " + query + "\n" + "AI EXPANDED: " + expanded + "\n" + "="*45 + "\n\n"
    groups = {"PubMed":[],"Scholar":[]}
    seen = set()
    # Fix: loop variable renamed from `r`, which shadowed the HTTP response above.
    for rec in results:
        key = rec["title"][:50].lower()
        if key not in seen and rec["url"]:
            seen.add(key)
            groups[rec["source"]].append(rec)
    for source, papers in groups.items():
        if not papers: continue
        out += "--- " + source + " ---" + "\n"
        for p in papers[:8]:
            out += p["title"][:85] + " (" + p["year"] + ")" + "\n"
            out += " " + p["url"] + "\n\n"
    out += "--- SJSU ScholarWorks ---" + "\n"
    out += "https://scholarworks.sjsu.edu/do/search/?q=" + requests.utils.quote(query) + "&context=6781027"
    return out
273
+
274
# ── CHAT ───────────────────────────────────────────────────────────
def answer_with_cardiolab_model(question, paper_context=""):
    """Generate an answer with the locally loaded fine-tuned model.

    Returns the answer string, or None when the model is unavailable,
    generation fails, or the output is too short to be useful — the caller
    then falls back to a Groq-hosted model.
    """
    if not CARDIOLAB_MODEL_LOADED: return None
    try:
        import torch
        system = "You are CardioLab AI for SJSU Biomedical Engineering."
        if paper_context: system += " Use these SJSU research papers: " + paper_context[:400]
        # Zephyr/TinyLlama-style chat markers; assumes the fine-tune was
        # trained with this template — TODO confirm against training script.
        prompt = "<|system|>" + system + "</s><|user|>" + question + "</s><|assistant|>"
        inputs = CARDIOLAB_TOKENIZER(prompt, return_tensors="pt", truncation=True, max_length=512)
        # Follow wherever device_map placed the first parameters.
        device = next(CARDIOLAB_LLM.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = CARDIOLAB_LLM.generate(**inputs, max_new_tokens=200, do_sample=True,
                                             temperature=0.3, pad_token_id=CARDIOLAB_TOKENIZER.eos_token_id)
        response = CARDIOLAB_TOKENIZER.decode(outputs[0], skip_special_tokens=True)
        # Strip the echoed prompt: keep only text after the assistant marker,
        # falling back to the tail of the decode if the marker was stripped.
        if "<|assistant|>" in response:
            answer = response.split("<|assistant|>")[-1].strip()
        else:
            answer = response[-300:].strip()
        # Very short outputs are treated as generation failures.
        return answer if len(answer) > 20 else None
    except Exception as e:
        print("CardioLab model error: " + str(e))
        return None
297
+
298
def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
    """Main RAG chat handler for the research tab.

    Retrieves SJSU paper context for *message*, answers with either the
    local fine-tuned model or a Groq-hosted model (selected by *chat_model*),
    then appends paper citations and PubMed links. Mutates *history* in
    place (messages-format dicts) and returns ("", history) — the empty
    string clears the input textbox.
    """
    if not message.strip(): return "", history
    paper_context, paper_results = search_papers(message, n=4)
    # Path 1: local fine-tuned model, when selected and actually loaded.
    if chat_model == "CardioLab Fine-tuned (SJSU)" and CARDIOLAB_MODEL_LOADED:
        answer = answer_with_cardiolab_model(message, paper_context)
        if answer:  # on None we fall through to the Groq path below
            if paper_results:
                # De-duplicate paper names while preserving rank order.
                unique_papers = list(dict.fromkeys([r["paper"] for r in paper_results]))
                answer += chr(10) + chr(10) + "Sources from SJSU CardioLab papers:"
                for p in unique_papers[:3]:
                    answer += chr(10) + " - " + p.replace(".pdf","").replace("_"," ")
            pubmed = get_pubmed_chat(message, n=2)
            if pubmed: answer += chr(10) + "PubMed: " + pubmed
            history.append({"role":"user","content":message})
            history.append({"role":"assistant","content":"[CardioLab Fine-tuned] " + answer})
            return "", history
    # Path 2: Groq-hosted chat completion.
    if not GROQ_KEY:
        history.append({"role":"user","content":message})
        history.append({"role":"assistant","content":"Error: Add GROQ_API_KEY to Space Settings."})
        return "", history
    try:
        # Unknown labels fall back to the strongest default model.
        model_id = CHAT_MODELS.get(chat_model, "llama-3.3-70b-versatile")
        client = Groq(api_key=GROQ_KEY)
        if paper_context:
            system_prompt = ("You are CardioLab AI for SJSU Biomedical Engineering. "
                "Answer using SJSU CardioLab research papers below. Cite paper names with specific data." +
                chr(10) + chr(10) + "SJSU CARDIOLAB PAPERS:" + chr(10) + paper_context +
                chr(10) + chr(10) + "ADDITIONAL KNOWLEDGE: " + KNOWHOW)
        else:
            system_prompt = "You are CardioLab AI for SJSU Biomedical Engineering. Expert in MHV MCL PIV TGT uPAD CKD FSI. " + KNOWHOW
        msgs = [{"role":"system","content":system_prompt}]
        # Replay prior turns; isinstance guards against legacy tuple entries.
        for item in history:
            if isinstance(item, dict): msgs.append({"role":item["role"],"content":item["content"]})
        msgs.append({"role":"user","content":message})
        resp = client.chat.completions.create(model=model_id, messages=msgs, max_tokens=800)
        answer = resp.choices[0].message.content
        if paper_results:
            unique_papers = list(dict.fromkeys([r["paper"] for r in paper_results]))
            answer += chr(10) + chr(10) + "Sources from SJSU CardioLab papers:"
            for p in unique_papers[:3]:
                answer += chr(10) + " - " + p.replace(".pdf","").replace("_"," ")
        pubmed = get_pubmed_chat(message, n=2)
        if pubmed: answer += chr(10) + "PubMed: " + pubmed
        history.append({"role":"user","content":message})
        history.append({"role":"assistant","content":answer})
        return "", history
    except Exception as e:
        # Surface the error in the chat itself so the UI never hangs.
        history.append({"role":"user","content":message})
        history.append({"role":"assistant","content":"Error: " + str(e)})
        return "", history
348
+
349
def voice_chat(audio, history):
    """Transcribe a recorded question with Whisper (via Groq) and answer it.

    Parameters
    ----------
    audio : str | None
        Path to the recorded audio file from the Gradio mic component.
    history : list[dict]
        Messages-format chat history; mutated in place and returned.
    """
    if audio is None:
        history.append({"role":"assistant","content":"Please record your question first."})
        return history
    # Fix: guard missing key explicitly (consistent with research_chat) instead
    # of letting the Groq client raise and surfacing a raw "Voice error: ...".
    if not GROQ_KEY:
        history.append({"role":"assistant","content":"Error: Add GROQ_API_KEY to Space Settings."})
        return history
    try:
        client = Groq(api_key=GROQ_KEY)
        with open(audio, "rb") as f:
            tx = client.audio.transcriptions.create(file=("audio.wav", f, "audio/wav"), model="whisper-large-v3")
        paper_context, _ = search_papers(tx.text, n=3)
        system = "You are CardioLab AI. " + KNOWHOW
        if paper_context: system = "You are CardioLab AI. Use these SJSU papers:" + "\n" + paper_context + "\n" + KNOWHOW
        msgs = [{"role":"system","content":system}]
        # Replay prior turns; isinstance guards against legacy tuple entries.
        for item in history:
            if isinstance(item, dict): msgs.append({"role":item["role"],"content":item["content"]})
        msgs.append({"role":"user","content":tx.text})
        resp = client.chat.completions.create(model="llama-3.3-70b-versatile", messages=msgs, max_tokens=500)
        history.append({"role":"user","content":"Voice: " + tx.text})
        history.append({"role":"assistant","content":resp.choices[0].message.content})
        return history
    except Exception as e:
        history.append({"role":"assistant","content":"Voice error: " + str(e)})
        return history
371
+
372
# ── PHASE D: PROTOCOL GENERATOR + REPORT WRITER + HYPOTHESIS ──────
def generate_protocol(experiment_type, specific_params):
    """Generate a complete SJSU CardioLab lab protocol via Groq.

    Parameters
    ----------
    experiment_type : str
        Experiment label (e.g. "MCL", "PIV", "uPAD"); also used to retrieve
        matching paper context and hard-coded lab parameters.
    specific_params : str
        Optional free-text parameters the user wants honored.

    Returns the protocol text or an error message string.
    """
    # CRITICAL DEFINITIONS - never interpret these wrong.
    DEFINITIONS = (
        "CRITICAL: TGT = Thrombogenicity Tester device. "
        "TGT measures blood CLOTTING and THROMBOSIS using Arduino Uno stepper motor rotating blood samples. "
        "TGT does NOT measure glucose. TGT biomarkers are TAT PF1.2 free hemoglobin platelets. "
        "TAT = Thrombin-Antithrombin complex normal below 8 ng/mL. "
        "PF1.2 = Prothrombin Fragment 1.2 normal below 2.0 nmol/L. "
        "Free hemoglobin normal below 20 mg/L. Platelet count normal above 150 thousand per uL. "
        "MCL = Mock Circulatory Loop cardiovascular simulation. "
        "PIV = Particle Image Velocimetry laser flow measurement. "
        "uPAD = microfluidic Paper Analytical Device for creatinine kidney disease detection. "
    )
    if not GROQ_KEY: return "Error: Add GROQ_API_KEY to Space Settings."
    if not experiment_type: return "Please select an experiment type."
    try:
        client = Groq(api_key=GROQ_KEY)
        paper_context, _ = search_papers(experiment_type, n=4)
        # Per-experiment lab constants injected into the prompt.
        lab_context = {
            "MCL": "Sylgard 184 PDMS 10:1 ratio 48hr cure. Tygon tubing. 70bpm 5L/min 80-120mmHg.",
            "PIV": "Green laser 532nm time-resolved. Normal velocity 0.5-2.0 m/s. Shear below 5 Pa.",
            "Thrombogenicity": "Arduino Uno stepper motor 48V. 150mL fresh blood. Sample at 0 20 40 60 min. Heska HT5. Measures TAT PF1.2 free hemoglobin platelets. TAT normal below 8 ng/mL. PF1.2 normal below 2.0 nmol/L.",
            "uPAD": "Whatman filter paper. Wax printer 120C. Picric acid alkaline solution. Jaffe reaction.",
            "FSI": "COMSOL Multiphysics ALE mesh. Blood 1060 kg/m3 0.0035 Pa.s. SJM bileaflet geometry.",
        }
        extra = next((v for k, v in lab_context.items() if k.lower() in experiment_type.lower()), "")
        # Fix: DEFINITIONS was built but never used — append it to the system
        # prompt so the model cannot misinterpret the lab's acronyms.
        system_msg = ("You are CardioLab AI protocol generator for SJSU Biomedical Engineering. "
                      "Generate a COMPLETE detailed lab protocol with these sections: "
                      "1. OBJECTIVE "
                      "2. MATERIALS AND EQUIPMENT with exact quantities "
                      "3. SAFETY CONSIDERATIONS "
                      "4. STEP-BY-STEP PROCEDURE numbered and detailed "
                      "5. DATA COLLECTION "
                      "6. ANALYSIS METHOD "
                      "7. EXPECTED RESULTS with normal ranges "
                      "8. TROUBLESHOOTING "
                      "Use exact SJSU CardioLab values and equipment. " + DEFINITIONS)
        user_msg = "Generate complete protocol for: " + experiment_type
        if specific_params and specific_params.strip():
            user_msg += "\n" + "Parameters: " + specific_params
        if extra:
            user_msg += "\n" + "CardioLab context: " + extra
        if paper_context:
            user_msg += "\n" + "From SJSU papers: " + paper_context[:600]
        resp = client.chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role":"system","content":system_msg},{"role":"user","content":user_msg}],
            max_tokens=1200)
        return resp.choices[0].message.content
    except Exception as e:
        return "Error generating protocol: " + str(e)
425
+
426
def generate_report(data_description, experiment_type, results):
    """Draft a professional research report for an SJSU CardioLab study.

    Combines the user's description and results with retrieved paper
    context and asks a Groq model for a structured academic write-up.
    Returns the report text or an error message string.
    """
    if not GROQ_KEY:
        return "Error: Add GROQ_API_KEY to Space Settings."
    if not experiment_type:
        return "Please select a study type."
    try:
        context, _ = search_papers(experiment_type, n=3)
        instructions = ("You are CardioLab AI report writer for SJSU Biomedical Engineering. "
                        "Generate a professional research report with these sections: "
                        "1. ABSTRACT 150 words "
                        "2. INTRODUCTION background and objectives "
                        "3. MATERIALS AND METHODS "
                        "4. RESULTS AND DISCUSSION "
                        "5. CONCLUSION "
                        "6. RECOMMENDATIONS "
                        "7. REFERENCES cite SJSU CardioLab papers "
                        "Use specific values. Write in professional academic style.")
        # Assemble the user prompt from the optional pieces, one per line.
        parts = ["Write research report for: " + experiment_type]
        if data_description and data_description.strip():
            parts.append("Description: " + data_description)
        if results and results.strip():
            parts.append("Results: " + results)
        if context:
            parts.append("SJSU papers: " + context[:600])
        reply = Groq(api_key=GROQ_KEY).chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "system", "content": instructions},
                      {"role": "user", "content": "\n".join(parts)}],
            max_tokens=1500)
        return reply.choices[0].message.content
    except Exception as e:
        return "Error generating report: " + str(e)
456
+
457
def generate_hypothesis(research_area, current_findings):
    """Produce three testable research hypotheses for a CardioLab area.

    Each hypothesis includes H0/H1, rationale, a suggested experiment, and
    measurable expected outcomes, grounded in retrieved SJSU paper context.
    Returns the generated text or an error message string.
    """
    if not GROQ_KEY:
        return "Error: Add GROQ_API_KEY to Space Settings."
    if not research_area:
        return "Please select a research area."
    try:
        context, _ = search_papers(research_area, n=3)
        instructions = ("You are CardioLab AI research assistant for SJSU Biomedical Engineering. "
                        "Generate 3 specific testable research hypotheses. For each provide: "
                        "H0 null hypothesis, "
                        "H1 alternative hypothesis, "
                        "Scientific rationale, "
                        "Suggested experiment, "
                        "Expected outcome and measurable metrics. "
                        "Base on SJSU CardioLab research.")
        # Assemble the user prompt from the optional pieces, one per line.
        parts = ["Generate hypotheses for: " + research_area]
        if current_findings and current_findings.strip():
            parts.append("Current findings: " + current_findings)
        if context:
            parts.append("SJSU papers: " + context[:500])
        reply = Groq(api_key=GROQ_KEY).chat.completions.create(
            model="llama-3.3-70b-versatile",
            messages=[{"role": "system", "content": instructions},
                      {"role": "user", "content": "\n".join(parts)}],
            max_tokens=1000)
        return reply.choices[0].message.content
    except Exception as e:
        return "Error: " + str(e)
483
+
484
# ── ANALYSIS TOOLS ─────────────────────────────────────────────────
def analyze_upad_photo(image):
    """Estimate creatinine from a uPAD colorimetric photo.

    Samples the central 30%x30% detection zone, converts mean channel
    intensities to a creatinine estimate via a linear calibration of the
    Jaffe-reaction color shift, and maps the value to a CKD stage.

    Returns (annotated PIL image, report text) or (None, error message).
    """
    if image is None: return None, "Upload a uPAD photo first."
    try:
        img = Image.fromarray(image) if not isinstance(image, Image.Image) else image
        # Fix: normalise to 3-channel RGB so grayscale or RGBA uploads do not
        # break the per-channel indexing below.
        img = img.convert("RGB")
        arr = np.array(img); h,w = arr.shape[:2]
        # Central detection zone: 35%..65% of both axes.
        y1,y2,x1,x2 = int(h*0.35),int(h*0.65),int(w*0.35),int(w*0.65)
        zone = arr[y1:y2,x1:x2]
        R,G,B = float(np.mean(zone[:,:,0])),float(np.mean(zone[:,:,1])),float(np.mean(zone[:,:,2]))
        # Linear calibration of the red-minus-blue shift, clamped at 0 mg/dL.
        c = max(0,round(0.018*(R-B)-0.3,2))
        # Staging thresholds mirror the KNOWHOW reference ranges.
        if c<1.2: s,a="Normal","Monitor annually."
        elif c<1.5: s,a="Borderline","Repeat in 3 months."
        elif c<3.0: s,a="Stage 2 CKD","Consult nephrologist."
        elif c<6.0: s,a="Stage 3-4 CKD","Immediate consultation."
        else: s,a="Stage 5 CKD","Emergency care."
        # Annotate the sampled zone on a copy of the image.
        ri=img.copy()
        import PIL.ImageDraw as D; D.Draw(ri).rectangle([x1,y1,x2,y2],outline=(0,255,0),width=3)
        return ri,("uPAD ANALYSIS"+"\n"+"R:"+str(round(R,1))+" G:"+str(round(G,1))+" B:"+str(round(B,1))+"\n"+"Creatinine: "+str(c)+" mg/dL"+"\n"+"Stage: "+s+"\n"+"Action: "+a)
    except Exception as e: return None,"Error: "+str(e)
503
+
504
def mk_chart(fn,title,bg,fg,gc,ac,pb):
    """Render a themed matplotlib chart and return it as a PIL image.

    Parameters: *fn* is a callable that draws onto the axes; *title* is the
    chart title; *bg*/*fg* are figure background/foreground colors, *gc* the
    grid color, *ac* the tick color, *pb* the plot-area background.
    """
    fig2,ax=plt.subplots(figsize=(8,5)); fig2.patch.set_facecolor(bg); ax.set_facecolor(pb)
    fn(ax); ax.set_title(title,color=fg,fontweight="bold",fontsize=13,pad=8)
    ax.tick_params(colors=ac,labelsize=10); ax.grid(True,alpha=0.3,color=gc,linestyle="--")
    # Hide the top/right spines, theme the remaining two.
    for sp in ["top","right"]: ax.spines[sp].set_visible(False)
    for sp in ["bottom","left"]: ax.spines[sp].set_color(gc)
    plt.tight_layout(); buf=io.BytesIO(); plt.savefig(buf,format="png",facecolor=bg,bbox_inches="tight",dpi=130); buf.seek(0)
    # Copy the image so it survives closing the buffer/figure (avoids leaks).
    res=Image.open(buf).copy(); plt.close(); return res
512
+
513
def analyze_piv_csv(file, theme="White"):
    """Analyze an uploaded PIV (particle image velocimetry) CSV.

    Column roles (velocity, shear, time) are detected heuristically by
    substring match on lower-cased headers, falling back to positional
    numeric columns. Renders four themed charts via ``mk_chart``.

    Returns:
        (velocity_img, shear_img, scatter_img, summary_img, status_text);
        on missing file or error, four ``None`` images plus a message.
    """
    if file is None:
        return None, None, None, None, "Upload PIV CSV first."
    try:
        df = pd.read_csv(file.name)
        cols = [c.lower().strip() for c in df.columns]
        df.columns = cols
        num_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        if not num_cols:
            return None, None, None, None, "No numeric columns."
        # Theme palette: figure background, text, grid, axis/tick, panel.
        bg = "#fff" if theme == "White" else "#0a1628"
        fg = "#1a202c" if theme == "White" else "white"
        gc = "#e2e8f0" if theme == "White" else "#2d4a8a"
        ac = "#4a5568" if theme == "White" else "#a8b2d8"
        pb = "#f7fafc" if theme == "White" else "#132340"
        x = np.arange(len(df))
        # Heuristic column detection; positional numeric fallbacks.
        vc = next((c for c in cols if any(k in c for k in ["vel", "speed", "v_mag"])), num_cols[0] if num_cols else None)
        sc2 = next((c for c in cols if any(k in c for k in ["shear", "stress", "tau", "wss"])), num_cols[1] if len(num_cols) > 1 else None)
        tc = next((c for c in cols if "time" in c or "frame" in c), None)
        xv = df[tc] if tc else x

        def pv(ax):
            # Velocity trace with risk threshold line at 2.0 m/s.
            if vc:
                ax.plot(xv, df[vc], color="#c1121f", linewidth=2.5, marker="o", markersize=5)
                ax.fill_between(xv, df[vc], alpha=0.15, color="#c1121f")
                ax.axhline(y=2.0, color="#f59e0b", linestyle="--", linewidth=2, label="Risk 2.0 m/s")
                ax.set_ylabel("Velocity (m/s)", color=ac)
                ax.legend(fontsize=9, labelcolor=fg, facecolor=pb)

        def ps(ax):
            # Wall shear stress with caution (5 Pa) and risk (10 Pa) lines.
            if sc2:
                xp = xv.values if tc else x
                ax.plot(xp, df[sc2], color="#0057a8", linewidth=2.5, marker="s", markersize=5)
                ax.fill_between(xp, df[sc2], alpha=0.15, color="#0057a8")
                ax.axhline(y=5, color="#f59e0b", linestyle="--", linewidth=2, label="Caution 5 Pa")
                ax.axhline(y=10, color="#c1121f", linestyle="--", linewidth=2, label="Risk 10 Pa")
                ax.set_ylabel("Shear (Pa)", color=ac)
                ax.legend(fontsize=9, labelcolor=fg, facecolor=pb)

        def psc(ax):
            # Velocity-vs-shear scatter coloured by sample index ("Time").
            if vc and sc2:
                s3 = ax.scatter(df[vc], df[sc2], c=x, cmap="RdYlGn_r", s=90, edgecolors=fg, linewidth=0.5, zorder=5)
                cb = plt.colorbar(s3, ax=ax, label="Time")
                cb.ax.yaxis.label.set_color(fg)
                cb.ax.tick_params(colors=ac)
                ax.axvline(x=2.0, color="#f59e0b", linestyle="--", linewidth=2)
                ax.axhline(y=10, color="#c1121f", linestyle="--", linewidth=2)
                ax.set_xlabel("Velocity (m/s)", color=ac)
                ax.set_ylabel("Shear (Pa)", color=ac)

        def psum(ax):
            # Text-only panel summarising up to three numeric columns with
            # an overall HIGH/LOW risk verdict.
            ax.axis("off")
            risk = []
            st = "CLINICAL SUMMARY" + chr(10) + "=" * 20 + chr(10) + chr(10)
            for col in num_cols[:3]:
                mn = round(df[col].mean(), 3)
                mx = round(df[col].max(), 3)
                st += col[:14] + ":" + chr(10) + " Mean: " + str(mn) + chr(10) + " Max: " + str(mx) + chr(10) + chr(10)
                if "vel" in col and mx > 2.0:
                    risk.append("HIGH VELOCITY")
                if "shear" in col and mx > 10:
                    risk.append("HIGH SHEAR")
            bc = "#c1121f" if risk else "#2ecc71"
            st += "=" * 20 + chr(10) + ("OVERALL: HIGH RISK" if risk else "OVERALL: LOW RISK")
            ax.text(0.05, 0.97, st, transform=ax.transAxes, color=fg, fontsize=10, va="top", fontfamily="monospace",
                    bbox=dict(boxstyle="round,pad=0.8", facecolor=pb, edgecolor=bc, linewidth=2.5))

        i1 = mk_chart(pv, "Velocity Profile", bg, fg, gc, ac, pb)
        i2 = mk_chart(ps, "Wall Shear Stress", bg, fg, gc, ac, pb)
        i3 = mk_chart(psc, "Velocity vs Shear", bg, fg, gc, ac, pb)
        i4 = mk_chart(psum, "Clinical Summary", bg, fg, gc, ac, pb)
        ai = ""
        if GROQ_KEY:
            # Optional AI commentary; strictly best-effort, failures are
            # non-fatal. Narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            try:
                client = Groq(api_key=GROQ_KEY)
                resp = client.chat.completions.create(
                    model="llama-3.3-70b-versatile",
                    messages=[{"role": "system", "content": "PIV expert SJSU CardioLab."},
                              {"role": "user", "content": "PIV from 27mm SJM Regent:" + chr(10) + df.describe().to_string()[:500]}],
                    max_tokens=250)
                ai = chr(10) + "AI: " + resp.choices[0].message.content
            except Exception:
                pass
        return i1, i2, i3, i4, "PIV: " + str(len(df)) + " rows" + ai
    except Exception as e:
        return None, None, None, None, "Error: " + str(e)
571
+
572
def analyze_tgt_csv(file, theme="White"):
    """Analyze an uploaded TGT (thrombogenicity tester) CSV.

    Detects TAT / PF1.2 / hemoglobin / platelet columns by substring match
    on lower-cased headers (positional numeric fallbacks) and renders four
    themed charts with clinical threshold lines.

    Returns:
        (tat_img, pf_img, hemoglobin_img, platelet_img, status_text);
        on missing file or error, four ``None`` images plus a message.
    """
    if file is None:
        return None, None, None, None, "Upload TGT CSV first."
    try:
        df = pd.read_csv(file.name)
        cols = [c.lower().strip() for c in df.columns]
        df.columns = cols
        num_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        # Theme palette: figure background, text, grid, axis/tick, panel.
        bg = "#fff" if theme == "White" else "#0a1628"
        fg = "#1a202c" if theme == "White" else "white"
        gc = "#e2e8f0" if theme == "White" else "#2d4a8a"
        ac = "#4a5568" if theme == "White" else "#a8b2d8"
        pb = "#f7fafc" if theme == "White" else "#132340"
        # Heuristic column detection; positional numeric fallbacks.
        tc = next((c for c in cols if "time" in c or "min" in c), None)
        tatc = next((c for c in cols if "tat" in c), num_cols[0] if num_cols else None)
        pfc = next((c for c in cols if "pf" in c), num_cols[1] if len(num_cols) > 1 else None)
        hc = next((c for c in cols if "hemo" in c), num_cols[2] if len(num_cols) > 2 else None)
        plc = next((c for c in cols if "platelet" in c or "plt" in c), num_cols[3] if len(num_cols) > 3 else None)

        def mk2(dc, color, yl, lim, ll, title, bar=False):
            # Build one annotated chart (line or bar) for column `dc`, with a
            # threshold line at `lim` labelled `ll`.
            # NOTE(review): the "Max: ..." title set here is immediately
            # overwritten by mk_chart's own ax.set_title(title) — confirm
            # whether the Max annotation was meant to be visible.
            def fn(ax):
                if dc and dc in df.columns:
                    xp = df[tc].values if tc else range(len(df))
                    yp = df[dc].values
                    if bar:
                        bs = ax.bar(range(len(yp)), yp, color=color, alpha=0.85, edgecolor=bg, width=0.6)
                        for b, v in zip(bs, yp):
                            ax.text(b.get_x() + b.get_width() / 2, b.get_height() + 0.5, str(round(v, 1)), ha="center", va="bottom", color=fg, fontsize=10, fontweight="bold")
                    else:
                        ax.plot(xp, yp, color=color, linewidth=3, marker="o", markersize=8)
                        ax.fill_between(xp, yp, alpha=0.15, color=color)
                        for xi, yi in zip(xp, yp):
                            ax.annotate(str(round(yi, 1)), (xi, yi), textcoords="offset points", xytext=(0, 10), ha="center", color=fg, fontsize=10, fontweight="bold")
                    ax.axhline(y=lim, color="#f59e0b", linestyle="--", linewidth=2.5, label=ll)
                    ax.legend(fontsize=10, labelcolor=fg, facecolor=pb)
                    ax.set_ylabel(yl, color=ac)
                    mv = round(float(np.max(yp)), 2)
                    ax.set_title(title + chr(10) + "Max: " + str(mv) + " - " + ("HIGH" if mv > lim else "NORMAL"), color=fg, fontweight="bold", fontsize=12)
            return mk_chart(fn, title, bg, fg, gc, ac, pb)

        i1 = mk2(tatc, "#c1121f", "TAT (ng/mL)", 8, "Normal: 8", "TAT")
        i2 = mk2(pfc, "#0057a8", "PF1.2", 2.0, "Normal: 2.0", "PF1.2")
        i3 = mk2(hc, "#2ecc71", "Free Hgb (mg/L)", 20, "Normal: 20", "Free Hemoglobin", bar=True)
        i4 = mk2(plc, "#e8a020", "Platelets", 150, "Normal>150", "Platelets")
        ai = ""
        if GROQ_KEY:
            # Optional AI commentary; best-effort. Narrowed from a bare
            # `except:` so KeyboardInterrupt/SystemExit are not swallowed.
            try:
                client = Groq(api_key=GROQ_KEY)
                resp = client.chat.completions.create(
                    model="llama-3.3-70b-versatile",
                    messages=[{"role": "system", "content": "Hematology expert. Thrombogenicity risk."},
                              {"role": "user", "content": "TGT:" + chr(10) + df.describe().to_string()[:500]}],
                    max_tokens=250)
                ai = chr(10) + "AI: " + resp.choices[0].message.content
            except Exception:
                pass
        return i1, i2, i3, i4, "TGT: " + str(len(df)) + " rows" + ai
    except Exception as e:
        return None, None, None, None, "Error: " + str(e)
614
+
615
def generate_image(prompt):
    """Generate a biomedical illustration via Hugging Face inference.

    If a Groq key is present, the prompt is first expanded into a
    DESCRIPTION + PROMPT pair (best-effort). Tries FLUX.1-schnell, then
    SDXL as a fallback.

    Returns:
        (image_or_None, status_message, ai_description).
    """
    if not prompt.strip():
        return None, "Enter description.", ""
    if not HF_TOKEN:
        return None, "Add HF_TOKEN to Space secrets.", ""
    try:
        enhanced, desc = prompt, ""
        if GROQ_KEY:
            # Best-effort prompt enhancement; on any failure fall back to
            # the raw prompt. Narrowed from a bare `except:`.
            try:
                client = Groq(api_key=GROQ_KEY)
                resp = client.chat.completions.create(
                    model="llama-3.3-70b-versatile",
                    messages=[{"role": "system", "content": "Format: DESCRIPTION: [2 sentences] PROMPT: [detailed image prompt]"},
                              {"role": "user", "content": "Biomedical image: " + prompt}],
                    max_tokens=200)
                full = resp.choices[0].message.content
                if "DESCRIPTION:" in full and "PROMPT:" in full:
                    desc = full.split("DESCRIPTION:")[1].split("PROMPT:")[0].strip()
                    enhanced = full.split("PROMPT:")[1].strip()
            except Exception:
                pass
        headers = {"Authorization": "Bearer " + HF_TOKEN, "Content-Type": "application/json"}
        # Try each model endpoint in order; first HTTP 200 wins.
        for url in ["https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell",
                    "https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-base-1.0"]:
            try:
                r = requests.post(url, headers=headers, json={"inputs": enhanced, "parameters": {"num_inference_steps": 8}}, timeout=60)
                if r.status_code == 200:
                    return Image.open(io.BytesIO(r.content)), "Generated!", desc
            except Exception:
                # Network/timeout/decode failure: try the next endpoint.
                continue
        return None, "Models busy.", desc
    except Exception as e:
        return None, "Error: " + str(e), ""
640
+
641
def piv_manual(v, s, h):
    """Classify manually entered PIV readings.

    v: peak velocity (m/s), s: wall shear stress (Pa), h: heart rate (bpm).
    Returns a three-line summary labelling velocity (>2.0 m/s flagged) and
    shear (>10 Pa high, >5 Pa elevated).
    """
    velocity_label = "HIGH-stenosis" if float(v) > 2.0 else "NORMAL"
    if float(s) > 10:
        shear_label = "HIGH-thrombosis"
    elif float(s) > 5:
        shear_label = "ELEVATED"
    else:
        shear_label = "NORMAL"
    lines = [
        "Velocity: " + str(v) + " m/s - " + velocity_label,
        "Shear: " + str(s) + " Pa - " + shear_label,
        "HR: " + str(h) + " bpm",
    ]
    return chr(10).join(lines)
645
+
646
def tgt_manual(t, p, h, pl, tm):
    """Score manually entered TGT markers and return a risk verdict.

    t: TAT (ng/mL), p: PF1.2, h: free hemoglobin (mg/L), pl: platelet
    count, tm: elapsed time in minutes (accepted for interface
    compatibility but not used in the score).

    Counts abnormal markers (TAT>15, PF1.2>2.0, Hemo>50, Plt<150):
    3+ flags -> HIGH RISK, 2 -> MODERATE, else LOW RISK.
    """
    flags = 0
    if float(t) > 15:
        flags += 1
    if float(p) > 2.0:
        flags += 1
    if float(h) > 50:
        flags += 1
    if float(pl) < 150:
        flags += 1
    if flags >= 3:
        verdict = "HIGH RISK"
    elif flags >= 2:
        verdict = "MODERATE"
    else:
        verdict = "LOW RISK"
    return ("TAT:" + str(t) + " PF1.2:" + str(p) + chr(10)
            + "Hemo:" + str(h) + " Plt:" + str(pl) + chr(10)
            + "RESULT: " + verdict)
649
+
650
# ── UI ─────────────────────────────────────────────────────────────
# Top-level Gradio Blocks app: header + status banner, then one tab per
# feature. Event handlers (research_chat, save_session, generate_protocol,
# etc.) and constants (HEADER, CSS, CHAT_MODELS, ...) are defined earlier
# in this file.
with gr.Blocks(title="CardioLab AI - SJSU") as demo:
    gr.HTML(HEADER)

    # Status banner: RAG corpus size and fine-tuned-model availability,
    # derived from module-level flags set during startup.
    papers_count = len(set(m["paper"] for m in METADATA)) if PAPERS_LOADED else 0
    model_status = "Fine-tuned Model LOADED" if CARDIOLAB_MODEL_LOADED else "Fine-tuned model loading..."
    rag_status = "RAG: " + str(len(CHUNKS)) + " chunks from " + str(papers_count) + " SJSU papers" if PAPERS_LOADED else "RAG: loading..."
    gr.HTML("<div style='background:#1a7340;color:white;text-align:center;padding:7px;font-size:0.82em;font-weight:700;'>" + rag_status + " | " + model_status + " | Select CardioLab Fine-tuned in Model dropdown!</div>")

    with gr.Tabs():

        # Chat: RAG chat with a saved-session sidebar (new/save/load/delete).
        with gr.Tab("Chat"):
            with gr.Row():
                with gr.Column(scale=1, min_width=200):
                    gr.HTML("<div style='background:#202123;padding:10px;border-radius:8px;margin-bottom:6px;'><div style='color:#e8a020;font-weight:700;font-size:0.85em;'>SJSU CARDIOLAB</div><div style='color:#9ca3af;font-size:0.7em;'>Conversations</div></div>")
                    new_chat_btn = gr.Button("New Chat", variant="secondary")
                    session_dropdown = gr.Dropdown(choices=get_session_list(), label="Saved Sessions", interactive=True)
                    load_btn = gr.Button("Load Session", variant="primary")
                    session_name_box = gr.Textbox(placeholder="Session name...", label="", lines=1, container=False)
                    with gr.Row():
                        save_btn = gr.Button("Save", variant="primary", scale=2)
                        delete_btn = gr.Button("Del", variant="secondary", scale=1)
                    session_status = gr.Textbox(label="", lines=1, interactive=False, container=False)
                with gr.Column(scale=4):
                    chatbot = gr.Chatbot(label="", height=460, show_label=False, container=False)
                    with gr.Row():
                        msg_box = gr.Textbox(placeholder="Ask anything — AI searches 16 SJSU papers + PubMed...", label="", lines=2, scale=4, container=False)
                        with gr.Column(scale=1, min_width=160):
                            chat_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="AI Model")
                            send_btn = gr.Button("Send", variant="primary")
                            clear_btn = gr.Button("Clear", variant="secondary")
            # Event wiring: send via button or Enter; session management below.
            send_btn.click(research_chat, inputs=[msg_box, chatbot, chat_model_dd], outputs=[msg_box, chatbot])
            msg_box.submit(research_chat, inputs=[msg_box, chatbot, chat_model_dd], outputs=[msg_box, chatbot])
            clear_btn.click(lambda: ([], ""), outputs=[chatbot, msg_box])
            new_chat_btn.click(new_chat, outputs=[chatbot, msg_box, session_status])
            save_btn.click(save_session, inputs=[chatbot, session_name_box], outputs=[session_status, session_dropdown])
            load_btn.click(load_session, inputs=session_dropdown, outputs=[chatbot, session_status])
            delete_btn.click(delete_session, inputs=session_dropdown, outputs=[session_status, session_dropdown])

        # Voice: microphone question -> voice_chat handler.
        with gr.Tab("Voice"):
            voice_chatbot = gr.Chatbot(label="", height=360, show_label=False)
            audio_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Question")
            with gr.Row():
                voice_btn = gr.Button("Ask by Voice", variant="primary")
                voice_clear = gr.Button("Clear", variant="secondary")
            voice_btn.click(voice_chat, inputs=[audio_input, voice_chatbot], outputs=voice_chatbot)
            voice_clear.click(lambda: [], outputs=voice_chatbot)

        # Papers: free-text literature search via quick_search.
        with gr.Tab("Papers"):
            gr.Markdown("### Search PubMed + Semantic Scholar + SJSU ScholarWorks")
            with gr.Row():
                search_input = gr.Textbox(placeholder="e.g. bileaflet mechanical heart valve thrombogenicity hemodynamics", label="Research Topic", scale=3)
                search_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="AI Model", scale=1)
                search_btn = gr.Button("Search", variant="primary", scale=1)
            search_output = gr.Textbox(label="Results", lines=22)
            search_btn.click(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)
            search_input.submit(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)

        # PIV CSV: upload -> analyze_piv_csv -> four charts + analysis text.
        with gr.Tab("PIV CSV"):
            with gr.Row():
                piv_file = gr.File(label="Upload PIV CSV", file_types=[".csv"], scale=3)
                piv_theme = gr.Radio(["White", "Dark"], value="White", label="Theme", scale=1)
            piv_btn = gr.Button("Analyze PIV Data", variant="primary")
            piv_result = gr.Textbox(label="AI Analysis", lines=4)
            with gr.Row():
                piv_c1 = gr.Image(label="Velocity", type="pil")
                piv_c2 = gr.Image(label="Shear Stress", type="pil")
            with gr.Row():
                piv_c3 = gr.Image(label="Vel vs Shear", type="pil")
                piv_c4 = gr.Image(label="Clinical Summary", type="pil")
            piv_btn.click(analyze_piv_csv, inputs=[piv_file, piv_theme], outputs=[piv_c1, piv_c2, piv_c3, piv_c4, piv_result])

        # TGT CSV: upload -> analyze_tgt_csv -> four charts + assessment text.
        with gr.Tab("TGT CSV"):
            with gr.Row():
                tgt_file = gr.File(label="Upload TGT CSV", file_types=[".csv"], scale=3)
                tgt_theme = gr.Radio(["White", "Dark"], value="White", label="Theme", scale=1)
            tgt_btn = gr.Button("Analyze TGT Data", variant="primary")
            tgt_result = gr.Textbox(label="AI Assessment", lines=4)
            with gr.Row():
                tgt_c1 = gr.Image(label="TAT", type="pil")
                tgt_c2 = gr.Image(label="PF1.2", type="pil")
            with gr.Row():
                tgt_c3 = gr.Image(label="Hemoglobin", type="pil")
                tgt_c4 = gr.Image(label="Platelets", type="pil")
            tgt_btn.click(analyze_tgt_csv, inputs=[tgt_file, tgt_theme], outputs=[tgt_c1, tgt_c2, tgt_c3, tgt_c4, tgt_result])

        # uPAD: photo-based creatinine analysis plus a manual RGB estimator.
        with gr.Tab("uPAD"):
            with gr.Row():
                with gr.Column():
                    photo_input = gr.Image(label="Upload uPAD Photo", type="numpy", height=260)
                    analyze_btn = gr.Button("Analyze uPAD", variant="primary")
                with gr.Column():
                    photo_img = gr.Image(label="Detection Zone", type="pil", height=260)
                    photo_text = gr.Textbox(label="CKD Result", lines=8)
            analyze_btn.click(analyze_upad_photo, inputs=photo_input, outputs=[photo_img, photo_text])
            with gr.Row():
                r = gr.Number(label="R", value=210)
                g = gr.Number(label="G", value=140)
                b = gr.Number(label="B", value=80)
            out3 = gr.Textbox(label="Result", lines=3)
            # Inline creatinine estimate from raw RGB: 0.02*(R-B)-0.5, floored at 0.
            gr.Button("Analyze RGB", variant="secondary").click(
                lambda r, g, b: "Creatinine: " + str(max(0, round(0.02 * (r - b) - 0.5, 2))) + " mg/dL" + chr(10) + ("Normal" if max(0, round(0.02 * (r - b) - 0.5, 2)) < 1.2 else "Borderline" if max(0, round(0.02 * (r - b) - 0.5, 2)) < 1.5 else "CKD"),
                inputs=[r, g, b], outputs=out3)

        # AI Image: text prompt -> generate_image (HF inference).
        with gr.Tab("AI Image"):
            with gr.Row():
                img_prompt = gr.Textbox(placeholder="e.g. 27mm bileaflet mechanical heart valve cross section", label="Describe image", lines=2, scale=4)
                with gr.Column(scale=1):
                    img_btn = gr.Button("Generate", variant="primary")
                    img_status = gr.Textbox(label="Status", lines=1)
            img_desc = gr.Textbox(label="AI Description", lines=2, interactive=False)
            img_output = gr.Image(label="Generated Image", type="pil", height=400)
            img_btn.click(generate_image, inputs=img_prompt, outputs=[img_output, img_status, img_desc])

        # PIV Manual: numeric inputs -> piv_manual classifier.
        with gr.Tab("PIV Manual"):
            with gr.Row():
                with gr.Column():
                    v = gr.Number(label="Max Velocity m/s", value=1.8)
                    s = gr.Number(label="Wall Shear Pa", value=6.5)
                    h = gr.Number(label="Heart Rate bpm", value=72)
                    piv_out = gr.Textbox(label="Result", lines=4)
                    gr.Button("Analyze PIV", variant="primary").click(piv_manual, inputs=[v, s, h], outputs=piv_out)

        # TGT Manual: numeric inputs -> tgt_manual risk score.
        with gr.Tab("TGT Manual"):
            with gr.Row():
                with gr.Column():
                    t1 = gr.Number(label="TAT ng/mL", value=18)
                    t2 = gr.Number(label="PF1.2", value=2.5)
                    t3 = gr.Number(label="Hemoglobin mg/L", value=60)
                    t4 = gr.Number(label="Platelets", value=140)
                    t5 = gr.Number(label="Time min", value=40)
                    out2 = gr.Textbox(label="Result", lines=6)
                    gr.Button("Analyze TGT", variant="primary").click(tgt_manual, inputs=[t1, t2, t3, t4, t5], outputs=out2)

        # Protocol Generator: experiment type + params -> generate_protocol.
        with gr.Tab("Protocol Generator"):
            gr.Markdown("### Generate complete lab protocols from SJSU CardioLab knowledge")
            with gr.Row():
                with gr.Column(scale=1):
                    proto_type = gr.Dropdown(
                        choices=["MCL Setup", "PIV Experiment", "Thrombogenicity Tester Blood Clotting Test",
                                 "uPAD Fabrication", "uPAD Creatinine Test",
                                 "FSI COMSOL Simulation", "Valve Testing"],
                        value="Thrombogenicity Tester Blood Clotting Test", label="Experiment Type")
                    proto_params = gr.Textbox(placeholder="e.g. 27mm SJM valve 70bpm porcine blood",
                                              label="Specific Parameters", lines=2)
                    proto_btn = gr.Button("Generate Protocol", variant="primary")
                with gr.Column(scale=2):
                    proto_output = gr.Textbox(label="Generated Protocol", lines=28)
            proto_btn.click(generate_protocol, inputs=[proto_type, proto_params], outputs=proto_output)

        # Report Writer: study type + description + results -> generate_report.
        with gr.Tab("Report Writer"):
            gr.Markdown("### Generate professional research reports from your data")
            with gr.Row():
                with gr.Column(scale=1):
                    report_exp = gr.Dropdown(
                        choices=["MCL PIV Flow Analysis", "TGT Thrombogenicity Study",
                                 "uPAD CKD Detection", "FSI Simulation Study",
                                 "Heart Valve Comparison"],
                        value="TGT Thrombogenicity Study", label="Study Type")
                    report_desc = gr.Textbox(
                        placeholder="e.g. TGT with 27mm SJM bileaflet at 70bpm 150mL porcine blood",
                        label="Experiment Description", lines=3)
                    report_results = gr.Textbox(
                        placeholder="e.g. TAT=12.3 ng/mL PF1.2=2.8 Hemo=45 Plt=142",
                        label="Your Results", lines=2)
                    report_btn = gr.Button("Generate Report", variant="primary")
                with gr.Column(scale=2):
                    report_output = gr.Textbox(label="Generated Report", lines=28)
            report_btn.click(generate_report, inputs=[report_desc, report_exp, report_results], outputs=report_output)

        # Hypothesis Generator: research area + findings -> generate_hypothesis.
        with gr.Tab("Hypothesis Generator"):
            gr.Markdown("### Generate testable research hypotheses for CardioLab projects")
            with gr.Row():
                with gr.Column(scale=1):
                    hyp_area = gr.Dropdown(
                        choices=["Bileaflet MHV Thrombogenicity",
                                 "uPAD CKD Detection Accuracy",
                                 "PIV Flow Characterization",
                                 "FSI Simulation Validation",
                                 "Valve Design Comparison"],
                        value="Bileaflet MHV Thrombogenicity", label="Research Area")
                    hyp_findings = gr.Textbox(
                        placeholder="Current observations from your experiments",
                        label="Current Findings", lines=3)
                    hyp_btn = gr.Button("Generate Hypotheses", variant="primary")
                with gr.Column(scale=2):
                    hyp_output = gr.Textbox(label="Research Hypotheses", lines=25)
            hyp_btn.click(generate_hypothesis, inputs=[hyp_area, hyp_findings], outputs=hyp_output)

    # Footer credit bar.
    gr.HTML("""<div style="text-align:center;padding:10px;border-top:1px solid #e5e7eb;background:#f9fafb;">
<span style="color:#9ca3af;font-size:0.75em;">CardioLab AI v38 | SJSU Biomedical Engineering | RAG + Fine-tuned + Phase D | Inspired by <a href="https://github.com/snap-stanford/Biomni" style="color:#c1121f;">Biomni Stanford</a> | Apache 2.0 | $0 Cost</span></div>""")

# NOTE(review): css is passed to launch() here; in current Gradio versions
# css is a gr.Blocks(...) constructor argument — confirm against the pinned
# gradio version before changing.
demo.launch(css=CSS)