Spaces:
Running
Running
Upload app.py with huggingface_hub
Browse files
app.py
CHANGED
|
@@ -14,8 +14,10 @@ GROQ_KEY = os.environ.get("GROQ_API_KEY", "")
|
|
| 14 |
HF_TOKEN = os.environ.get("HF_TOKEN", "")
|
| 15 |
HISTORY_REPO = "Saicharan21/cardiolab-chat-history"
|
| 16 |
PAPERS_DB_REPO = "Saicharan21/cardiolab-papers-db"
|
|
|
|
| 17 |
|
| 18 |
CHAT_MODELS = {
|
|
|
|
| 19 |
"Llama 3.3 70B (Best)": "llama-3.3-70b-versatile",
|
| 20 |
"Llama 3.1 8B (Fast)": "llama-3.1-8b-instant",
|
| 21 |
"Mixtral 8x7B": "mixtral-8x7b-32768",
|
|
@@ -28,18 +30,20 @@ KNOWHOW = ("MCL: Sylgard 184 PDMS 10:1 ratio 48hr cure green laser PIV 70bpm 5L/
|
|
| 28 |
"MHV: 27mm SJM Regent bileaflet trileaflet monoleaflet pediatric. "
|
| 29 |
"Equipment: Heska HT5 analyzer PIV green laser Tygon tubing Arduino Uno.")
|
| 30 |
|
| 31 |
-
# ── LOAD PAPERS ON STARTUP ─────────────────────
|
| 32 |
CHUNKS = []
|
| 33 |
METADATA = []
|
| 34 |
EMBEDDINGS = None
|
| 35 |
PAPERS_LOADED = False
|
| 36 |
EMBEDDER = None
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
def load_papers():
|
| 39 |
global CHUNKS, METADATA, EMBEDDINGS, PAPERS_LOADED, EMBEDDER
|
| 40 |
try:
|
| 41 |
from sentence_transformers import SentenceTransformer
|
| 42 |
-
print("Loading paper database from HuggingFace...")
|
| 43 |
chunks_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="chunks.json", repo_type="dataset", token=HF_TOKEN)
|
| 44 |
meta_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="metadata.json", repo_type="dataset", token=HF_TOKEN)
|
| 45 |
emb_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="embeddings.npy", repo_type="dataset", token=HF_TOKEN)
|
|
@@ -48,18 +52,41 @@ def load_papers():
|
|
| 48 |
EMBEDDINGS = np.load(emb_path)
|
| 49 |
EMBEDDER = SentenceTransformer("all-MiniLM-L6-v2")
|
| 50 |
PAPERS_LOADED = True
|
| 51 |
-
|
| 52 |
-
print(f"Loaded {len(CHUNKS)} chunks from {papers_count} SJSU papers!")
|
| 53 |
return True
|
| 54 |
except Exception as e:
|
| 55 |
print(f"Paper load error: {e}")
|
| 56 |
return False
|
| 57 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
load_papers()
|
|
|
|
| 59 |
|
| 60 |
# ── SEMANTIC SEARCH ────────────────────────────────────────────────
|
| 61 |
def search_papers(query, n=4):
|
| 62 |
-
global CHUNKS, METADATA, EMBEDDINGS, EMBEDDER, PAPERS_LOADED
|
| 63 |
if not PAPERS_LOADED or EMBEDDINGS is None or EMBEDDER is None:
|
| 64 |
return "", []
|
| 65 |
try:
|
|
@@ -77,16 +104,42 @@ def search_papers(query, n=4):
|
|
| 77 |
meta = METADATA[idx]
|
| 78 |
score = float(scores[idx])
|
| 79 |
if score > 0.25:
|
| 80 |
-
results.append({"chunk":
|
| 81 |
if meta["paper"] not in seen:
|
| 82 |
-
context += chr(10)+"=== FROM: "+meta["paper"]+"
|
| 83 |
seen.add(meta["paper"])
|
| 84 |
context += chunk[:500]+chr(10)
|
| 85 |
return context, results
|
| 86 |
except Exception as e:
|
| 87 |
-
print(f"Search error: {e}")
|
| 88 |
return "", []
|
| 89 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
CSS = """
|
| 91 |
body, .gradio-container { background: #f7f7f8 !important; font-family: -apple-system, BlinkMacSystemFont, Segoe UI, sans-serif !important; }
|
| 92 |
.tab-nav { background: #ffffff !important; border-bottom: 1px solid #e5e7eb !important; padding: 0 16px !important; display: flex !important; flex-wrap: wrap !important; }
|
|
@@ -120,7 +173,7 @@ HEADER = """<div style="background:linear-gradient(135deg,#0a0f2e 0%,#1a0a0a 100
|
|
| 120 |
<svg width="100" height="28" viewBox="0 0 120 32"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg>
|
| 121 |
<div style="font-size:2em;font-weight:900;letter-spacing:2px;"><span style="color:#ffffff;">Cardio</span><span style="color:#c1121f;">Lab</span><span style="color:#ffffff;"> AI</span></div>
|
| 122 |
<svg width="100" height="28" viewBox="0 0 120 32" style="transform:scaleX(-1);"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg></div>
|
| 123 |
-
<div style="color:#9ca3af;font-size:0.68em;letter-spacing:2px;text-transform:uppercase;">
|
| 124 |
<div style="display:flex;align-items:center;gap:14px;">
|
| 125 |
<div style="text-align:right;"><div style="color:#9ca3af;font-size:0.68em;text-transform:uppercase;">Research Pillars</div>
|
| 126 |
<div style="color:#ffffff;font-size:0.72em;margin-top:3px;">MHV CKD FSI</div>
|
|
@@ -130,7 +183,6 @@ HEADER = """<div style="background:linear-gradient(135deg,#0a0f2e 0%,#1a0a0a 100
|
|
| 130 |
<polyline points="25,45 32,45 35,35 38,55 41,30 44,50 50,45 75,45" fill="none" stroke="white" stroke-width="2.5" stroke-linecap="round" opacity="0.9"/></svg></div></div>
|
| 131 |
<div style="height:3px;background:linear-gradient(90deg,#0057a8,#c1121f,#e8a020,#c1121f,#0057a8);"></div></div>"""
|
| 132 |
|
| 133 |
-
# ── SESSION MANAGEMENT ─────────────────────────────────────────────
|
| 134 |
def load_all_sessions():
|
| 135 |
if not HF_TOKEN: return {}
|
| 136 |
try:
|
|
@@ -150,8 +202,7 @@ def save_all_sessions(sessions):
|
|
| 150 |
|
| 151 |
def get_session_list():
|
| 152 |
s = load_all_sessions()
|
| 153 |
-
|
| 154 |
-
return list(reversed(list(s.keys())))
|
| 155 |
|
| 156 |
def save_session(history, name):
|
| 157 |
if not history: return "Nothing to save", gr.update()
|
|
@@ -165,8 +216,7 @@ def save_session(history, name):
|
|
| 165 |
def load_session(name):
|
| 166 |
if not name or "No saved" in name: return [], "Select a session"
|
| 167 |
sessions = load_all_sessions()
|
| 168 |
-
|
| 169 |
-
return [], "Not found"
|
| 170 |
|
| 171 |
def delete_session(name):
|
| 172 |
if not name or "No saved" in name: return "Select a session", gr.update()
|
|
@@ -179,115 +229,34 @@ def delete_session(name):
|
|
| 179 |
|
| 180 |
def new_chat(): return [], "", "New chat started"
|
| 181 |
|
| 182 |
-
# ── SEARCH FUNCTIONS ───────────────────────────────────────────────
|
| 183 |
-
def expand_query_ai(query, model_id="llama-3.3-70b-versatile"):
|
| 184 |
-
if not GROQ_KEY: return query
|
| 185 |
-
try:
|
| 186 |
-
client = Groq(api_key=GROQ_KEY)
|
| 187 |
-
resp = client.chat.completions.create(model=model_id,
|
| 188 |
-
messages=[{"role":"system","content":"Biomedical PubMed expert. Convert to optimized MeSH terms for heart valves hemodynamics PIV thrombogenicity FSI microfluidics CKD creatinine. Return ONLY terms."},
|
| 189 |
-
{"role":"user","content":"Optimize: "+query}],max_tokens=80)
|
| 190 |
-
return resp.choices[0].message.content.strip() or query
|
| 191 |
-
except: return query
|
| 192 |
-
|
| 193 |
-
def fetch_pubmed(query, n=6):
|
| 194 |
-
try:
|
| 195 |
-
forced = query+" AND (heart valve OR hemodynamics OR microfluidic OR thrombogen OR creatinine OR PIV OR CFD OR CKD OR fluid structure)"
|
| 196 |
-
r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
|
| 197 |
-
params={"db":"pubmed","term":forced,"retmax":n,"retmode":"json","sort":"date","field":"tiab"},timeout=12)
|
| 198 |
-
ids = r.json()["esearchresult"]["idlist"]
|
| 199 |
-
if not ids: return []
|
| 200 |
-
r2 = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi",
|
| 201 |
-
params={"db":"pubmed","id":",".join(ids),"retmode":"xml","rettype":"abstract"},timeout=12)
|
| 202 |
-
import xml.etree.ElementTree as ET
|
| 203 |
-
root = ET.fromstring(r2.content)
|
| 204 |
-
results = []
|
| 205 |
-
for article in root.findall(".//PubmedArticle"):
|
| 206 |
-
try:
|
| 207 |
-
title = article.find(".//ArticleTitle").text or "No title"
|
| 208 |
-
pmid = article.find(".//PMID").text or ""
|
| 209 |
-
year_el = article.find(".//PubDate/Year")
|
| 210 |
-
year = year_el.text if year_el is not None else ""
|
| 211 |
-
results.append({"source":"PubMed","title":str(title),"year":year,
|
| 212 |
-
"url":"https://pubmed.ncbi.nlm.nih.gov/"+pmid,"citations":"N/A"})
|
| 213 |
-
except: continue
|
| 214 |
-
return results
|
| 215 |
-
except: return []
|
| 216 |
-
|
| 217 |
-
def fetch_scholar(query, n=6):
|
| 218 |
-
try:
|
| 219 |
-
r = requests.get("https://api.semanticscholar.org/graph/v1/paper/search",
|
| 220 |
-
params={"query":query,"limit":n,"fields":"title,year,url,citationCount"},timeout=12)
|
| 221 |
-
papers = r.json().get("data",[])
|
| 222 |
-
results = []
|
| 223 |
-
for p in papers:
|
| 224 |
-
year = p.get("year",0) or 0
|
| 225 |
-
if int(year) < 2015: continue
|
| 226 |
-
results.append({"source":"Scholar","title":p.get("title",""),"year":str(year),
|
| 227 |
-
"url":p.get("url",""),"citations":str(p.get("citationCount",0))})
|
| 228 |
-
results.sort(key=lambda x:(x["year"],int(x["citations"]) if x["citations"].isdigit() else 0),reverse=True)
|
| 229 |
-
return results
|
| 230 |
-
except: return []
|
| 231 |
-
|
| 232 |
-
def fetch_europe_pmc(query, n=5):
|
| 233 |
-
try:
|
| 234 |
-
r = requests.get("https://www.ebi.ac.uk/europepmc/webservices/rest/search",
|
| 235 |
-
params={"query":query,"format":"json","pageSize":n,"sort":"P_PDATE_D desc"},timeout=12)
|
| 236 |
-
articles = r.json().get("resultList",{}).get("result",[])
|
| 237 |
-
results = []
|
| 238 |
-
for a in articles:
|
| 239 |
-
year = str(a.get("pubYear",""))
|
| 240 |
-
if year and int(year) < 2015: continue
|
| 241 |
-
pmid = a.get("pmid",""); doi = a.get("doi","")
|
| 242 |
-
url = ("https://pubmed.ncbi.nlm.nih.gov/"+pmid if pmid else "https://doi.org/"+doi if doi else "")
|
| 243 |
-
if not url: continue
|
| 244 |
-
results.append({"source":"Europe PMC","title":a.get("title",""),"year":year,
|
| 245 |
-
"url":url,"citations":str(a.get("citedByCount",0))})
|
| 246 |
-
return results
|
| 247 |
-
except: return []
|
| 248 |
-
|
| 249 |
-
def quick_search(query, search_model="Llama 3.3 70B (Best)"):
|
| 250 |
-
if not query.strip(): return "Please enter a research topic."
|
| 251 |
-
model_id = CHAT_MODELS.get(search_model, "llama-3.3-70b-versatile")
|
| 252 |
-
expanded = expand_query_ai(query, model_id)
|
| 253 |
-
r1 = fetch_pubmed(expanded, n=6)
|
| 254 |
-
r2 = fetch_scholar(expanded, n=6)
|
| 255 |
-
r3 = fetch_europe_pmc(expanded, n=5)
|
| 256 |
-
sjsu_url = "https://scholarworks.sjsu.edu/do/search/?q="+requests.utils.quote(query)+"&context=6781027"
|
| 257 |
-
all_results = r1+r2+r3
|
| 258 |
-
seen = set()
|
| 259 |
-
unique = []
|
| 260 |
-
for r in all_results:
|
| 261 |
-
key = r["title"][:50].lower().strip()
|
| 262 |
-
if key not in seen and r["url"]:
|
| 263 |
-
seen.add(key); unique.append(r)
|
| 264 |
-
out = "QUERY: "+query+chr(10)+"AI EXPANDED: "+expanded+chr(10)
|
| 265 |
-
out += "="*45+chr(10)+chr(10)
|
| 266 |
-
groups = {"PubMed":[],"Scholar":[],"Europe PMC":[]}
|
| 267 |
-
for r in unique[:20]:
|
| 268 |
-
if r["source"] in groups: groups[r["source"]].append(r)
|
| 269 |
-
for source, papers in groups.items():
|
| 270 |
-
if not papers: continue
|
| 271 |
-
out += "--- "+source+" ---"+chr(10)
|
| 272 |
-
for p in papers:
|
| 273 |
-
out += p["title"][:85]+" ("+p["year"]+")"
|
| 274 |
-
if p["citations"] not in ("N/A","",): out += " | "+p["citations"]+" citations"
|
| 275 |
-
out += chr(10)+" "+p["url"]+chr(10)+chr(10)
|
| 276 |
-
out += "--- SJSU ScholarWorks ---"+chr(10)
|
| 277 |
-
out += "Search SJSU papers: "+sjsu_url+chr(10)
|
| 278 |
-
return out
|
| 279 |
-
|
| 280 |
def get_pubmed_chat(query, n=3):
|
| 281 |
try:
|
| 282 |
r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
|
| 283 |
params={"db":"pubmed","term":query+" AND (heart valve OR hemodynamics OR microfluidic OR thrombogen OR creatinine OR CKD)","retmax":n,"retmode":"json","sort":"date","field":"tiab"},timeout=10)
|
| 284 |
ids = r.json()["esearchresult"]["idlist"]
|
| 285 |
-
|
| 286 |
-
return chr(10).join(["https://pubmed.ncbi.nlm.nih.gov/"+i for i in ids])
|
| 287 |
except: return ""
|
| 288 |
|
| 289 |
-
# ── CHAT WITH RAG ──────────────────────────────────────────────────
|
| 290 |
def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 291 |
if not GROQ_KEY:
|
| 292 |
history.append({"role":"user","content":message})
|
| 293 |
history.append({"role":"assistant","content":"Error: Add GROQ_API_KEY to Space Settings."})
|
|
@@ -295,11 +264,10 @@ def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
|
|
| 295 |
try:
|
| 296 |
model_id = CHAT_MODELS.get(chat_model, "llama-3.3-70b-versatile")
|
| 297 |
client = Groq(api_key=GROQ_KEY)
|
| 298 |
-
paper_context, paper_results = search_papers(message, n=4)
|
| 299 |
if paper_context:
|
| 300 |
system_prompt = ("You are CardioLab AI for SJSU Biomedical Engineering. "
|
| 301 |
"Answer using SJSU CardioLab research papers below. "
|
| 302 |
-
"Always cite the paper name when using specific data.
|
| 303 |
"SJSU CARDIOLAB PAPERS:"+chr(10)+paper_context+chr(10)+chr(10)+
|
| 304 |
"ADDITIONAL KNOWLEDGE: "+KNOWHOW)
|
| 305 |
else:
|
|
@@ -348,6 +316,65 @@ def voice_chat(audio, history):
|
|
| 348 |
history.append({"role":"assistant","content":"Voice error: "+str(e)})
|
| 349 |
return history
|
| 350 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 351 |
def analyze_upad_photo(image):
|
| 352 |
if image is None: return None, "Upload a uPAD photo first."
|
| 353 |
try:
|
|
@@ -394,7 +421,7 @@ def analyze_piv_csv(file,theme="White"):
|
|
| 394 |
ax.plot(xv,df[vc],color="#c1121f",linewidth=2.5,marker="o",markersize=5)
|
| 395 |
ax.fill_between(xv,df[vc],alpha=0.15,color="#c1121f")
|
| 396 |
ax.axhline(y=2.0,color="#f59e0b",linestyle="--",linewidth=2,label="Risk: 2.0 m/s")
|
| 397 |
-
ax.set_ylabel("Velocity (m/s)",color=ac
|
| 398 |
def ps(ax):
|
| 399 |
if sc2:
|
| 400 |
xp=xv.values if tc else x
|
|
@@ -402,13 +429,13 @@ def analyze_piv_csv(file,theme="White"):
|
|
| 402 |
ax.fill_between(xp,df[sc2],alpha=0.15,color="#0057a8")
|
| 403 |
ax.axhline(y=5,color="#f59e0b",linestyle="--",linewidth=2,label="Caution 5 Pa")
|
| 404 |
ax.axhline(y=10,color="#c1121f",linestyle="--",linewidth=2,label="High risk 10 Pa")
|
| 405 |
-
ax.set_ylabel("Shear (Pa)",color=ac
|
| 406 |
def psc(ax):
|
| 407 |
if vc and sc2:
|
| 408 |
s3=ax.scatter(df[vc],df[sc2],c=x,cmap="RdYlGn_r",s=90,edgecolors=fg,linewidth=0.5,zorder=5)
|
| 409 |
cb=plt.colorbar(s3,ax=ax,label="Time"); cb.ax.yaxis.label.set_color(fg); cb.ax.tick_params(colors=ac)
|
| 410 |
ax.axvline(x=2.0,color="#f59e0b",linestyle="--",linewidth=2); ax.axhline(y=10,color="#c1121f",linestyle="--",linewidth=2)
|
| 411 |
-
ax.set_xlabel("Velocity (m/s)",color=ac
|
| 412 |
def psum(ax):
|
| 413 |
ax.axis("off"); risk=[]
|
| 414 |
st="CLINICAL SUMMARY"+chr(10)+"="*20+chr(10)+chr(10)
|
|
@@ -460,22 +487,19 @@ def analyze_tgt_csv(file,theme="White"):
|
|
| 460 |
ax.fill_between(xp,yp,alpha=0.15,color=color)
|
| 461 |
for xi,yi in zip(xp,yp): ax.annotate(str(round(yi,1)),(xi,yi),textcoords="offset points",xytext=(0,10),ha="center",color=fg,fontsize=10,fontweight="bold")
|
| 462 |
ax.axhline(y=lim,color="#f59e0b",linestyle="--",linewidth=2.5,label=ll)
|
| 463 |
-
ax.legend(fontsize=10,labelcolor=fg,facecolor=pb)
|
| 464 |
-
ax.set_ylabel(yl,color=ac,fontsize=11)
|
| 465 |
mv=round(float(np.max(yp)),2)
|
| 466 |
ax.set_title(title+chr(10)+"Max: "+str(mv)+" - "+("HIGH" if mv>lim else "NORMAL"),color=fg,fontweight="bold",fontsize=12)
|
| 467 |
return mk_chart(fn,title,bg,fg,gc,ac,pb)
|
| 468 |
-
i1=mk2(tatc,"#c1121f","TAT (ng/mL)",8,"Normal: 8","TAT
|
| 469 |
-
|
| 470 |
-
i3=mk2(hc,"#2ecc71","Free Hemoglobin (mg/L)",20,"Normal: 20","Free Hemoglobin",bar=True)
|
| 471 |
-
i4=mk2(plc,"#e8a020","Platelet Count",150,"Normal min: 150","Platelet Count")
|
| 472 |
ai=""
|
| 473 |
if GROQ_KEY:
|
| 474 |
try:
|
| 475 |
client=Groq(api_key=GROQ_KEY)
|
| 476 |
resp=client.chat.completions.create(model="llama-3.3-70b-versatile",
|
| 477 |
-
messages=[{"role":"system","content":"Hematology expert
|
| 478 |
-
{"role":"user","content":"TGT
|
| 479 |
ai=chr(10)+"AI: "+resp.choices[0].message.content
|
| 480 |
except: pass
|
| 481 |
return i1,i2,i3,i4,"TGT: "+str(len(df))+" rows"+ai
|
|
@@ -504,27 +528,26 @@ def generate_image(prompt):
|
|
| 504 |
r=requests.post(url,headers=headers,json={"inputs":enhanced,"parameters":{"num_inference_steps":8}},timeout=60)
|
| 505 |
if r.status_code==200: return Image.open(io.BytesIO(r.content)),"Generated!",desc
|
| 506 |
except: continue
|
| 507 |
-
return None,"Models busy.
|
| 508 |
except Exception as e: return None,"Error: "+str(e),""
|
| 509 |
|
| 510 |
def piv_manual(v,s,h):
|
| 511 |
-
vr="HIGH
|
| 512 |
-
sr="HIGH
|
| 513 |
-
return "Velocity: "+str(v)+" m/s
|
| 514 |
|
| 515 |
def tgt_manual(t,p,h,pl,tm):
|
| 516 |
risk=sum([float(t)>15,float(p)>2.0,float(h)>50,float(pl)<150])
|
| 517 |
return "TAT:"+str(t)+" PF1.2:"+str(p)+chr(10)+"Hemo:"+str(h)+" Plt:"+str(pl)+chr(10)+"RESULT: "+("HIGH RISK" if risk>=3 else "MODERATE" if risk>=2 else "LOW RISK")
|
| 518 |
|
| 519 |
-
# ── UI ─────────────────────────────────────────────────────────────
|
| 520 |
with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
| 521 |
gr.HTML(HEADER)
|
| 522 |
|
| 523 |
papers_count = len(set(m["paper"] for m in METADATA)) if PAPERS_LOADED else 0
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
|
| 529 |
with gr.Tabs():
|
| 530 |
with gr.Tab("Chat"):
|
|
@@ -542,11 +565,11 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 542 |
delete_btn = gr.Button("Del", variant="secondary", scale=1)
|
| 543 |
session_status = gr.Textbox(label="", lines=1, interactive=False, container=False)
|
| 544 |
with gr.Column(scale=4):
|
| 545 |
-
chatbot = gr.Chatbot(label="", height=
|
| 546 |
with gr.Row():
|
| 547 |
-
msg_box = gr.Textbox(placeholder="Ask anything
|
| 548 |
-
with gr.Column(scale=1, min_width=
|
| 549 |
-
chat_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="Model")
|
| 550 |
send_btn = gr.Button("Send", variant="primary")
|
| 551 |
clear_btn = gr.Button("Clear", variant="secondary")
|
| 552 |
send_btn.click(research_chat, inputs=[msg_box, chatbot, chat_model_dd], outputs=[msg_box, chatbot])
|
|
@@ -567,12 +590,12 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 567 |
voice_clear.click(lambda: [], outputs=voice_chatbot)
|
| 568 |
|
| 569 |
with gr.Tab("Papers"):
|
| 570 |
-
gr.Markdown("### Search PubMed + Semantic Scholar +
|
| 571 |
with gr.Row():
|
| 572 |
-
search_input = gr.Textbox(placeholder="e.g. bileaflet mechanical heart valve
|
| 573 |
search_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="AI Model", scale=1)
|
| 574 |
-
search_btn = gr.Button("Search
|
| 575 |
-
search_output = gr.Textbox(label="
|
| 576 |
search_btn.click(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)
|
| 577 |
search_input.submit(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)
|
| 578 |
|
|
@@ -583,9 +606,9 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 583 |
piv_btn = gr.Button("Analyze PIV Data", variant="primary")
|
| 584 |
piv_result = gr.Textbox(label="AI Analysis", lines=4)
|
| 585 |
with gr.Row():
|
| 586 |
-
piv_c1=gr.Image(label="Velocity
|
| 587 |
with gr.Row():
|
| 588 |
-
piv_c3=gr.Image(label="
|
| 589 |
piv_btn.click(analyze_piv_csv, inputs=[piv_file,piv_theme], outputs=[piv_c1,piv_c2,piv_c3,piv_c4,piv_result])
|
| 590 |
|
| 591 |
with gr.Tab("TGT CSV"):
|
|
@@ -604,14 +627,14 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 604 |
with gr.Row():
|
| 605 |
with gr.Column():
|
| 606 |
photo_input = gr.Image(label="Upload uPAD Photo", type="numpy", height=260)
|
| 607 |
-
analyze_btn = gr.Button("Analyze uPAD
|
| 608 |
with gr.Column():
|
| 609 |
photo_img = gr.Image(label="Detection Zone", type="pil", height=260)
|
| 610 |
photo_text = gr.Textbox(label="CKD Result", lines=8)
|
| 611 |
analyze_btn.click(analyze_upad_photo, inputs=photo_input, outputs=[photo_img, photo_text])
|
| 612 |
with gr.Row():
|
| 613 |
r=gr.Number(label="R",value=210); g=gr.Number(label="G",value=140); b=gr.Number(label="B",value=80)
|
| 614 |
-
out3=gr.Textbox(label="
|
| 615 |
gr.Button("Analyze RGB",variant="secondary").click(
|
| 616 |
lambda r,g,b:"Creatinine: "+str(max(0,round(0.02*(r-b)-0.5,2)))+" mg/dL"+chr(10)+("Normal" if max(0,round(0.02*(r-b)-0.5,2))<1.2 else "Borderline" if max(0,round(0.02*(r-b)-0.5,2))<1.5 else "CKD"),
|
| 617 |
inputs=[r,g,b],outputs=out3)
|
|
@@ -620,7 +643,7 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 620 |
with gr.Row():
|
| 621 |
img_prompt = gr.Textbox(placeholder="e.g. 27mm bileaflet mechanical heart valve cross section", label="Describe image", lines=2, scale=4)
|
| 622 |
with gr.Column(scale=1):
|
| 623 |
-
img_btn = gr.Button("Generate
|
| 624 |
img_status = gr.Textbox(label="Status", lines=1)
|
| 625 |
img_desc = gr.Textbox(label="AI Description", lines=2, interactive=False)
|
| 626 |
img_output = gr.Image(label="Generated Image", type="pil", height=400)
|
|
@@ -638,10 +661,10 @@ with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
|
| 638 |
with gr.Column():
|
| 639 |
t1=gr.Number(label="TAT ng/mL",value=18); t2=gr.Number(label="PF1.2",value=2.5)
|
| 640 |
t3=gr.Number(label="Hemoglobin mg/L",value=60); t4=gr.Number(label="Platelets",value=140)
|
| 641 |
-
t5=gr.Number(label="Time
|
| 642 |
gr.Button("Analyze TGT",variant="primary").click(tgt_manual,inputs=[t1,t2,t3,t4,t5],outputs=out2)
|
| 643 |
|
| 644 |
gr.HTML("""<div style="text-align:center;padding:10px;border-top:1px solid #e5e7eb;background:#f9fafb;">
|
| 645 |
-
<span style="color:#9ca3af;font-size:0.75em;">CardioLab AI
|
| 646 |
|
| 647 |
demo.launch()
|
|
|
|
| 14 |
HF_TOKEN = os.environ.get("HF_TOKEN", "")
|
| 15 |
HISTORY_REPO = "Saicharan21/cardiolab-chat-history"
|
| 16 |
PAPERS_DB_REPO = "Saicharan21/cardiolab-papers-db"
|
| 17 |
+
CARDIOLAB_MODEL = "Saicharan21/CardioLab-AI-Model"
|
| 18 |
|
| 19 |
CHAT_MODELS = {
|
| 20 |
+
"CardioLab Fine-tuned (SJSU)": "cardiolab",
|
| 21 |
"Llama 3.3 70B (Best)": "llama-3.3-70b-versatile",
|
| 22 |
"Llama 3.1 8B (Fast)": "llama-3.1-8b-instant",
|
| 23 |
"Mixtral 8x7B": "mixtral-8x7b-32768",
|
|
|
|
| 30 |
"MHV: 27mm SJM Regent bileaflet trileaflet monoleaflet pediatric. "
|
| 31 |
"Equipment: Heska HT5 analyzer PIV green laser Tygon tubing Arduino Uno.")
|
| 32 |
|
| 33 |
+
# ── LOAD PAPERS + FINE-TUNED MODEL ON STARTUP ─────────────────────
|
| 34 |
CHUNKS = []
|
| 35 |
METADATA = []
|
| 36 |
EMBEDDINGS = None
|
| 37 |
PAPERS_LOADED = False
|
| 38 |
EMBEDDER = None
|
| 39 |
+
CARDIOLAB_TOKENIZER = None
|
| 40 |
+
CARDIOLAB_LLM = None
|
| 41 |
+
CARDIOLAB_MODEL_LOADED = False
|
| 42 |
|
| 43 |
def load_papers():
|
| 44 |
global CHUNKS, METADATA, EMBEDDINGS, PAPERS_LOADED, EMBEDDER
|
| 45 |
try:
|
| 46 |
from sentence_transformers import SentenceTransformer
|
|
|
|
| 47 |
chunks_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="chunks.json", repo_type="dataset", token=HF_TOKEN)
|
| 48 |
meta_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="metadata.json", repo_type="dataset", token=HF_TOKEN)
|
| 49 |
emb_path = hf_hub_download(repo_id=PAPERS_DB_REPO, filename="embeddings.npy", repo_type="dataset", token=HF_TOKEN)
|
|
|
|
| 52 |
EMBEDDINGS = np.load(emb_path)
|
| 53 |
EMBEDDER = SentenceTransformer("all-MiniLM-L6-v2")
|
| 54 |
PAPERS_LOADED = True
|
| 55 |
+
print(f"Papers loaded: {len(CHUNKS)} chunks from {len(set(m['paper'] for m in METADATA))} papers")
|
|
|
|
| 56 |
return True
|
| 57 |
except Exception as e:
|
| 58 |
print(f"Paper load error: {e}")
|
| 59 |
return False
|
| 60 |
|
| 61 |
+
def load_cardiolab_model():
|
| 62 |
+
global CARDIOLAB_TOKENIZER, CARDIOLAB_LLM, CARDIOLAB_MODEL_LOADED
|
| 63 |
+
try:
|
| 64 |
+
import torch
|
| 65 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 66 |
+
from peft import PeftModel
|
| 67 |
+
print("Loading CardioLab fine-tuned model...")
|
| 68 |
+
base_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
|
| 69 |
+
CARDIOLAB_TOKENIZER = AutoTokenizer.from_pretrained(CARDIOLAB_MODEL, token=HF_TOKEN)
|
| 70 |
+
CARDIOLAB_TOKENIZER.pad_token = CARDIOLAB_TOKENIZER.eos_token
|
| 71 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 72 |
+
CARDIOLAB_LLM = AutoModelForCausalLM.from_pretrained(
|
| 73 |
+
CARDIOLAB_MODEL, token=HF_TOKEN,
|
| 74 |
+
torch_dtype=torch.float16 if device=="cuda" else torch.float32,
|
| 75 |
+
device_map="auto" if device=="cuda" else None,
|
| 76 |
+
low_cpu_mem_usage=True
|
| 77 |
+
)
|
| 78 |
+
CARDIOLAB_MODEL_LOADED = True
|
| 79 |
+
print(f"CardioLab model loaded on {device}!")
|
| 80 |
+
return True
|
| 81 |
+
except Exception as e:
|
| 82 |
+
print(f"CardioLab model load error: {e}")
|
| 83 |
+
return False
|
| 84 |
+
|
| 85 |
load_papers()
|
| 86 |
+
load_cardiolab_model()
|
| 87 |
|
| 88 |
# ── SEMANTIC SEARCH ────────────────────────────────────────────────
|
| 89 |
def search_papers(query, n=4):
|
|
|
|
| 90 |
if not PAPERS_LOADED or EMBEDDINGS is None or EMBEDDER is None:
|
| 91 |
return "", []
|
| 92 |
try:
|
|
|
|
| 104 |
meta = METADATA[idx]
|
| 105 |
score = float(scores[idx])
|
| 106 |
if score > 0.25:
|
| 107 |
+
results.append({"chunk":chunk,"paper":meta["paper"],"pillar":meta.get("pillar",""),"score":score})
|
| 108 |
if meta["paper"] not in seen:
|
| 109 |
+
context += chr(10)+"=== FROM: "+meta["paper"]+" ==="+chr(10)
|
| 110 |
seen.add(meta["paper"])
|
| 111 |
context += chunk[:500]+chr(10)
|
| 112 |
return context, results
|
| 113 |
except Exception as e:
|
|
|
|
| 114 |
return "", []
|
| 115 |
|
| 116 |
+
def answer_with_cardiolab_model(question, paper_context=""):
|
| 117 |
+
if not CARDIOLAB_MODEL_LOADED:
|
| 118 |
+
return None
|
| 119 |
+
try:
|
| 120 |
+
import torch
|
| 121 |
+
system = "You are CardioLab AI for SJSU Biomedical Engineering."
|
| 122 |
+
if paper_context:
|
| 123 |
+
system += " Use these SJSU research papers: "+paper_context[:500]
|
| 124 |
+
prompt = f"<|system|>{system}</s><|user|>{question}</s><|assistant|>"
|
| 125 |
+
inputs = CARDIOLAB_TOKENIZER(prompt, return_tensors="pt", truncation=True, max_length=512)
|
| 126 |
+
device = next(CARDIOLAB_LLM.parameters()).device
|
| 127 |
+
inputs = {k:v.to(device) for k,v in inputs.items()}
|
| 128 |
+
with torch.no_grad():
|
| 129 |
+
outputs = CARDIOLAB_LLM.generate(
|
| 130 |
+
**inputs, max_new_tokens=200, do_sample=True,
|
| 131 |
+
temperature=0.3, pad_token_id=CARDIOLAB_TOKENIZER.eos_token_id
|
| 132 |
+
)
|
| 133 |
+
response = CARDIOLAB_TOKENIZER.decode(outputs[0], skip_special_tokens=True)
|
| 134 |
+
if "<|assistant|>" in response:
|
| 135 |
+
answer = response.split("<|assistant|>")[-1].strip()
|
| 136 |
+
else:
|
| 137 |
+
answer = response[len(prompt):].strip() if len(response) > len(prompt) else response
|
| 138 |
+
return answer if len(answer) > 20 else None
|
| 139 |
+
except Exception as e:
|
| 140 |
+
print(f"CardioLab model error: {e}")
|
| 141 |
+
return None
|
| 142 |
+
|
| 143 |
CSS = """
|
| 144 |
body, .gradio-container { background: #f7f7f8 !important; font-family: -apple-system, BlinkMacSystemFont, Segoe UI, sans-serif !important; }
|
| 145 |
.tab-nav { background: #ffffff !important; border-bottom: 1px solid #e5e7eb !important; padding: 0 16px !important; display: flex !important; flex-wrap: wrap !important; }
|
|
|
|
| 173 |
<svg width="100" height="28" viewBox="0 0 120 32"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg>
|
| 174 |
<div style="font-size:2em;font-weight:900;letter-spacing:2px;"><span style="color:#ffffff;">Cardio</span><span style="color:#c1121f;">Lab</span><span style="color:#ffffff;"> AI</span></div>
|
| 175 |
<svg width="100" height="28" viewBox="0 0 120 32" style="transform:scaleX(-1);"><polyline points="0,16 20,16 26,4 30,28 34,2 38,26 44,16 120,16" fill="none" stroke="#c1121f" stroke-width="2.5" stroke-linecap="round"/></svg></div>
|
| 176 |
+
<div style="color:#9ca3af;font-size:0.68em;letter-spacing:2px;text-transform:uppercase;">Fine-tuned on 16 SJSU Papers | RAG | Llama 3.3 70B | 5 AI Models</div></div>
|
| 177 |
<div style="display:flex;align-items:center;gap:14px;">
|
| 178 |
<div style="text-align:right;"><div style="color:#9ca3af;font-size:0.68em;text-transform:uppercase;">Research Pillars</div>
|
| 179 |
<div style="color:#ffffff;font-size:0.72em;margin-top:3px;">MHV CKD FSI</div>
|
|
|
|
| 183 |
<polyline points="25,45 32,45 35,35 38,55 41,30 44,50 50,45 75,45" fill="none" stroke="white" stroke-width="2.5" stroke-linecap="round" opacity="0.9"/></svg></div></div>
|
| 184 |
<div style="height:3px;background:linear-gradient(90deg,#0057a8,#c1121f,#e8a020,#c1121f,#0057a8);"></div></div>"""
|
| 185 |
|
|
|
|
| 186 |
def load_all_sessions():
|
| 187 |
if not HF_TOKEN: return {}
|
| 188 |
try:
|
|
|
|
| 202 |
|
| 203 |
def get_session_list():
|
| 204 |
s = load_all_sessions()
|
| 205 |
+
return list(reversed(list(s.keys()))) if s else ["No saved sessions"]
|
|
|
|
| 206 |
|
| 207 |
def save_session(history, name):
|
| 208 |
if not history: return "Nothing to save", gr.update()
|
|
|
|
| 216 |
def load_session(name):
|
| 217 |
if not name or "No saved" in name: return [], "Select a session"
|
| 218 |
sessions = load_all_sessions()
|
| 219 |
+
return (sessions[name]["messages"], "Loaded: "+name) if name in sessions else ([], "Not found")
|
|
|
|
| 220 |
|
| 221 |
def delete_session(name):
|
| 222 |
if not name or "No saved" in name: return "Select a session", gr.update()
|
|
|
|
| 229 |
|
| 230 |
def new_chat(): return [], "", "New chat started"
|
| 231 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 232 |
def get_pubmed_chat(query, n=3):
|
| 233 |
try:
|
| 234 |
r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
|
| 235 |
params={"db":"pubmed","term":query+" AND (heart valve OR hemodynamics OR microfluidic OR thrombogen OR creatinine OR CKD)","retmax":n,"retmode":"json","sort":"date","field":"tiab"},timeout=10)
|
| 236 |
ids = r.json()["esearchresult"]["idlist"]
|
| 237 |
+
return chr(10).join(["https://pubmed.ncbi.nlm.nih.gov/"+i for i in ids]) if ids else ""
|
|
|
|
| 238 |
except: return ""
|
| 239 |
|
|
|
|
| 240 |
def research_chat(message, history, chat_model="Llama 3.3 70B (Best)"):
|
| 241 |
+
if not message.strip(): return "", history
|
| 242 |
+
paper_context, paper_results = search_papers(message, n=4)
|
| 243 |
+
|
| 244 |
+
# Use fine-tuned CardioLab model if selected
|
| 245 |
+
if chat_model == "CardioLab Fine-tuned (SJSU)" and CARDIOLAB_MODEL_LOADED:
|
| 246 |
+
answer = answer_with_cardiolab_model(message, paper_context)
|
| 247 |
+
if answer:
|
| 248 |
+
if paper_results:
|
| 249 |
+
unique_papers = list(dict.fromkeys([r["paper"] for r in paper_results]))
|
| 250 |
+
answer += chr(10)+chr(10)+"Sources from SJSU CardioLab papers:"
|
| 251 |
+
for p in unique_papers[:3]:
|
| 252 |
+
answer += chr(10)+" - "+p.replace('.pdf','').replace('_',' ')
|
| 253 |
+
pubmed = get_pubmed_chat(message, n=2)
|
| 254 |
+
if pubmed: answer += chr(10)+"PubMed: "+pubmed
|
| 255 |
+
history.append({"role":"user","content":message})
|
| 256 |
+
history.append({"role":"assistant","content":"[CardioLab Fine-tuned Model] "+answer})
|
| 257 |
+
return "", history
|
| 258 |
+
|
| 259 |
+
# Fall back to Groq models
|
| 260 |
if not GROQ_KEY:
|
| 261 |
history.append({"role":"user","content":message})
|
| 262 |
history.append({"role":"assistant","content":"Error: Add GROQ_API_KEY to Space Settings."})
|
|
|
|
| 264 |
try:
|
| 265 |
model_id = CHAT_MODELS.get(chat_model, "llama-3.3-70b-versatile")
|
| 266 |
client = Groq(api_key=GROQ_KEY)
|
|
|
|
| 267 |
if paper_context:
|
| 268 |
system_prompt = ("You are CardioLab AI for SJSU Biomedical Engineering. "
|
| 269 |
"Answer using SJSU CardioLab research papers below. "
|
| 270 |
+
"Always cite the paper name when using specific data."+chr(10)+chr(10)+
|
| 271 |
"SJSU CARDIOLAB PAPERS:"+chr(10)+paper_context+chr(10)+chr(10)+
|
| 272 |
"ADDITIONAL KNOWLEDGE: "+KNOWHOW)
|
| 273 |
else:
|
|
|
|
| 316 |
history.append({"role":"assistant","content":"Voice error: "+str(e)})
|
| 317 |
return history
|
| 318 |
|
| 319 |
+
def expand_query_ai(query):
    """Rewrite *query* into MeSH-style PubMed search terms via a fast Groq model.

    Returns the expanded query string, or the original *query* unchanged when
    no API key is configured, the API call fails, or the model replies with
    an empty string.
    """
    if not GROQ_KEY:
        return query
    try:
        client = Groq(api_key=GROQ_KEY)
        resp = client.chat.completions.create(
            # Small/fast model: this is just query rewriting, not answering.
            model="llama-3.1-8b-instant",
            messages=[{"role": "system",
                       "content": "Biomedical PubMed expert. Convert to MeSH terms for heart valves hemodynamics PIV thrombogenicity FSI microfluidics CKD. Return ONLY terms."},
                      {"role": "user", "content": "Optimize: " + query}],
            max_tokens=80)
        return resp.choices[0].message.content.strip() or query
    except Exception:
        # Was a bare `except:`; narrowed so Ctrl-C/SystemExit still propagate.
        # Any API hiccup falls back to the unexpanded query.
        return query
|
| 328 |
+
|
| 329 |
+
def quick_search(query, search_model="Llama 3.3 70B (Best)"):
    """Search PubMed + Semantic Scholar for *query* and format a text report.

    The query is first AI-expanded to MeSH-style terms via expand_query_ai.
    Each source is fetched best-effort: a failure simply omits that section
    from the report.  *search_model* is kept for UI signature compatibility;
    the expansion model is chosen inside expand_query_ai.
    Returns the formatted report string (always ends with an SJSU
    ScholarWorks search link).
    """
    if not query.strip():
        return "Please enter a topic."
    expanded = expand_query_ai(query)
    results = []
    # --- PubMed: esearch for IDs, then efetch for titles/years -------------
    try:
        # Force the lab's research domains into the query so results stay on-topic.
        forced = expanded + " AND (heart valve OR hemodynamics OR microfluidic OR thrombogen OR creatinine OR PIV OR CFD OR CKD)"
        r = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
                         params={"db": "pubmed", "term": forced, "retmax": 8,
                                 "retmode": "json", "sort": "date", "field": "tiab"},
                         timeout=12)
        ids = r.json()["esearchresult"]["idlist"]
        if ids:
            r2 = requests.get("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi",
                              params={"db": "pubmed", "id": ",".join(ids),
                                      "retmode": "xml", "rettype": "abstract"},
                              timeout=12)
            import xml.etree.ElementTree as ET
            root = ET.fromstring(r2.content)
            for article in root.findall(".//PubmedArticle"):
                try:
                    title = article.find(".//ArticleTitle").text or "No title"
                    pmid = article.find(".//PMID").text or ""
                    year_el = article.find(".//PubDate/Year")
                    year = year_el.text if year_el is not None else ""
                    results.append({"source": "PubMed", "title": str(title), "year": year,
                                    "url": "https://pubmed.ncbi.nlm.nih.gov/" + pmid,
                                    "citations": "N/A"})
                except Exception:
                    continue  # skip malformed article records, keep the rest
    except Exception:
        pass  # PubMed section is best-effort (was a bare except)
    # --- Semantic Scholar (recent papers only) -----------------------------
    try:
        r = requests.get("https://api.semanticscholar.org/graph/v1/paper/search",
                         params={"query": expanded, "limit": 6,
                                 "fields": "title,year,url,citationCount"},
                         timeout=12)
        for p in r.json().get("data", []):
            year = p.get("year", 0) or 0
            if int(year) >= 2015:  # drop older literature
                results.append({"source": "Scholar", "title": p.get("title", ""),
                                "year": str(year), "url": p.get("url", ""),
                                "citations": str(p.get("citationCount", 0))})
    except Exception:
        pass  # Scholar section is best-effort (was a bare except)
    # --- Format report, de-duplicating by the first 50 chars of the title --
    out = "QUERY: " + query + chr(10) + "AI EXPANDED: " + expanded + chr(10) + "=" * 45 + chr(10) + chr(10)
    groups = {"PubMed": [], "Scholar": []}
    seen = set()
    for r in results:
        key = r["title"][:50].lower()
        if key not in seen and r["url"]:
            seen.add(key)
            groups[r["source"]].append(r)
    for source, papers in groups.items():
        if not papers:
            continue
        out += "--- " + source + " ---" + chr(10)
        for p in papers[:8]:
            out += p["title"][:85] + " (" + p["year"] + ")"
            if p["citations"] not in ("N/A", "", "0"):
                out += " | " + p["citations"] + " citations"
            out += chr(10) + "   " + p["url"] + chr(10) + chr(10)
    out += "--- SJSU ScholarWorks ---" + chr(10)
    out += "https://scholarworks.sjsu.edu/do/search/?q=" + requests.utils.quote(query) + "&context=6781027"
    return out
|
| 377 |
+
|
| 378 |
def analyze_upad_photo(image):
|
| 379 |
if image is None: return None, "Upload a uPAD photo first."
|
| 380 |
try:
|
|
|
|
| 421 |
ax.plot(xv,df[vc],color="#c1121f",linewidth=2.5,marker="o",markersize=5)
|
| 422 |
ax.fill_between(xv,df[vc],alpha=0.15,color="#c1121f")
|
| 423 |
ax.axhline(y=2.0,color="#f59e0b",linestyle="--",linewidth=2,label="Risk: 2.0 m/s")
|
| 424 |
+
ax.set_ylabel("Velocity (m/s)",color=ac); ax.legend(fontsize=9,labelcolor=fg,facecolor=pb)
|
| 425 |
def ps(ax):
|
| 426 |
if sc2:
|
| 427 |
xp=xv.values if tc else x
|
|
|
|
| 429 |
ax.fill_between(xp,df[sc2],alpha=0.15,color="#0057a8")
|
| 430 |
ax.axhline(y=5,color="#f59e0b",linestyle="--",linewidth=2,label="Caution 5 Pa")
|
| 431 |
ax.axhline(y=10,color="#c1121f",linestyle="--",linewidth=2,label="High risk 10 Pa")
|
| 432 |
+
ax.set_ylabel("Shear (Pa)",color=ac); ax.legend(fontsize=9,labelcolor=fg,facecolor=pb)
|
| 433 |
def psc(ax):
|
| 434 |
if vc and sc2:
|
| 435 |
s3=ax.scatter(df[vc],df[sc2],c=x,cmap="RdYlGn_r",s=90,edgecolors=fg,linewidth=0.5,zorder=5)
|
| 436 |
cb=plt.colorbar(s3,ax=ax,label="Time"); cb.ax.yaxis.label.set_color(fg); cb.ax.tick_params(colors=ac)
|
| 437 |
ax.axvline(x=2.0,color="#f59e0b",linestyle="--",linewidth=2); ax.axhline(y=10,color="#c1121f",linestyle="--",linewidth=2)
|
| 438 |
+
ax.set_xlabel("Velocity (m/s)",color=ac); ax.set_ylabel("Shear (Pa)",color=ac)
|
| 439 |
def psum(ax):
|
| 440 |
ax.axis("off"); risk=[]
|
| 441 |
st="CLINICAL SUMMARY"+chr(10)+"="*20+chr(10)+chr(10)
|
|
|
|
| 487 |
ax.fill_between(xp,yp,alpha=0.15,color=color)
|
| 488 |
for xi,yi in zip(xp,yp): ax.annotate(str(round(yi,1)),(xi,yi),textcoords="offset points",xytext=(0,10),ha="center",color=fg,fontsize=10,fontweight="bold")
|
| 489 |
ax.axhline(y=lim,color="#f59e0b",linestyle="--",linewidth=2.5,label=ll)
|
| 490 |
+
ax.legend(fontsize=10,labelcolor=fg,facecolor=pb); ax.set_ylabel(yl,color=ac)
|
|
|
|
| 491 |
mv=round(float(np.max(yp)),2)
|
| 492 |
ax.set_title(title+chr(10)+"Max: "+str(mv)+" - "+("HIGH" if mv>lim else "NORMAL"),color=fg,fontweight="bold",fontsize=12)
|
| 493 |
return mk_chart(fn,title,bg,fg,gc,ac,pb)
|
| 494 |
+
i1=mk2(tatc,"#c1121f","TAT (ng/mL)",8,"Normal: 8","TAT"); i2=mk2(pfc,"#0057a8","PF1.2",2.0,"Normal: 2.0","PF1.2")
|
| 495 |
+
i3=mk2(hc,"#2ecc71","Free Hgb (mg/L)",20,"Normal: 20","Free Hemoglobin",bar=True); i4=mk2(plc,"#e8a020","Platelets",150,"Normal>150","Platelets")
|
|
|
|
|
|
|
| 496 |
ai=""
|
| 497 |
if GROQ_KEY:
|
| 498 |
try:
|
| 499 |
client=Groq(api_key=GROQ_KEY)
|
| 500 |
resp=client.chat.completions.create(model="llama-3.3-70b-versatile",
|
| 501 |
+
messages=[{"role":"system","content":"Hematology expert. Give thrombogenicity risk."},
|
| 502 |
+
{"role":"user","content":"TGT:"+chr(10)+df.describe().to_string()[:500]}],max_tokens=250)
|
| 503 |
ai=chr(10)+"AI: "+resp.choices[0].message.content
|
| 504 |
except: pass
|
| 505 |
return i1,i2,i3,i4,"TGT: "+str(len(df))+" rows"+ai
|
|
|
|
| 528 |
r=requests.post(url,headers=headers,json={"inputs":enhanced,"parameters":{"num_inference_steps":8}},timeout=60)
|
| 529 |
if r.status_code==200: return Image.open(io.BytesIO(r.content)),"Generated!",desc
|
| 530 |
except: continue
|
| 531 |
+
return None,"Models busy.",desc
|
| 532 |
except Exception as e: return None,"Error: "+str(e),""
|
| 533 |
|
| 534 |
def piv_manual(v, s, h):
    """Classify manually-entered PIV readings.

    v: peak velocity (m/s) — flagged above 2.0.
    s: shear stress (Pa) — flagged above 5 (elevated) and 10 (high).
    h: heart rate (bpm) — echoed back unclassified.
    Returns a three-line summary string.
    """
    velocity = float(v)
    shear = float(s)
    vel_flag = "HIGH-stenosis" if velocity > 2.0 else "NORMAL"
    if shear > 10:
        shear_flag = "HIGH-thrombosis"
    elif shear > 5:
        shear_flag = "ELEVATED"
    else:
        shear_flag = "NORMAL"
    lines = [
        "Velocity: " + str(v) + " m/s — " + vel_flag,
        "Shear: " + str(s) + " Pa — " + shear_flag,
        "HR: " + str(h) + " bpm",
    ]
    return chr(10).join(lines)
|
| 538 |
|
| 539 |
def tgt_manual(t, p, h, pl, tm):
    """Score thrombogenicity risk from manual TGT inputs.

    Counts how many of TAT (t), PF1.2 (p), free hemoglobin (h), and platelet
    count (pl) cross their risk thresholds; tm (time) is accepted for UI
    signature compatibility but does not affect the score.
    Returns a three-line summary ending in HIGH RISK / MODERATE / LOW RISK.
    """
    flags = [float(t) > 15, float(p) > 2.0, float(h) > 50, float(pl) < 150]
    risk = sum(flags)
    if risk >= 3:
        verdict = "HIGH RISK"
    elif risk >= 2:
        verdict = "MODERATE"
    else:
        verdict = "LOW RISK"
    parts = [
        "TAT:" + str(t) + " PF1.2:" + str(p),
        "Hemo:" + str(h) + " Plt:" + str(pl),
        "RESULT: " + verdict,
    ]
    return chr(10).join(parts)
|
| 542 |
|
|
|
|
| 543 |
with gr.Blocks(title="CardioLab AI - SJSU", css=CSS) as demo:
|
| 544 |
gr.HTML(HEADER)
|
| 545 |
|
| 546 |
papers_count = len(set(m["paper"] for m in METADATA)) if PAPERS_LOADED else 0
|
| 547 |
+
model_status = "CardioLab Fine-tuned Model LOADED" if CARDIOLAB_MODEL_LOADED else "Fine-tuned model loading..."
|
| 548 |
+
rag_status = f"RAG: {len(CHUNKS)} chunks from {papers_count} SJSU papers" if PAPERS_LOADED else "RAG: loading..."
|
| 549 |
+
gr.HTML(f'''<div style="background:#1a7340;color:white;text-align:center;padding:7px;font-size:0.82em;font-weight:700;">
|
| 550 |
+
{rag_status} | {model_status} | Select "CardioLab Fine-tuned (SJSU)" in Model dropdown to use your custom model!</div>''')
|
| 551 |
|
| 552 |
with gr.Tabs():
|
| 553 |
with gr.Tab("Chat"):
|
|
|
|
| 565 |
delete_btn = gr.Button("Del", variant="secondary", scale=1)
|
| 566 |
session_status = gr.Textbox(label="", lines=1, interactive=False, container=False)
|
| 567 |
with gr.Column(scale=4):
|
| 568 |
+
chatbot = gr.Chatbot(label="", height=460, show_label=False, container=False)
|
| 569 |
with gr.Row():
|
| 570 |
+
msg_box = gr.Textbox(placeholder="Ask anything — AI searches 16 SJSU papers + PubMed...", label="", lines=2, scale=4, container=False)
|
| 571 |
+
with gr.Column(scale=1, min_width=160):
|
| 572 |
+
chat_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="AI Model")
|
| 573 |
send_btn = gr.Button("Send", variant="primary")
|
| 574 |
clear_btn = gr.Button("Clear", variant="secondary")
|
| 575 |
send_btn.click(research_chat, inputs=[msg_box, chatbot, chat_model_dd], outputs=[msg_box, chatbot])
|
|
|
|
| 590 |
voice_clear.click(lambda: [], outputs=voice_chatbot)
|
| 591 |
|
| 592 |
with gr.Tab("Papers"):
|
| 593 |
+
gr.Markdown("### Search PubMed + Semantic Scholar + SJSU ScholarWorks")
|
| 594 |
with gr.Row():
|
| 595 |
+
search_input = gr.Textbox(placeholder="e.g. bileaflet mechanical heart valve thrombogenicity hemodynamics", label="Research Topic", scale=3)
|
| 596 |
search_model_dd = gr.Dropdown(choices=list(CHAT_MODELS.keys()), value="Llama 3.3 70B (Best)", label="AI Model", scale=1)
|
| 597 |
+
search_btn = gr.Button("Search", variant="primary", scale=1)
|
| 598 |
+
search_output = gr.Textbox(label="Results", lines=22)
|
| 599 |
search_btn.click(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)
|
| 600 |
search_input.submit(quick_search, inputs=[search_input, search_model_dd], outputs=search_output)
|
| 601 |
|
|
|
|
| 606 |
piv_btn = gr.Button("Analyze PIV Data", variant="primary")
|
| 607 |
piv_result = gr.Textbox(label="AI Analysis", lines=4)
|
| 608 |
with gr.Row():
|
| 609 |
+
piv_c1=gr.Image(label="Velocity",type="pil"); piv_c2=gr.Image(label="Shear Stress",type="pil")
|
| 610 |
with gr.Row():
|
| 611 |
+
piv_c3=gr.Image(label="Vel vs Shear",type="pil"); piv_c4=gr.Image(label="Clinical Summary",type="pil")
|
| 612 |
piv_btn.click(analyze_piv_csv, inputs=[piv_file,piv_theme], outputs=[piv_c1,piv_c2,piv_c3,piv_c4,piv_result])
|
| 613 |
|
| 614 |
with gr.Tab("TGT CSV"):
|
|
|
|
| 627 |
with gr.Row():
|
| 628 |
with gr.Column():
|
| 629 |
photo_input = gr.Image(label="Upload uPAD Photo", type="numpy", height=260)
|
| 630 |
+
analyze_btn = gr.Button("Analyze uPAD", variant="primary")
|
| 631 |
with gr.Column():
|
| 632 |
photo_img = gr.Image(label="Detection Zone", type="pil", height=260)
|
| 633 |
photo_text = gr.Textbox(label="CKD Result", lines=8)
|
| 634 |
analyze_btn.click(analyze_upad_photo, inputs=photo_input, outputs=[photo_img, photo_text])
|
| 635 |
with gr.Row():
|
| 636 |
r=gr.Number(label="R",value=210); g=gr.Number(label="G",value=140); b=gr.Number(label="B",value=80)
|
| 637 |
+
out3=gr.Textbox(label="Result",lines=3)
|
| 638 |
gr.Button("Analyze RGB",variant="secondary").click(
|
| 639 |
lambda r,g,b:"Creatinine: "+str(max(0,round(0.02*(r-b)-0.5,2)))+" mg/dL"+chr(10)+("Normal" if max(0,round(0.02*(r-b)-0.5,2))<1.2 else "Borderline" if max(0,round(0.02*(r-b)-0.5,2))<1.5 else "CKD"),
|
| 640 |
inputs=[r,g,b],outputs=out3)
|
|
|
|
| 643 |
with gr.Row():
|
| 644 |
img_prompt = gr.Textbox(placeholder="e.g. 27mm bileaflet mechanical heart valve cross section", label="Describe image", lines=2, scale=4)
|
| 645 |
with gr.Column(scale=1):
|
| 646 |
+
img_btn = gr.Button("Generate", variant="primary")
|
| 647 |
img_status = gr.Textbox(label="Status", lines=1)
|
| 648 |
img_desc = gr.Textbox(label="AI Description", lines=2, interactive=False)
|
| 649 |
img_output = gr.Image(label="Generated Image", type="pil", height=400)
|
|
|
|
| 661 |
with gr.Column():
|
| 662 |
t1=gr.Number(label="TAT ng/mL",value=18); t2=gr.Number(label="PF1.2",value=2.5)
|
| 663 |
t3=gr.Number(label="Hemoglobin mg/L",value=60); t4=gr.Number(label="Platelets",value=140)
|
| 664 |
+
t5=gr.Number(label="Time min",value=40); out2=gr.Textbox(label="Result",lines=6)
|
| 665 |
gr.Button("Analyze TGT",variant="primary").click(tgt_manual,inputs=[t1,t2,t3,t4,t5],outputs=out2)
|
| 666 |
|
| 667 |
gr.HTML("""<div style="text-align:center;padding:10px;border-top:1px solid #e5e7eb;background:#f9fafb;">
|
| 668 |
+
<span style="color:#9ca3af;font-size:0.75em;">CardioLab AI v36 | SJSU Biomedical Engineering | Fine-tuned on 16 SJSU Papers | RAG + Custom Model | Inspired by <a href="https://github.com/snap-stanford/Biomni" style="color:#c1121f;">Biomni Stanford</a> | <a href="https://github.com/pranatechsol/Cardio-Lab-Ai" style="color:#0057a8;">GitHub</a> | Apache 2.0 | $0 Cost</span></div>""")
|
| 669 |
|
| 670 |
demo.launch()
|