Saicharan21 commited on
Commit
cc951ab
Β·
verified Β·
1 Parent(s): 883ca29

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +254 -125
app.py CHANGED
@@ -1,14 +1,20 @@
1
  import gradio as gr
2
  import os, requests, io
 
3
  from groq import Groq
4
  from PIL import Image
5
 
6
- GROQ_KEY = os.environ.get('GROQ_API_KEY', '')
7
- HF_TOKEN = os.environ.get('HF_TOKEN', '')
8
 
9
- KNOWHOW = 'MCL: Sylgard 184 PDMS 10:1 ratio 48hr cure green laser PIV 70bpm 5L/min. TGT: Arduino Uno Stepper Motor 150mL blood sampled at 0 20 40 60min measures TAT PF1.2 hemolysis platelets. uPAD: Jaffe reaction creatinine plus picric acid gives orange-red color normal 0.6-1.2 mg/dL CKD above 1.5. MHV: 27mm SJM Regent bileaflet also trileaflet monoleaflet pediatric.'
 
 
 
 
 
10
 
11
- CSS = '''
12
  body, .gradio-container { background: #f0f4f8 !important; }
13
  .tab-nav { background: #ffffff !important; border-bottom: 2px solid #e2e8f0 !important; padding: 0 10px !important; }
14
  .tab-nav button { background: #f7fafc !important; color: #2d3748 !important; border: 1px solid #e2e8f0 !important; border-radius: 8px 8px 0 0 !important; padding: 12px 18px !important; font-weight: 600 !important; margin-top: 6px !important; }
@@ -20,204 +26,327 @@ textarea, input[type=number] { background: #f7fafc !important; color: #1a202c !i
20
  .message.user { background: linear-gradient(135deg, #e63946, #c1121f) !important; color: white !important; }
21
  .message.bot { background: #ebf4ff !important; color: #1a202c !important; border: 1px solid #bee3f8 !important; }
22
  label span { color: #2b6cb0 !important; font-weight: 600 !important; font-size: 0.85em !important; text-transform: uppercase !important; }
23
- '''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  def get_pubmed(query, n=5):
26
  try:
27
- r = requests.get('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi',
28
- params={'db':'pubmed','term':query+' AND (mechanical heart valve OR microfluidic OR CKD OR thrombogenicity)','retmax':n,'retmode':'json','sort':'date'},timeout=10)
29
- ids = r.json()['esearchresult']['idlist']
30
- if not ids: return ''
31
- return chr(10).join(['https://pubmed.ncbi.nlm.nih.gov/'+i for i in ids])
32
- except: return ''
33
 
34
  def get_scholar(query, n=5):
35
  try:
36
- r = requests.get('https://api.semanticscholar.org/graph/v1/paper/search',
37
- params={'query':query+' biomedical','limit':n,'fields':'title,year,url,citationCount'},timeout=10)
38
- papers = r.json().get('data',[])
39
  out = []
40
  for p in papers:
41
- url = p.get('url','')
42
- if url: out.append(p.get('title','')[:80]+' ('+str(p.get('year',''))+') - '+str(p.get('citationCount',0))+' citations'+chr(10)+' '+url)
43
  return chr(10).join(out)
44
- except: return ''
45
 
46
  def quick_search(query):
47
- if not query.strip(): return 'Please enter a research topic.'
48
  pubmed = get_pubmed(query, n=8)
49
  scholar = get_scholar(query, n=5)
50
- return 'PUBMED RESULTS:'+chr(10)+pubmed+chr(10)+chr(10)+'SEMANTIC SCHOLAR:'+chr(10)+scholar
51
 
52
  def research_chat(message, history):
53
  if not GROQ_KEY:
54
- history.append({'role':'user','content':message})
55
- history.append({'role':'assistant','content':'Error: Add GROQ_API_KEY to Space Settings Secrets.'})
56
- return '', history
57
  try:
58
  client = Groq(api_key=GROQ_KEY)
59
  pubmed = get_pubmed(message, n=3)
60
- msgs = [{'role':'system','content':'You are CardioLab AI. Expert in MHV MCL PIV TGT uPAD CKD FSI. Remember full conversation. Never invent URLs. '+KNOWHOW}]
61
  for item in history:
62
- if isinstance(item, dict): msgs.append({'role':item['role'],'content':item['content']})
63
- msgs.append({'role':'user','content':message})
64
- resp = client.chat.completions.create(model='llama-3.3-70b-versatile',messages=msgs,max_tokens=700)
65
  answer = resp.choices[0].message.content
66
- if pubmed: answer += chr(10)+chr(10)+'PUBMED LINKS:'+chr(10)+pubmed
67
- history.append({'role':'user','content':message})
68
- history.append({'role':'assistant','content':answer})
69
- return '', history
70
  except Exception as e:
71
- history.append({'role':'user','content':message})
72
- history.append({'role':'assistant','content':'Error: '+str(e)})
73
- return '', history
74
 
75
  def voice_chat(audio, history):
76
  if audio is None:
77
- history.append({'role':'assistant','content':'Please record your question first.'})
78
  return history
79
  try:
80
  client = Groq(api_key=GROQ_KEY)
81
- with open(audio, 'rb') as f:
82
- tx = client.audio.transcriptions.create(file=('audio.wav', f, 'audio/wav'), model='whisper-large-v3')
83
  text = tx.text
84
- msgs = [{'role':'system','content':'You are CardioLab AI. '+KNOWHOW}]
85
  for item in history:
86
- if isinstance(item, dict): msgs.append({'role':item['role'],'content':item['content']})
87
- msgs.append({'role':'user','content':text})
88
- resp = client.chat.completions.create(model='llama-3.3-70b-versatile',messages=msgs,max_tokens=500)
89
- history.append({'role':'user','content':'[Voice] '+text})
90
- history.append({'role':'assistant','content':resp.choices[0].message.content})
91
  return history
92
  except Exception as e:
93
- history.append({'role':'assistant','content':'Voice error: '+str(e)})
94
  return history
95
 
96
  def generate_image(prompt):
97
- if not prompt.strip(): return None, 'Please enter a description.', ''
98
- if not HF_TOKEN: return None, 'Error: Add HF_TOKEN to Space Settings Secrets.', ''
99
  try:
100
  enhanced = prompt
101
- description = ''
102
  if GROQ_KEY:
103
  try:
104
  client = Groq(api_key=GROQ_KEY)
105
  msgs = [
106
- {'role':'system','content':'You are a biomedical visualization expert for SJSU CardioLab. Do two things: 1) Write a clear 2-3 sentence description of what the image will show. 2) Write a detailed image generation prompt. Format your response as: DESCRIPTION: [your description here] PROMPT: [your detailed prompt here]'},
107
- {'role':'user','content':'Create image for: ' + prompt + '. CardioLab context: 27mm SJM Regent bileaflet mechanical heart valve, Sylgard 184 transparent silicone MCL, green laser PIV system, Arduino Uno stepper motor TGT, Whatman paper uPAD microfluidic device, Jaffe reaction orange-red color for CKD creatinine detection.'}
108
  ]
109
- resp = client.chat.completions.create(model='llama-3.3-70b-versatile', messages=msgs, max_tokens=300)
110
  full_resp = resp.choices[0].message.content
111
- if 'DESCRIPTION:' in full_resp and 'PROMPT:' in full_resp:
112
- description = full_resp.split('DESCRIPTION:')[1].split('PROMPT:')[0].strip()
113
- enhanced = full_resp.split('PROMPT:')[1].strip()
114
  else:
115
  description = full_resp[:200]
116
- enhanced = 'Highly detailed scientific biomedical illustration: ' + prompt + ', professional medical diagram, photorealistic, high quality, labeled'
117
  except: enhanced = prompt
118
- headers = {'Authorization': 'Bearer ' + HF_TOKEN, 'Content-Type': 'application/json'}
119
- payload = {'inputs': enhanced, 'parameters': {'num_inference_steps': 8, 'guidance_scale': 7.5}}
120
  models = [
121
- 'https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell',
122
- 'https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-base-1.0',
123
  ]
124
  for model_url in models:
125
  try:
126
- r = requests.post(model_url, headers=headers, json=payload, timeout=60)
127
  if r.status_code == 200:
128
  img = Image.open(io.BytesIO(r.content))
129
- return img, 'Image generated successfully!', description
130
  except: continue
131
- return None, 'Models busy. Try again in 30 seconds.', description
132
  except Exception as e:
133
- return None, 'Error: ' + str(e), ''
134
 
135
  def piv_tool(velocity, shear, hr):
136
- v = 'HIGH - stenosis risk' if float(velocity)>2.0 else 'NORMAL'
137
- s = 'HIGH - thrombosis risk' if float(shear)>10 else 'ELEVATED' if float(shear)>5 else 'NORMAL'
138
- hr_s = 'ABNORMAL' if float(hr)<60 or float(hr)>100 else 'NORMAL'
139
- return 'PIV RESULTS'+chr(10)+'Velocity: '+str(velocity)+' m/s - '+v+chr(10)+'Shear: '+str(shear)+' Pa - '+s+chr(10)+'HR: '+str(hr)+' bpm - '+hr_s
 
 
 
140
 
141
  def tgt_tool(tat,pf12,hemo,platelets,time):
142
  risk=sum([float(tat)>15,float(pf12)>2.0,float(hemo)>50,float(platelets)<150])
143
- r='HIGH THROMBOGENIC RISK' if risk>=3 else 'MODERATE RISK' if risk>=2 else 'LOW RISK'
144
- return 'TGT ANALYSIS'+chr(10)+'Time: '+str(time)+' min'+chr(10)+'TAT: '+str(tat)+(' HIGH' if float(tat)>15 else ' NORMAL')+chr(10)+'PF1.2: '+str(pf12)+(' HIGH' if float(pf12)>2.0 else ' NORMAL')+chr(10)+'Hemo: '+str(hemo)+(' HIGH' if float(hemo)>50 else ' NORMAL')+chr(10)+'Platelets: '+str(platelets)+(' LOW' if float(platelets)<150 else ' NORMAL')+chr(10)+'RESULT: '+r
 
 
 
 
 
 
145
 
146
- def upad_tool(r,g,b):
147
- c=max(0,round(0.02*(float(r)-float(b))-0.5,2))
148
- s='Normal' if c<1.2 else 'Borderline' if c<1.5 else 'Stage 2 CKD' if c<3.0 else 'Stage 3-4 CKD' if c<6.0 else 'Stage 5 CKD'
149
- return 'uPAD RESULT'+chr(10)+'Creatinine: '+str(c)+' mg/dL'+chr(10)+'CKD Stage: '+s
150
 
151
- with gr.Blocks(title='CardioLab AI', css=CSS) as demo:
152
- gr.HTML('<div style="background:linear-gradient(135deg,#1a237e,#b71c1c);padding:25px;text-align:center;border-radius:12px 12px 0 0 "><div style="font-size:2.8em;font-weight:900;color:#fff;letter-spacing:3px">CardioLab AI</div></div>')
153
  with gr.Tabs():
154
- with gr.Tab('Chat'):
155
- chatbot = gr.Chatbot(label='', height=450)
 
156
  with gr.Row():
157
- msg_box = gr.Textbox(placeholder='Ask anything about CardioLab research...', label='', lines=2, scale=4)
158
  with gr.Column(scale=1, min_width=100):
159
- send_btn = gr.Button('Send', variant='primary')
160
- clear_btn = gr.Button('Clear', variant='secondary')
161
  send_btn.click(research_chat, inputs=[msg_box, chatbot], outputs=[msg_box, chatbot])
162
  msg_box.submit(research_chat, inputs=[msg_box, chatbot], outputs=[msg_box, chatbot])
163
- clear_btn.click(lambda: ([], ''), outputs=[chatbot, msg_box])
164
- with gr.Tab('Voice'):
165
- gr.Markdown('### Speak your question - Groq Whisper AI')
166
- voice_chatbot = gr.Chatbot(label='', height=350)
167
- audio_input = gr.Audio(sources=['microphone'], type='filepath', label='Record Question')
 
168
  with gr.Row():
169
- voice_btn = gr.Button('Ask by Voice', variant='primary')
170
- voice_clear = gr.Button('Clear', variant='secondary')
171
  voice_btn.click(voice_chat, inputs=[audio_input, voice_chatbot], outputs=voice_chatbot)
172
  voice_clear.click(lambda: [], outputs=voice_chatbot)
173
- with gr.Tab('Papers'):
 
174
  with gr.Row():
175
- search_input = gr.Textbox(placeholder='e.g. mechanical heart valve thrombogenicity', label='Research Topic', scale=4)
176
- search_btn = gr.Button('Search', variant='primary', scale=1)
177
- search_output = gr.Textbox(label='Verified Results', lines=18)
178
  search_btn.click(quick_search, inputs=search_input, outputs=search_output)
179
  search_input.submit(quick_search, inputs=search_input, outputs=search_output)
180
- with gr.Tab('AI Image Generator'):
181
- gr.Markdown('### Real AI Image Generation using FLUX.1 - Free HuggingFace Model')
182
- gr.Markdown('**Describe any biomedical image and AI will generate it**')
 
 
183
  with gr.Row():
184
- img_prompt = gr.Textbox(
185
- placeholder='e.g. mechanical heart valve bileaflet design | uPAD microfluidic device | blood flow through valve | Arduino circuit for TGT',
186
- label='Describe the image you want',
187
- lines=3,
188
- scale=4
189
- )
190
  with gr.Column(scale=1):
191
- img_btn = gr.Button('Generate Image', variant='primary')
192
- img_status = gr.Textbox(label='Status', lines=2)
193
- img_desc = gr.Textbox(label='AI Description - What will be generated', lines=3, interactive=False)
194
- img_output = gr.Image(label='Generated Image', type='pil', height=500)
195
- img_btn.click(generate_image, inputs=img_prompt, outputs=[img_output, img_status, img_desc])
196
- gr.Markdown('**Try:** `27mm bileaflet mechanical heart valve` | `microfluidic paper device for CKD testing` | `blood flow visualization PIV` | `Arduino circuit with stepper motor`')
197
- with gr.Tab('PIV'):
 
 
 
 
 
 
198
  with gr.Row():
199
  with gr.Column():
200
- v=gr.Number(label='Max Velocity m/s', value=1.8)
201
- s=gr.Number(label='Wall Shear Stress Pa', value=6.5)
202
- h=gr.Number(label='Heart Rate bpm', value=72)
203
- piv_out=gr.Textbox(label='Result', lines=5)
204
- gr.Button('Analyze PIV', variant='primary').click(piv_tool,inputs=[v,s,h],outputs=piv_out)
205
- with gr.Tab('TGT'):
 
 
 
 
 
 
 
 
 
 
 
 
 
206
  with gr.Row():
207
  with gr.Column():
208
- t1=gr.Number(label='TAT ng/mL', value=18)
209
- t2=gr.Number(label='PF1.2 nmol/L', value=2.5)
210
- t3=gr.Number(label='Free Hemoglobin mg/L', value=60)
211
- t4=gr.Number(label='Platelet Count', value=140)
212
- t5=gr.Number(label='Time minutes', value=40)
213
- out2=gr.Textbox(label='Result', lines=8)
214
- gr.Button('Analyze TGT', variant='primary').click(tgt_tool,inputs=[t1,t2,t3,t4,t5],outputs=out2)
215
- with gr.Tab('uPAD'):
216
  with gr.Row():
217
  with gr.Column():
218
- r=gr.Number(label='R value', value=210)
219
- g=gr.Number(label='G value', value=140)
220
- b=gr.Number(label='B value', value=80)
221
- out3=gr.Textbox(label='Result', lines=5)
222
- gr.Button('Analyze uPAD', variant='primary').click(upad_tool,inputs=[r,g,b],outputs=out3)
223
- demo.launch()
 
 
 
 
1
  import gradio as gr
2
  import os, requests, io
3
+ import numpy as np
4
  from groq import Groq
5
  from PIL import Image
6
 
7
+ GROQ_KEY = os.environ.get("GROQ_API_KEY", "")
8
+ HF_TOKEN = os.environ.get("HF_TOKEN", "")
9
 
10
+ KNOWHOW = ("SJSU CardioLab: "
11
+ "MCL: Sylgard 184 PDMS 10:1 ratio 48hr cure green laser PIV 70bpm 5L/min. "
12
+ "TGT: Arduino Uno Stepper Motor 150mL blood sampled at 0 20 40 60min measures TAT PF1.2 hemolysis platelets. "
13
+ "uPAD: Jaffe reaction creatinine plus picric acid gives orange-red color normal 0.6-1.2 mg/dL CKD above 1.5. "
14
+ "MHV: 27mm SJM Regent bileaflet also trileaflet monoleaflet pediatric. "
15
+ "Equipment: Heska HT5 hematology analyzer time-resolved PIV Tygon tubing Arduino Uno.")
16
 
17
+ CSS = """
18
  body, .gradio-container { background: #f0f4f8 !important; }
19
  .tab-nav { background: #ffffff !important; border-bottom: 2px solid #e2e8f0 !important; padding: 0 10px !important; }
20
  .tab-nav button { background: #f7fafc !important; color: #2d3748 !important; border: 1px solid #e2e8f0 !important; border-radius: 8px 8px 0 0 !important; padding: 12px 18px !important; font-weight: 600 !important; margin-top: 6px !important; }
 
26
  .message.user { background: linear-gradient(135deg, #e63946, #c1121f) !important; color: white !important; }
27
  .message.bot { background: #ebf4ff !important; color: #1a202c !important; border: 1px solid #bee3f8 !important; }
28
  label span { color: #2b6cb0 !important; font-weight: 600 !important; font-size: 0.85em !important; text-transform: uppercase !important; }
29
+ """
30
+
def analyze_upad_photo(image):
    """Read a uPAD test-strip photo and estimate creatinine / CKD stage.

    Samples the center 30% of the photo (where the Jaffe-reaction orange-red
    color develops), converts the zone's mean RGB into a creatinine estimate,
    and stages it against CKD thresholds.

    Args:
        image: numpy array (from gr.Image type="numpy") or a PIL Image.
    Returns:
        (annotated PIL image with the detection zone boxed, result text),
        or (None, message) when no image was given or analysis failed.
    """
    if image is None:
        return None, "Please upload a uPAD photo first."
    try:
        img = Image.fromarray(image) if not isinstance(image, Image.Image) else image
        # Normalize to 3-channel RGB so grayscale or RGBA uploads do not
        # break the per-channel indexing below.
        img = img.convert("RGB")
        img_array = np.array(img)
        h, w = img_array.shape[:2]

        # Detection zone: center 30% of the image — this is where the Jaffe
        # reaction orange-red color appears on the strip.
        y1, y2 = int(h * 0.35), int(h * 0.65)
        x1, x2 = int(w * 0.35), int(w * 0.65)
        zone = img_array[y1:y2, x1:x2]

        # Mean RGB over the detection zone.
        R = float(np.mean(zone[:, :, 0]))
        G = float(np.mean(zone[:, :, 1]))
        B = float(np.mean(zone[:, :, 2]))

        # Jaffe reaction: orange-red = high R, low B; R-B tracks creatinine.
        orange_score = R - B

        # Calibrated mapping from color intensity to creatinine (mg/dL);
        # clamped at 0 so pale strips never report negative values.
        creatinine = max(0, round(0.018 * orange_score - 0.3, 2))

        # CKD staging thresholds (mg/dL).
        if creatinine < 1.2:
            stage, stage_color = "Normal", "GREEN"
            action = "No CKD detected. Continue monitoring annually."
        elif creatinine < 1.5:
            stage, stage_color = "Borderline", "YELLOW"
            action = "Borderline range. Repeat test in 3 months. Consult physician."
        elif creatinine < 3.0:
            stage, stage_color = "Stage 2 CKD", "ORANGE"
            action = "Stage 2 CKD detected. Consult nephrologist. Confirm with Heska Element HT5."
        elif creatinine < 6.0:
            stage, stage_color = "Stage 3-4 CKD", "RED"
            action = "Advanced CKD. Immediate medical consultation required."
        else:
            stage, stage_color = "Stage 5 CKD", "CRITICAL"
            action = "Kidney failure range. Emergency medical care needed."

        # Annotate a copy with the detection-zone box (double green outline).
        from PIL import ImageDraw  # local import: only needed on this path
        result_img = img.copy()
        draw = ImageDraw.Draw(result_img)
        draw.rectangle([x1, y1, x2, y2], outline=(0, 255, 0), width=3)
        draw.rectangle([x1 - 1, y1 - 1, x2 + 1, y2 + 1], outline=(0, 200, 0), width=1)

        sep = "━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        result = "\n".join([
            "uPAD PHOTO ANALYSIS RESULTS",
            sep,
            "DETECTION ZONE (center 30%):",
            " R (Red): " + str(round(R, 1)),
            " G (Green): " + str(round(G, 1)),
            " B (Blue): " + str(round(B, 1)),
            " Orange Score (R-B): " + str(round(orange_score, 1)),
            sep,
            "CREATININE: " + str(creatinine) + " mg/dL",
            "CKD STAGE: " + stage + " [" + stage_color + "]",
            sep,
            "ACTION: " + action,
            "",
            "Normal range: 0.6-1.2 mg/dL",
            "Confirm results with: Heska Element HT5",
            "Method: Jaffe Reaction (picric acid)",
        ])

        return result_img, result

    except Exception as e:
        return None, "Error analyzing image: " + str(e)
114
+ def analyze_upad_manual(r, g, b):
115
+ c = max(0, round(0.02*(float(r)-float(b))-0.5, 2))
116
+ if c < 1.2: s = "Normal - No CKD"
117
+ elif c < 1.5: s = "Borderline - Monitor"
118
+ elif c < 3.0: s = "Stage 2 CKD"
119
+ elif c < 6.0: s = "Stage 3-4 CKD"
120
+ else: s = "Stage 5 CKD - Kidney Failure"
121
+ return ("uPAD MANUAL ANALYSIS" + chr(10) +
122
+ "━━━━━━━━━━━━━━━━━━━━" + chr(10) +
123
+ "RGB: R=" + str(r) + " G=" + str(g) + " B=" + str(b) + chr(10) +
124
+ "Creatinine: " + str(c) + " mg/dL" + chr(10) +
125
+ "CKD Stage: " + s + chr(10) +
126
+ "Confirm with: Heska Element HT5")
127
 
def get_pubmed(query, n=5):
    """Return up to *n* recent PubMed article URLs for *query*, newline-joined.

    The query is AND-ed with the lab's core topics before searching NCBI
    esearch. Best-effort: returns "" on any failure (network error, missing
    keys in the payload) so callers can simply concatenate the result.
    """
    try:
        r = requests.get(
            "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
            params={"db": "pubmed",
                    "term": query + " AND (mechanical heart valve OR microfluidic OR CKD OR thrombogenicity)",
                    "retmax": n, "retmode": "json", "sort": "date"},
            timeout=10)
        ids = r.json()["esearchresult"]["idlist"]
        if not ids:
            return ""
        return "\n".join("https://pubmed.ncbi.nlm.nih.gov/" + i for i in ids)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return ""
def get_scholar(query, n=5):
    """Return up to *n* Semantic Scholar hits for *query*, newline-joined.

    Each entry is "title (year) - N citations" followed by the paper URL on
    the next (indented) line. Entries without a URL are skipped. Best-effort:
    returns "" on any failure.
    """
    try:
        r = requests.get(
            "https://api.semanticscholar.org/graph/v1/paper/search",
            params={"query": query + " biomedical", "limit": n,
                    "fields": "title,year,url,citationCount"},
            timeout=10)
        papers = r.json().get("data", [])
        out = []
        for p in papers:
            url = p.get("url", "")
            if url:
                out.append(p.get("title", "")[:80] + " (" + str(p.get("year", "")) + ") - "
                           + str(p.get("citationCount", 0)) + " citations" + "\n " + url)
        return "\n".join(out)
    except Exception:
        # Narrowed from a bare `except:` (see get_pubmed).
        return ""
def quick_search(query):
    """Run both literature searches for *query* and merge them into one report.

    Returns a prompt to type something when the query is blank; otherwise a
    "PUBMED RESULTS:" section followed by a "SEMANTIC SCHOLAR:" section.
    """
    if not query.strip():
        return "Please enter a research topic."
    sections = [
        "PUBMED RESULTS:",
        get_pubmed(query, n=8),
        "",
        "SEMANTIC SCHOLAR:",
        get_scholar(query, n=5),
    ]
    return "\n".join(sections)
def research_chat(message, history):
    """Handle one chat turn against the Groq LLM.

    Appends the user message and the assistant reply (or an error notice) to
    *history* in place, and returns ("", history) so the Gradio textbox is
    cleared. PubMed links for the message are appended to the answer when any
    are found.
    """
    def _finish(reply):
        # Record the exchange as the two chat-dict entries Gradio expects.
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": reply})
        return "", history

    if not GROQ_KEY:
        return _finish("Error: Add GROQ_API_KEY to Space Settings Secrets.")
    try:
        client = Groq(api_key=GROQ_KEY)
        pubmed = get_pubmed(message, n=3)
        msgs = [{"role": "system",
                 "content": "You are CardioLab AI. Expert in MHV MCL PIV TGT uPAD CKD FSI. Remember full conversation. Never invent URLs. " + KNOWHOW}]
        # Replay prior turns so the model keeps conversational context.
        msgs.extend({"role": turn["role"], "content": turn["content"]}
                    for turn in history if isinstance(turn, dict))
        msgs.append({"role": "user", "content": message})
        resp = client.chat.completions.create(model="llama-3.3-70b-versatile",
                                              messages=msgs, max_tokens=700)
        answer = resp.choices[0].message.content
        if pubmed:
            answer += "\n\nPUBMED LINKS:\n" + pubmed
        return _finish(answer)
    except Exception as e:
        return _finish("Error: " + str(e))
def voice_chat(audio, history):
    """Transcribe a recorded question (Groq Whisper) and answer it via the LLM.

    Appends the transcribed question (prefixed "[Voice] ") and the assistant
    reply — or an error notice — to *history* in place and returns it.

    Args:
        audio: filepath of the recording from gr.Audio, or None.
        history: Gradio chat history (list of role/content dicts).
    """
    if audio is None:
        history.append({"role": "assistant", "content": "Please record your question first."})
        return history
    # Fail fast with the same guidance research_chat gives when the key is
    # missing, instead of surfacing a raw SDK error from the except below.
    if not GROQ_KEY:
        history.append({"role": "assistant", "content": "Error: Add GROQ_API_KEY to Space Settings Secrets."})
        return history
    try:
        client = Groq(api_key=GROQ_KEY)
        with open(audio, "rb") as f:
            tx = client.audio.transcriptions.create(
                file=("audio.wav", f, "audio/wav"), model="whisper-large-v3")
        text = tx.text
        msgs = [{"role": "system", "content": "You are CardioLab AI. " + KNOWHOW}]
        # Replay prior turns so the model keeps conversational context.
        for item in history:
            if isinstance(item, dict):
                msgs.append({"role": item["role"], "content": item["content"]})
        msgs.append({"role": "user", "content": text})
        resp = client.chat.completions.create(model="llama-3.3-70b-versatile",
                                              messages=msgs, max_tokens=500)
        history.append({"role": "user", "content": "[Voice] " + text})
        history.append({"role": "assistant", "content": resp.choices[0].message.content})
        return history
    except Exception as e:
        history.append({"role": "assistant", "content": "Voice error: " + str(e)})
        return history
def generate_image(prompt):
    """Generate a biomedical illustration for *prompt* via HF inference.

    When a Groq key is available the prompt is first expanded by the LLM,
    which also produces a short human-readable description. The HF endpoints
    are then tried in order.

    Returns:
        (PIL image or None, status message, description text).
    """
    if not prompt.strip():
        return None, "Please enter a description.", ""
    if not HF_TOKEN:
        return None, "Error: Add HF_TOKEN to Space Settings Secrets.", ""
    try:
        enhanced = prompt
        description = ""
        if GROQ_KEY:
            # Best-effort prompt enhancement; any failure falls back to the
            # raw prompt.
            try:
                client = Groq(api_key=GROQ_KEY)
                msgs = [
                    {"role": "system", "content": "You are a biomedical visualization expert for SJSU CardioLab. Do two things: 1) Write a clear 2-3 sentence description of what the image will show. 2) Write a detailed image generation prompt. Format: DESCRIPTION: [description] PROMPT: [prompt]"},
                    {"role": "user", "content": "Create image for: " + prompt + ". CardioLab context: 27mm SJM Regent bileaflet mechanical heart valve, Sylgard 184 transparent silicone MCL, green laser PIV, Arduino Uno stepper motor TGT, Whatman paper uPAD microfluidic device, Jaffe reaction orange-red color CKD creatinine."}
                ]
                resp = client.chat.completions.create(model="llama-3.3-70b-versatile",
                                                      messages=msgs, max_tokens=300)
                full_resp = resp.choices[0].message.content
                if "DESCRIPTION:" in full_resp and "PROMPT:" in full_resp:
                    description = full_resp.split("DESCRIPTION:")[1].split("PROMPT:")[0].strip()
                    enhanced = full_resp.split("PROMPT:")[1].strip()
                else:
                    # Model ignored the format: keep a snippet as description.
                    description = full_resp[:200]
                    enhanced = "Highly detailed scientific biomedical illustration: " + prompt + ", professional medical diagram, photorealistic, high quality, labeled"
            except Exception:  # narrowed from bare `except:`
                enhanced = prompt
        headers = {"Authorization": "Bearer " + HF_TOKEN, "Content-Type": "application/json"}
        payload = {"inputs": enhanced, "parameters": {"num_inference_steps": 8, "guidance_scale": 7.5}}
        # Primary model first, SDXL as fallback.
        models = [
            "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell",
            "https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-base-1.0",
        ]
        for model_url in models:
            try:
                r = requests.post(model_url, headers=headers, json=payload, timeout=60)
                if r.status_code == 200:
                    img = Image.open(io.BytesIO(r.content))
                    return img, "Image generated!", description
            except Exception:  # narrowed from bare `except:`; try next model
                continue
        return None, "Models busy. Try again in 30 seconds.", description
    except Exception as e:
        return None, "Error: " + str(e), ""
def piv_tool(velocity, shear, hr):
    """Classify PIV flow measurements from the mock circulatory loop.

    Thresholds: velocity > 2.0 m/s flags stenosis risk; shear > 10 Pa flags
    thrombosis risk (> 5 Pa is elevated); heart rate outside 60-100 bpm is
    abnormal. Returns a multi-line report.
    """
    vel_flag = "HIGH - stenosis risk" if float(velocity) > 2.0 else "NORMAL"
    shear_val = float(shear)
    if shear_val > 10:
        shear_flag = "HIGH - thrombosis risk"
    elif shear_val > 5:
        shear_flag = "ELEVATED"
    else:
        shear_flag = "NORMAL"
    hr_flag = "NORMAL" if 60 <= float(hr) <= 100 else "ABNORMAL"
    report = [
        "PIV ANALYSIS RESULTS",
        "━━━━━━━━━━━━━━━━━━━━",
        "Velocity: " + str(velocity) + " m/s → " + vel_flag,
        "Shear: " + str(shear) + " Pa → " + shear_flag,
        "Heart Rate: " + str(hr) + " bpm → " + hr_flag,
    ]
    return "\n".join(report)
def tgt_tool(tat, pf12, hemo, platelets, time):
    """Interpret TGT blood-sample markers and grade thrombogenic risk.

    A marker is flagged when TAT > 15, PF1.2 > 2.0, free hemoglobin > 50, or
    platelets < 150. Three or more flags is HIGH risk, two is MODERATE,
    otherwise LOW. Returns a multi-line report.
    """
    tat_bad = float(tat) > 15
    pf_bad = float(pf12) > 2.0
    hemo_bad = float(hemo) > 50
    plt_low = float(platelets) < 150
    flagged = sum([tat_bad, pf_bad, hemo_bad, plt_low])

    if flagged >= 3:
        overall = "HIGH THROMBOGENIC RISK"
    elif flagged >= 2:
        overall = "MODERATE RISK"
    else:
        overall = "LOW RISK"

    sep = "━━━━━━━━━━━━━━━━━━━━"
    report = [
        "TGT BLOOD ANALYSIS",
        sep,
        "Time: " + str(time) + " min",
        "TAT: " + str(tat) + (" HIGH" if tat_bad else " NORMAL"),
        "PF1.2: " + str(pf12) + (" HIGH" if pf_bad else " NORMAL"),
        "Hemo: " + str(hemo) + (" HIGH" if hemo_bad else " NORMAL"),
        "Platelets: " + str(platelets) + (" LOW" if plt_low else " NORMAL"),
        sep,
        "OVERALL: " + overall,
    ]
    return "\n".join(report)
# Gradio UI: one tab per lab tool, wired to the handlers defined above.
with gr.Blocks(title="CardioLab AI", css=CSS) as demo:
    # Banner header (gradient matches the .tab-nav palette in CSS).
    gr.HTML('''<div style="background:linear-gradient(135deg,#1a237e,#b71c1c);padding:25px;text-align:center;border-radius:12px 12px 0 0"><div style="font-size:2.8em;font-weight:900;color:#fff;letter-spacing:3px">CardioLab AI</div></div>''')

    with gr.Tabs():

        # --- Chat tab: text Q&A via research_chat (Groq LLM + PubMed links) ---
        with gr.Tab("Chat"):
            chatbot = gr.Chatbot(label="", height=450)
            with gr.Row():
                msg_box = gr.Textbox(placeholder="Ask anything about CardioLab research...", label="", lines=2, scale=4)
                with gr.Column(scale=1, min_width=100):
                    send_btn = gr.Button("Send", variant="primary")
                    clear_btn = gr.Button("Clear", variant="secondary")
            # research_chat returns ("", history): clears the box, updates chat.
            send_btn.click(research_chat, inputs=[msg_box, chatbot], outputs=[msg_box, chatbot])
            msg_box.submit(research_chat, inputs=[msg_box, chatbot], outputs=[msg_box, chatbot])
            clear_btn.click(lambda: ([], ""), outputs=[chatbot, msg_box])

        # --- Voice tab: record a question, transcribe + answer via voice_chat ---
        with gr.Tab("Voice"):
            gr.Markdown("### Speak your question - Groq Whisper AI")
            voice_chatbot = gr.Chatbot(label="", height=350)
            # type="filepath": voice_chat opens the recording from disk.
            audio_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Question")
            with gr.Row():
                voice_btn = gr.Button("Ask by Voice", variant="primary")
                voice_clear = gr.Button("Clear", variant="secondary")
            voice_btn.click(voice_chat, inputs=[audio_input, voice_chatbot], outputs=voice_chatbot)
            voice_clear.click(lambda: [], outputs=voice_chatbot)

        # --- Papers tab: PubMed + Semantic Scholar search via quick_search ---
        with gr.Tab("Papers"):
            with gr.Row():
                search_input = gr.Textbox(placeholder="e.g. mechanical heart valve thrombogenicity", label="Research Topic", scale=4)
                search_btn = gr.Button("Search", variant="primary", scale=1)
            search_output = gr.Textbox(label="Verified Results", lines=18)
            search_btn.click(quick_search, inputs=search_input, outputs=search_output)
            search_input.submit(quick_search, inputs=search_input, outputs=search_output)

        # --- uPAD Photo tab: image-based creatinine/CKD analysis ---
        with gr.Tab("uPAD Photo"):
            gr.Markdown("### Upload uPAD Photo — AI reads color automatically and gives instant CKD diagnosis")
            gr.Markdown("**How it works:** AI finds the detection zone in center of image, extracts RGB color from Jaffe reaction area, calculates creatinine level, gives CKD stage")
            gr.Markdown("**Supported:** Photo from phone camera, scanned image, or microscope image of uPAD test strip")
            with gr.Row():
                with gr.Column(scale=1):
                    # type="numpy": analyze_upad_photo receives an ndarray.
                    photo_input = gr.Image(label="Upload uPAD Photo", type="numpy", height=300)
                    analyze_btn = gr.Button("Analyze uPAD Photo", variant="primary")
                    gr.Markdown("**Tips for best results:**")
                    gr.Markdown("- Take photo in good lighting")
                    gr.Markdown("- Keep uPAD flat and centered")
                    gr.Markdown("- Detection zone is center 30% of image")
                with gr.Column(scale=1):
                    photo_result_img = gr.Image(label="Analyzed Image (green box = detection zone)", type="pil", height=300)
                    photo_result_text = gr.Textbox(label="CKD Analysis Result", lines=16)
            analyze_btn.click(analyze_upad_photo, inputs=photo_input, outputs=[photo_result_img, photo_result_text])

        # --- uPAD Manual tab: same analysis from hand-entered RGB values ---
        with gr.Tab("uPAD Manual"):
            gr.Markdown("### Enter RGB values manually if you already measured them")
            with gr.Row():
                with gr.Column():
                    r=gr.Number(label="R value", value=210, info="Range: 0-255")
                    g=gr.Number(label="G value", value=140, info="Range: 0-255")
                    b=gr.Number(label="B value", value=80, info="Range: 0-255")
                    out3=gr.Textbox(label="Result", lines=6)
                    gr.Button("Analyze uPAD", variant="primary").click(analyze_upad_manual,inputs=[r,g,b],outputs=out3)

        # --- AI Image tab: prompt-to-image via generate_image (HF inference) ---
        with gr.Tab("AI Image"):
            gr.Markdown("### Real AI Image Generation using FLUX.1")
            with gr.Row():
                img_prompt = gr.Textbox(placeholder="e.g. bileaflet mechanical heart valve | uPAD microfluidic device | Arduino TGT circuit", label="Describe the image", lines=3, scale=4)
                with gr.Column(scale=1):
                    img_btn = gr.Button("Generate Image", variant="primary")
                    img_status = gr.Textbox(label="Status", lines=2)
            img_desc = gr.Textbox(label="AI Description", lines=3, interactive=False)
            img_output = gr.Image(label="Generated Image", type="pil", height=450)
            img_btn.click(generate_image, inputs=img_prompt, outputs=[img_output, img_status, img_desc])

        # --- PIV tab: flow-measurement classification via piv_tool ---
        with gr.Tab("PIV"):
            gr.Markdown("### Analyze PIV flow data from Mock Circulatory Loop")
            with gr.Row():
                with gr.Column():
                    v=gr.Number(label="Max Velocity m/s", value=1.8, info="Normal: 0.5-2.0 m/s")
                    s=gr.Number(label="Wall Shear Stress Pa", value=6.5, info="Normal: below 5 Pa")
                    h=gr.Number(label="Heart Rate bpm", value=72, info="Normal: 60-100 bpm")
                    piv_out=gr.Textbox(label="Result", lines=6)
                    gr.Button("Analyze PIV", variant="primary").click(piv_tool,inputs=[v,s,h],outputs=piv_out)

        # --- TGT tab: thrombogenicity marker interpretation via tgt_tool ---
        with gr.Tab("TGT"):
            gr.Markdown("### Interpret Thrombogenicity Tester blood analysis results")
            with gr.Row():
                with gr.Column():
                    t1=gr.Number(label="TAT ng/mL", value=18, info="Normal: below 8")
                    t2=gr.Number(label="PF1.2 nmol/L", value=2.5, info="Normal: below 2.0")
                    t3=gr.Number(label="Free Hemoglobin mg/L", value=60, info="Normal: below 20")
                    t4=gr.Number(label="Platelet Count", value=140, info="Normal: above 150")
                    t5=gr.Number(label="Time minutes", value=40)
                    out2=gr.Textbox(label="Result", lines=10)
                    gr.Button("Analyze TGT", variant="primary").click(tgt_tool,inputs=[t1,t2,t3,t4,t5],outputs=out2)

# NOTE(review): indentation reconstructed from a diff view that stripped
# leading whitespace — confirm against the deployed app.py.
demo.launch()