Nikhil Pravin Pise committed on
Commit
ca20dc7
·
1 Parent(s): 3ca1d38

Fix HF bugs: HTML rendering, Q&A validation, add separate Q&A tab

Browse files

- Fixed format_summary: Added missing severity_config tuple with color/bg_color
- Changed summary header text to dark color (#1e293b) instead of white
- Rewrote answer_medical_question() to use retriever+LLM directly (not PatientInput)
- Rewrote streaming_answer() to use retriever+LLM directly (not PatientInput)
- Added top-level tabs: Biomarker Analysis | Medical Q&A
- Improved Q&A UX with dedicated tab for easy access

Files changed (1) hide show
  1. huggingface/app.py +278 -238
huggingface/app.py CHANGED
@@ -381,14 +381,9 @@ def analyze_biomarkers(input_text: str, progress=gr.Progress()) -> tuple[str, st
381
 
382
 
383
  def format_summary(response: dict, elapsed: float) -> str:
384
- """Format the analysis response as beautiful HTML/markdown."""
385
  if not response:
386
- return """
387
- <div style="text-align: center; padding: 40px; color: #94a3b8;">
388
- <div style="font-size: 3em;">❌</div>
389
- <p>No analysis results available.</p>
390
- </div>
391
- """
392
 
393
  parts = []
394
 
@@ -397,17 +392,17 @@ def format_summary(response: dict, elapsed: float) -> str:
397
  confidence = response.get("confidence", {})
398
  conf_score = confidence.get("overall_score", 0) if isinstance(confidence, dict) else 0
399
 
400
- # Determine severity color
401
  severity = response.get("severity", "low")
402
- severity_colors = {
403
- "critical": ("#dc2626", "#fef2f2", "πŸ”΄"),
404
- "high": ("#ea580c", "#fff7ed", "🟠"),
405
- "moderate": ("#ca8a04", "#fefce8", "🟑"),
406
- "low": ("#16a34a", "#f0fdf4", "🟒")
407
  }
408
- color, bg_color, emoji = severity_colors.get(severity, severity_colors["low"])
409
 
410
- # Confidence badge
411
  conf_badge = ""
412
  if conf_score:
413
  conf_pct = int(conf_score * 100)
@@ -418,11 +413,10 @@ def format_summary(response: dict, elapsed: float) -> str:
418
  <div style="background: linear-gradient(135deg, {bg_color} 0%, white 100%); border-left: 4px solid {color}; border-radius: 12px; padding: 20px; margin-bottom: 20px;">
419
  <div style="display: flex; align-items: center; flex-wrap: wrap;">
420
  <span style="font-size: 1.5em; margin-right: 12px;">{emoji}</span>
421
- <h2 style="margin: 0; color: {color}; font-size: 1.4em;">{primary}</h2>
422
  {conf_badge}
423
  </div>
424
- </div>
425
- """)
426
 
427
  # Critical Alerts
428
  alerts = response.get("safety_alerts", [])
@@ -597,7 +591,7 @@ def answer_medical_question(
597
  chat_history: list = None
598
  ) -> tuple[str, list]:
599
  """
600
- Answer a free-form medical question using the RAG pipeline.
601
 
602
  Args:
603
  question: The user's medical question
@@ -623,37 +617,53 @@ def answer_medical_question(
623
 
624
  try:
625
  start_time = time.time()
626
- guild = get_guild()
627
 
628
- if guild is None:
629
- error_msg = "❌ RAG service not initialized. Please try again."
630
- history = (chat_history or []) + [(question, error_msg)]
631
- return error_msg, history
632
 
633
- # Build context with any provided biomarkers
634
- full_context = question
 
 
 
635
  if context.strip():
636
- full_context = f"Patient Context: {context}\n\nQuestion: {question}"
637
 
638
- # Run the RAG pipeline via the guild's ask method if available
639
- # Otherwise, invoke directly
640
- from src.state import PatientInput
641
 
642
- input_state = PatientInput(
643
- question=full_context,
644
- biomarkers={},
645
- patient_context=context or "",
646
- )
 
 
 
 
 
647
 
648
- # Invoke the graph
649
- result = guild.invoke(input_state)
650
 
651
- # Extract answer from result
652
- answer = ""
653
- if hasattr(result, "final_answer"):
654
- answer = result.final_answer
655
- elif isinstance(result, dict):
656
- answer = result.get("final_answer", result.get("conversational_summary", ""))
 
 
 
 
 
 
 
 
 
 
 
657
 
658
  if not answer:
659
  answer = "I apologize, but I couldn't generate a response. Please try rephrasing your question."
@@ -664,7 +674,7 @@ def answer_medical_question(
664
  formatted_answer = f"""{answer}
665
 
666
  ---
667
- *⏱️ Response time: {elapsed:.1f}s | πŸ€– Powered by Agentic RAG*
668
  """
669
 
670
  # Update chat history
@@ -674,7 +684,7 @@ def answer_medical_question(
674
 
675
  except Exception as exc:
676
  logger.exception(f"Q&A error: {exc}")
677
- error_msg = f"❌ Error processing question: {str(exc)}"
678
  history = (chat_history or []) + [(question, error_msg)]
679
  return error_msg, history
680
 
@@ -682,7 +692,7 @@ def answer_medical_question(
682
  def streaming_answer(question: str, context: str = ""):
683
  """
684
  Stream answer tokens for real-time response.
685
- Yields partial answers as they're generated.
686
  """
687
  if not question.strip():
688
  yield ""
@@ -698,39 +708,58 @@ def streaming_answer(question: str, context: str = ""):
698
  setup_llm_provider()
699
 
700
  try:
701
- guild = get_guild()
702
- if guild is None:
703
- yield "❌ RAG service not initialized. Please wait and try again."
704
- return
705
 
706
- # Build context
707
- full_context = question
 
 
 
708
  if context.strip():
709
- full_context = f"Patient Context: {context}\n\nQuestion: {question}"
710
 
711
- # Stream status updates
712
- yield "πŸ” Searching medical knowledge base...\n\n"
713
 
714
- from src.state import PatientInput
 
715
 
716
- input_state = PatientInput(
717
- question=full_context,
718
- biomarkers={},
719
- patient_context=context or "",
720
- )
 
 
 
 
 
721
 
722
- # Run pipeline (non-streaming fallback, but show progress)
723
- yield "πŸ” Searching medical knowledge base...\nπŸ“š Retrieving relevant documents...\n\n"
 
 
724
 
725
  start_time = time.time()
726
- result = guild.invoke(input_state)
727
 
728
- # Extract answer
729
- answer = ""
730
- if hasattr(result, "final_answer"):
731
- answer = result.final_answer
732
- elif isinstance(result, dict):
733
- answer = result.get("final_answer", result.get("conversational_summary", ""))
 
 
 
 
 
 
 
 
 
 
734
 
735
  if not answer:
736
  answer = "I apologize, but I couldn't generate a response. Please try rephrasing your question."
@@ -742,15 +771,15 @@ def streaming_answer(question: str, context: str = ""):
742
  accumulated = ""
743
  for i, word in enumerate(words):
744
  accumulated += word + " "
745
- if i % 5 == 0: # Update every 5 words for smooth streaming
746
  yield accumulated
747
- time.sleep(0.02) # Small delay for visual streaming effect
748
 
749
- # Final complete response with metadata
750
  yield f"""{answer}
751
 
752
  ---
753
- *⏱️ Response time: {elapsed:.1f}s | πŸ€– Powered by Agentic RAG*
754
  """
755
 
756
  except Exception as exc:
@@ -758,6 +787,7 @@ def streaming_answer(question: str, context: str = ""):
758
  yield f"❌ Error: {str(exc)}"
759
 
760
 
 
761
  # ---------------------------------------------------------------------------
762
  # Gradio Interface
763
  # ---------------------------------------------------------------------------
@@ -1052,194 +1082,204 @@ def create_demo() -> gr.Blocks:
1052
  </div>
1053
  """)
1054
 
1055
- # ===== MAIN CONTENT =====
1056
- with gr.Row(equal_height=False):
1057
 
1058
- # ----- LEFT PANEL: INPUT -----
1059
- with gr.Column(scale=2, min_width=400):
1060
- gr.HTML('<div class="section-title">πŸ“ Enter Your Biomarkers</div>')
1061
 
1062
- with gr.Group():
1063
- input_text = gr.Textbox(
1064
- label="",
1065
- placeholder="Enter biomarkers in any format:\n\nβ€’ Glucose: 140, HbA1c: 7.5, Cholesterol: 210\nβ€’ My glucose is 140 and HbA1c is 7.5\nβ€’ {\"Glucose\": 140, \"HbA1c\": 7.5}",
1066
- lines=6,
1067
- max_lines=12,
1068
- show_label=False,
1069
- )
1070
 
1071
- with gr.Row():
1072
- analyze_btn = gr.Button(
1073
- "πŸ”¬ Analyze Biomarkers",
1074
- variant="primary",
1075
- size="lg",
1076
- scale=3,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1077
  )
1078
- clear_btn = gr.Button(
1079
- "πŸ—‘οΈ Clear",
1080
- variant="secondary",
1081
- size="lg",
1082
- scale=1,
 
 
 
 
 
 
 
 
 
 
1083
  )
1084
-
1085
- # Status display
1086
- status_output = gr.Markdown(
1087
- value="",
1088
- elem_classes="status-box"
1089
- )
1090
-
1091
- # Quick Examples
1092
- gr.HTML('<div class="section-title" style="margin-top: 24px;">⚑ Quick Examples</div>')
1093
- gr.HTML('<p style="color: #64748b; font-size: 0.9em; margin-bottom: 12px;">Click any example to load it instantly</p>')
1094
-
1095
- examples = gr.Examples(
1096
- examples=[
1097
- ["Glucose: 185, HbA1c: 8.2, Cholesterol: 245, LDL: 165"],
1098
- ["Glucose: 95, HbA1c: 5.4, Cholesterol: 180, HDL: 55, LDL: 100"],
1099
- ["Hemoglobin: 9.5, Iron: 40, Ferritin: 15"],
1100
- ["TSH: 8.5, T4: 4.0, T3: 80"],
1101
- ["Creatinine: 2.5, BUN: 45, eGFR: 35"],
1102
- ],
1103
- inputs=input_text,
1104
- label="",
1105
- )
1106
-
1107
- # Supported Biomarkers
1108
- with gr.Accordion("πŸ“Š Supported Biomarkers", open=False):
1109
- gr.HTML("""
1110
- <div style="display: grid; grid-template-columns: repeat(2, 1fr); gap: 16px; padding: 12px;">
1111
- <div>
1112
- <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">🩸 Diabetes</h4>
1113
- <p style="color: #64748b; font-size: 0.85em; margin: 0;">Glucose, HbA1c, Fasting Glucose, Insulin</p>
1114
- </div>
1115
- <div>
1116
- <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">❀️ Cardiovascular</h4>
1117
- <p style="color: #64748b; font-size: 0.85em; margin: 0;">Cholesterol, LDL, HDL, Triglycerides</p>
1118
- </div>
1119
- <div>
1120
- <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">🫘 Kidney</h4>
1121
- <p style="color: #64748b; font-size: 0.85em; margin: 0;">Creatinine, BUN, eGFR, Uric Acid</p>
1122
- </div>
1123
- <div>
1124
- <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">🦴 Liver</h4>
1125
- <p style="color: #64748b; font-size: 0.85em; margin: 0;">ALT, AST, Bilirubin, Albumin</p>
1126
- </div>
1127
- <div>
1128
- <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">πŸ¦‹ Thyroid</h4>
1129
- <p style="color: #64748b; font-size: 0.85em; margin: 0;">TSH, T3, T4, Free T4</p>
1130
- </div>
1131
- <div>
1132
- <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">πŸ’‰ Blood</h4>
1133
- <p style="color: #64748b; font-size: 0.85em; margin: 0;">Hemoglobin, WBC, RBC, Platelets</p>
1134
- </div>
1135
- </div>
1136
- """)
1137
-
1138
- # ----- RIGHT PANEL: RESULTS -----
1139
- with gr.Column(scale=3, min_width=500):
1140
- gr.HTML('<div class="section-title">πŸ“Š Analysis Results</div>')
1141
-
1142
- with gr.Tabs() as result_tabs:
1143
- with gr.Tab("πŸ“‹ Summary", id="summary"):
1144
- summary_output = gr.Markdown(
1145
- value="""
1146
  <div style="text-align: center; padding: 60px 20px; color: #94a3b8;">
1147
  <div style="font-size: 4em; margin-bottom: 16px;">πŸ”¬</div>
1148
  <h3 style="color: #64748b; font-weight: 500;">Ready to Analyze</h3>
1149
  <p>Enter your biomarkers on the left and click <strong>Analyze</strong> to get your personalized health insights.</p>
1150
  </div>
1151
- """,
1152
- elem_classes="summary-output"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1153
  )
1154
-
1155
- with gr.Tab("πŸ” Detailed JSON", id="json"):
1156
- details_output = gr.Code(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1157
  label="",
1158
- language="json",
1159
- lines=30,
1160
- show_label=False,
1161
  )
1162
-
1163
- # ===== Q&A SECTION =====
1164
- gr.HTML('<div class="section-title" style="margin-top: 32px;">πŸ’¬ Medical Q&A Assistant</div>')
1165
- gr.HTML("""
1166
- <p style="color: #64748b; margin-bottom: 16px;">
1167
- Ask any medical question and get evidence-based answers powered by our RAG system with 750+ pages of clinical guidelines.
1168
- </p>
1169
- """)
1170
-
1171
- with gr.Row(equal_height=False):
1172
- with gr.Column(scale=1):
1173
- qa_context = gr.Textbox(
1174
- label="Patient Context (Optional)",
1175
- placeholder="Provide biomarkers or context:\nβ€’ Glucose: 140, HbA1c: 7.5\nβ€’ 45-year-old male with family history of diabetes",
1176
- lines=3,
1177
- max_lines=6,
1178
- )
1179
- qa_question = gr.Textbox(
1180
- label="Your Question",
1181
- placeholder="Ask any medical question...\nβ€’ What do my elevated glucose levels indicate?\nβ€’ Should I be concerned about my HbA1c of 7.5%?\nβ€’ What lifestyle changes help with prediabetes?",
1182
- lines=3,
1183
- max_lines=6,
1184
- )
1185
- with gr.Row():
1186
- qa_submit_btn = gr.Button(
1187
- "πŸ’¬ Ask Question",
1188
- variant="primary",
1189
- size="lg",
1190
- scale=3,
1191
- )
1192
- qa_clear_btn = gr.Button(
1193
- "πŸ—‘οΈ Clear",
1194
- variant="secondary",
1195
- size="lg",
1196
- scale=1,
1197
- )
1198
-
1199
- # Quick question examples
1200
- gr.HTML('<h4 style="margin-top: 16px; color: #1e3a5f;">Example Questions</h4>')
1201
- qa_examples = gr.Examples(
1202
- examples=[
1203
- ["What does elevated HbA1c mean?", ""],
1204
- ["How is diabetes diagnosed?", "Glucose: 185, HbA1c: 7.8"],
1205
- ["What lifestyle changes help lower cholesterol?", "LDL: 165, HDL: 35"],
1206
- ["What causes high creatinine levels?", "Creatinine: 2.5, BUN: 45"],
1207
- ],
1208
- inputs=[qa_question, qa_context],
1209
- label="",
1210
- )
1211
-
1212
- with gr.Column(scale=2):
1213
- gr.HTML('<h4 style="color: #1e3a5f; margin-bottom: 12px;">πŸ“ Answer</h4>')
1214
- qa_answer = gr.Markdown(
1215
- value="""
1216
  <div style="text-align: center; padding: 40px 20px; color: #94a3b8;">
1217
  <div style="font-size: 3em; margin-bottom: 12px;">πŸ’¬</div>
1218
  <h3 style="color: #64748b; font-weight: 500;">Ask a Medical Question</h3>
1219
  <p>Enter your question on the left and click <strong>Ask Question</strong> to get evidence-based answers.</p>
1220
  </div>
1221
- """,
1222
- elem_classes="qa-output"
 
 
 
 
 
 
 
 
1223
  )
1224
-
1225
- # Q&A Event Handlers
1226
- qa_submit_btn.click(
1227
- fn=streaming_answer,
1228
- inputs=[qa_question, qa_context],
1229
- outputs=qa_answer,
1230
- show_progress="minimal",
1231
- )
1232
-
1233
- qa_clear_btn.click(
1234
- fn=lambda: ("", "", """
1235
  <div style="text-align: center; padding: 40px 20px; color: #94a3b8;">
1236
  <div style="font-size: 3em; margin-bottom: 12px;">πŸ’¬</div>
1237
  <h3 style="color: #64748b; font-weight: 500;">Ask a Medical Question</h3>
1238
  <p>Enter your question on the left and click <strong>Ask Question</strong> to get evidence-based answers.</p>
1239
  </div>
1240
- """),
1241
- outputs=[qa_question, qa_context, qa_answer],
1242
- )
1243
 
1244
  # ===== HOW IT WORKS =====
1245
  gr.HTML('<div class="section-title" style="margin-top: 32px;">πŸ€– How It Works</div>')
 
381
 
382
 
383
  def format_summary(response: dict, elapsed: float) -> str:
384
+ """Format the analysis response as clean markdown with black text."""
385
  if not response:
386
+ return "❌ **No analysis results available.**"
 
 
 
 
 
387
 
388
  parts = []
389
 
 
392
  confidence = response.get("confidence", {})
393
  conf_score = confidence.get("overall_score", 0) if isinstance(confidence, dict) else 0
394
 
395
+ # Determine severity
396
  severity = response.get("severity", "low")
397
+ severity_config = {
398
+ "critical": ("πŸ”΄", "#dc2626", "#fef2f2"),
399
+ "high": ("🟠", "#ea580c", "#fff7ed"),
400
+ "moderate": ("🟑", "#ca8a04", "#fefce8"),
401
+ "low": ("🟒", "#16a34a", "#f0fdf4")
402
  }
403
+ emoji, color, bg_color = severity_config.get(severity, severity_config["low"])
404
 
405
+ # Build confidence display
406
  conf_badge = ""
407
  if conf_score:
408
  conf_pct = int(conf_score * 100)
 
413
  <div style="background: linear-gradient(135deg, {bg_color} 0%, white 100%); border-left: 4px solid {color}; border-radius: 12px; padding: 20px; margin-bottom: 20px;">
414
  <div style="display: flex; align-items: center; flex-wrap: wrap;">
415
  <span style="font-size: 1.5em; margin-right: 12px;">{emoji}</span>
416
+ <h2 style="margin: 0; color: #1e293b; font-size: 1.4em;">{primary}</h2>
417
  {conf_badge}
418
  </div>
419
+ </div>""")
 
420
 
421
  # Critical Alerts
422
  alerts = response.get("safety_alerts", [])
 
591
  chat_history: list = None
592
  ) -> tuple[str, list]:
593
  """
594
+ Answer a free-form medical question using retriever + LLM directly.
595
 
596
  Args:
597
  question: The user's medical question
 
617
 
618
  try:
619
  start_time = time.time()
 
620
 
621
+ # Import retriever and LLM
622
+ from src.services.retrieval import make_retriever
623
+ from src.llm_config import get_synthesizer
 
624
 
625
+ # Initialize retriever
626
+ retriever = make_retriever()
627
+
628
+ # Build search query with context
629
+ search_query = question
630
  if context.strip():
631
+ search_query = f"{context} {question}"
632
 
633
+ # Retrieve relevant documents
634
+ docs = retriever.search(search_query, top_k=5)
 
635
 
636
+ # Format context from retrieved docs
637
+ doc_context = ""
638
+ if docs:
639
+ doc_texts = []
640
+ for doc in docs[:5]:
641
+ if hasattr(doc, 'content'):
642
+ doc_texts.append(doc.content[:500])
643
+ elif isinstance(doc, dict) and 'content' in doc:
644
+ doc_texts.append(doc['content'][:500])
645
+ doc_context = "\n\n---\n\n".join(doc_texts)
646
 
647
+ # Get LLM
648
+ llm = get_synthesizer()
649
 
650
+ # Build prompt
651
+ prompt = f"""You are a medical AI assistant. Answer the following medical question based on the provided context.
652
+ Be helpful, accurate, and include relevant medical information. Always recommend consulting a healthcare professional for personal medical advice.
653
+
654
+ Context from medical knowledge base:
655
+ {doc_context if doc_context else "No specific context available - using general medical knowledge."}
656
+
657
+ Patient Context: {context if context else "Not provided"}
658
+
659
+ Question: {question}
660
+
661
+ Answer:"""
662
+
663
+
664
+ # Generate response
665
+ response = llm.invoke(prompt)
666
+ answer = response.content if hasattr(response, 'content') else str(response)
667
 
668
  if not answer:
669
  answer = "I apologize, but I couldn't generate a response. Please try rephrasing your question."
 
674
  formatted_answer = f"""{answer}
675
 
676
  ---
677
+ *⏱️ Response time: {elapsed:.1f}s | πŸ€– Powered by RAG*
678
  """
679
 
680
  # Update chat history
 
684
 
685
  except Exception as exc:
686
  logger.exception(f"Q&A error: {exc}")
687
+ error_msg = f"❌ Error: {str(exc)}"
688
  history = (chat_history or []) + [(question, error_msg)]
689
  return error_msg, history
690
 
 
692
  def streaming_answer(question: str, context: str = ""):
693
  """
694
  Stream answer tokens for real-time response.
695
+ Uses retriever + LLM directly (not the guild).
696
  """
697
  if not question.strip():
698
  yield ""
 
708
  setup_llm_provider()
709
 
710
  try:
711
+ yield "πŸ” Searching medical knowledge base...\n\n"
712
+
713
+ from src.services.retrieval import make_retriever
714
+ from src.llm_config import get_synthesizer
715
 
716
+ # Initialize retriever
717
+ retriever = make_retriever()
718
+
719
+ # Build search query
720
+ search_query = question
721
  if context.strip():
722
+ search_query = f"{context} {question}"
723
 
724
+ yield "πŸ” Searching medical knowledge base...\nπŸ“š Retrieving relevant documents...\n\n"
 
725
 
726
+ # Retrieve docs
727
+ docs = retriever.search(search_query, top_k=5)
728
 
729
+ # Format context
730
+ doc_context = ""
731
+ if docs:
732
+ doc_texts = []
733
+ for doc in docs[:5]:
734
+ if hasattr(doc, 'content'):
735
+ doc_texts.append(doc.content[:500])
736
+ elif isinstance(doc, dict) and 'content' in doc:
737
+ doc_texts.append(doc['content'][:500])
738
+ doc_context = "\n\n---\n\n".join(doc_texts)
739
 
740
+ yield "πŸ” Searching medical knowledge base...\nπŸ“š Retrieving relevant documents...\nπŸ’­ Generating response...\n\n"
741
+
742
+ # Get LLM
743
+ llm = get_synthesizer()
744
 
745
  start_time = time.time()
 
746
 
747
+ # Build prompt
748
+ prompt = f"""You are a medical AI assistant. Answer the following medical question based on the provided context.
749
+ Be helpful, accurate, and include relevant medical information. Always recommend consulting a healthcare professional for personal medical advice.
750
+
751
+ Context from medical knowledge base:
752
+ {doc_context if doc_context else "No specific context available - using general medical knowledge."}
753
+
754
+ Patient Context: {context if context else "Not provided"}
755
+
756
+ Question: {question}
757
+
758
+ Answer:"""
759
+
760
+ # Generate response
761
+ response = llm.invoke(prompt)
762
+ answer = response.content if hasattr(response, 'content') else str(response)
763
 
764
  if not answer:
765
  answer = "I apologize, but I couldn't generate a response. Please try rephrasing your question."
 
771
  accumulated = ""
772
  for i, word in enumerate(words):
773
  accumulated += word + " "
774
+ if i % 5 == 0:
775
  yield accumulated
776
+ time.sleep(0.02)
777
 
778
+ # Final complete response
779
  yield f"""{answer}
780
 
781
  ---
782
+ *⏱️ Response time: {elapsed:.1f}s | πŸ€– Powered by RAG*
783
  """
784
 
785
  except Exception as exc:
 
787
  yield f"❌ Error: {str(exc)}"
788
 
789
 
790
+
791
  # ---------------------------------------------------------------------------
792
  # Gradio Interface
793
  # ---------------------------------------------------------------------------
 
1082
  </div>
1083
  """)
1084
 
1085
+ # ===== MAIN TABS =====
1086
+ with gr.Tabs() as main_tabs:
1087
 
1088
+ # ==================== TAB 1: BIOMARKER ANALYSIS ====================
1089
+ with gr.Tab("πŸ”¬ Biomarker Analysis", id="biomarker-tab"):
 
1090
 
1091
+ # ===== MAIN CONTENT =====
1092
+ with gr.Row(equal_height=False):
 
 
 
 
 
 
1093
 
1094
+ # ----- LEFT PANEL: INPUT -----
1095
+ with gr.Column(scale=2, min_width=400):
1096
+ gr.HTML('<div class="section-title">πŸ“ Enter Your Biomarkers</div>')
1097
+
1098
+ with gr.Group():
1099
+ input_text = gr.Textbox(
1100
+ label="",
1101
+ placeholder="Enter biomarkers in any format:\n\nβ€’ Glucose: 140, HbA1c: 7.5, Cholesterol: 210\nβ€’ My glucose is 140 and HbA1c is 7.5\nβ€’ {\"Glucose\": 140, \"HbA1c\": 7.5}",
1102
+ lines=6,
1103
+ max_lines=12,
1104
+ show_label=False,
1105
+ )
1106
+
1107
+ with gr.Row():
1108
+ analyze_btn = gr.Button(
1109
+ "πŸ”¬ Analyze Biomarkers",
1110
+ variant="primary",
1111
+ size="lg",
1112
+ scale=3,
1113
+ )
1114
+ clear_btn = gr.Button(
1115
+ "πŸ—‘οΈ Clear",
1116
+ variant="secondary",
1117
+ size="lg",
1118
+ scale=1,
1119
+ )
1120
+
1121
+ # Status display
1122
+ status_output = gr.Markdown(
1123
+ value="",
1124
+ elem_classes="status-box"
1125
  )
1126
+
1127
+ # Quick Examples
1128
+ gr.HTML('<div class="section-title" style="margin-top: 24px;">⚑ Quick Examples</div>')
1129
+ gr.HTML('<p style="color: #64748b; font-size: 0.9em; margin-bottom: 12px;">Click any example to load it instantly</p>')
1130
+
1131
+ examples = gr.Examples(
1132
+ examples=[
1133
+ ["Glucose: 185, HbA1c: 8.2, Cholesterol: 245, LDL: 165"],
1134
+ ["Glucose: 95, HbA1c: 5.4, Cholesterol: 180, HDL: 55, LDL: 100"],
1135
+ ["Hemoglobin: 9.5, Iron: 40, Ferritin: 15"],
1136
+ ["TSH: 8.5, T4: 4.0, T3: 80"],
1137
+ ["Creatinine: 2.5, BUN: 45, eGFR: 35"],
1138
+ ],
1139
+ inputs=input_text,
1140
+ label="",
1141
  )
1142
+
1143
+ # Supported Biomarkers
1144
+ with gr.Accordion("πŸ“Š Supported Biomarkers", open=False):
1145
+ gr.HTML("""
1146
+ <div style="display: grid; grid-template-columns: repeat(2, 1fr); gap: 16px; padding: 12px;">
1147
+ <div>
1148
+ <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">🩸 Diabetes</h4>
1149
+ <p style="color: #64748b; font-size: 0.85em; margin: 0;">Glucose, HbA1c, Fasting Glucose, Insulin</p>
1150
+ </div>
1151
+ <div>
1152
+ <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">❀️ Cardiovascular</h4>
1153
+ <p style="color: #64748b; font-size: 0.85em; margin: 0;">Cholesterol, LDL, HDL, Triglycerides</p>
1154
+ </div>
1155
+ <div>
1156
+ <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">🫘 Kidney</h4>
1157
+ <p style="color: #64748b; font-size: 0.85em; margin: 0;">Creatinine, BUN, eGFR, Uric Acid</p>
1158
+ </div>
1159
+ <div>
1160
+ <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">🦴 Liver</h4>
1161
+ <p style="color: #64748b; font-size: 0.85em; margin: 0;">ALT, AST, Bilirubin, Albumin</p>
1162
+ </div>
1163
+ <div>
1164
+ <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">πŸ¦‹ Thyroid</h4>
1165
+ <p style="color: #64748b; font-size: 0.85em; margin: 0;">TSH, T3, T4, Free T4</p>
1166
+ </div>
1167
+ <div>
1168
+ <h4 style="color: #1e3a5f; margin: 0 0 8px 0;">πŸ’‰ Blood</h4>
1169
+ <p style="color: #64748b; font-size: 0.85em; margin: 0;">Hemoglobin, WBC, RBC, Platelets</p>
1170
+ </div>
1171
+ </div>
1172
+ """)
1173
+
1174
+ # ----- RIGHT PANEL: RESULTS -----
1175
+ with gr.Column(scale=3, min_width=500):
1176
+ gr.HTML('<div class="section-title">πŸ“Š Analysis Results</div>')
1177
+
1178
+ with gr.Tabs() as result_tabs:
1179
+ with gr.Tab("πŸ“‹ Summary", id="summary"):
1180
+ summary_output = gr.Markdown(
1181
+ value="""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1182
  <div style="text-align: center; padding: 60px 20px; color: #94a3b8;">
1183
  <div style="font-size: 4em; margin-bottom: 16px;">πŸ”¬</div>
1184
  <h3 style="color: #64748b; font-weight: 500;">Ready to Analyze</h3>
1185
  <p>Enter your biomarkers on the left and click <strong>Analyze</strong> to get your personalized health insights.</p>
1186
  </div>
1187
+ """,
1188
+ elem_classes="summary-output"
1189
+ )
1190
+
1191
+ with gr.Tab("πŸ” Detailed JSON", id="json"):
1192
+ details_output = gr.Code(
1193
+ label="",
1194
+ language="json",
1195
+ lines=30,
1196
+ show_label=False,
1197
+ )
1198
+
1199
+ # ==================== TAB 2: MEDICAL Q&A ====================
1200
+ with gr.Tab("πŸ’¬ Medical Q&A", id="qa-tab"):
1201
+
1202
+ gr.HTML("""
1203
+ <div style="margin-bottom: 20px;">
1204
+ <h3 style="color: #1e3a5f; margin: 0 0 8px 0;">πŸ’¬ Medical Q&A Assistant</h3>
1205
+ <p style="color: #64748b; margin: 0;">
1206
+ Ask any medical question and get evidence-based answers powered by our RAG system with 750+ pages of clinical guidelines.
1207
+ </p>
1208
+ </div>
1209
+ """)
1210
+
1211
+ with gr.Row(equal_height=False):
1212
+ with gr.Column(scale=1):
1213
+ qa_context = gr.Textbox(
1214
+ label="Patient Context (Optional)",
1215
+ placeholder="Provide biomarkers or context:\nβ€’ Glucose: 140, HbA1c: 7.5\nβ€’ 45-year-old male with family history of diabetes",
1216
+ lines=3,
1217
+ max_lines=6,
1218
  )
1219
+ qa_question = gr.Textbox(
1220
+ label="Your Question",
1221
+ placeholder="Ask any medical question...\nβ€’ What do my elevated glucose levels indicate?\nβ€’ Should I be concerned about my HbA1c of 7.5%?\nβ€’ What lifestyle changes help with prediabetes?",
1222
+ lines=3,
1223
+ max_lines=6,
1224
+ )
1225
+ with gr.Row():
1226
+ qa_submit_btn = gr.Button(
1227
+ "πŸ’¬ Ask Question",
1228
+ variant="primary",
1229
+ size="lg",
1230
+ scale=3,
1231
+ )
1232
+ qa_clear_btn = gr.Button(
1233
+ "πŸ—‘οΈ Clear",
1234
+ variant="secondary",
1235
+ size="lg",
1236
+ scale=1,
1237
+ )
1238
+
1239
+ # Quick question examples
1240
+ gr.HTML('<h4 style="margin-top: 16px; color: #1e3a5f;">Example Questions</h4>')
1241
+ qa_examples = gr.Examples(
1242
+ examples=[
1243
+ ["What does elevated HbA1c mean?", ""],
1244
+ ["How is diabetes diagnosed?", "Glucose: 185, HbA1c: 7.8"],
1245
+ ["What lifestyle changes help lower cholesterol?", "LDL: 165, HDL: 35"],
1246
+ ["What causes high creatinine levels?", "Creatinine: 2.5, BUN: 45"],
1247
+ ],
1248
+ inputs=[qa_question, qa_context],
1249
  label="",
 
 
 
1250
  )
1251
+
1252
+ with gr.Column(scale=2):
1253
+ gr.HTML('<h4 style="color: #1e3a5f; margin-bottom: 12px;">πŸ“ Answer</h4>')
1254
+ qa_answer = gr.Markdown(
1255
+ value="""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1256
  <div style="text-align: center; padding: 40px 20px; color: #94a3b8;">
1257
  <div style="font-size: 3em; margin-bottom: 12px;">πŸ’¬</div>
1258
  <h3 style="color: #64748b; font-weight: 500;">Ask a Medical Question</h3>
1259
  <p>Enter your question on the left and click <strong>Ask Question</strong> to get evidence-based answers.</p>
1260
  </div>
1261
+ """,
1262
+ elem_classes="qa-output"
1263
+ )
1264
+
1265
+ # Q&A Event Handlers
1266
+ qa_submit_btn.click(
1267
+ fn=streaming_answer,
1268
+ inputs=[qa_question, qa_context],
1269
+ outputs=qa_answer,
1270
+ show_progress="minimal",
1271
  )
1272
+
1273
+ qa_clear_btn.click(
1274
+ fn=lambda: ("", "", """
 
 
 
 
 
 
 
 
1275
  <div style="text-align: center; padding: 40px 20px; color: #94a3b8;">
1276
  <div style="font-size: 3em; margin-bottom: 12px;">πŸ’¬</div>
1277
  <h3 style="color: #64748b; font-weight: 500;">Ask a Medical Question</h3>
1278
  <p>Enter your question on the left and click <strong>Ask Question</strong> to get evidence-based answers.</p>
1279
  </div>
1280
+ """),
1281
+ outputs=[qa_question, qa_context, qa_answer],
1282
+ )
1283
 
1284
  # ===== HOW IT WORKS =====
1285
  gr.HTML('<div class="section-title" style="margin-top: 32px;">πŸ€– How It Works</div>')