Spaces:
Sleeping
Sleeping
File size: 7,611 Bytes
9506f55 c08ace3 f19b8f7 bd57f6c a725bee 2c23c25 19570d2 c08ace3 bd57f6c a725bee bd57f6c f3e951a a725bee bd57f6c a725bee bd57f6c a725bee db6c616 a725bee f3e951a db6c616 a725bee db6c616 a725bee 9506f55 a725bee 9506f55 a725bee 3cbdd87 bd57f6c 9506f55 bd57f6c a725bee 84e8f17 bd57f6c 9506f55 b4961fa 84e8f17 db6c616 a725bee 84e8f17 a725bee 84e8f17 0c52dfa 84e8f17 0c52dfa f3e951a bd57f6c f3e951a 84e8f17 9885f37 84e8f17 a725bee 84e8f17 a725bee 84e8f17 a725bee 84e8f17 a725bee 84e8f17 a725bee f19b8f7 84e8f17 bd57f6c 9506f55 bd57f6c a725bee f19b8f7 b4961fa bd57f6c f19b8f7 bd57f6c f19b8f7 bd57f6c f19b8f7 9885f37 b4961fa bd57f6c 9b1be36 0c66313 9885f37 bd57f6c 9885f37 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 |
# app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import os
import torch
import re
# Hugging Face Hub repo id of the fine-tuned causal-LM used for report generation.
MODEL_ID = "Muhammadidrees/MedicalInsights"
# -----------------------
# Load tokenizer + model safely (GPU or CPU)
# -----------------------
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Try a few loading strategies so this works on GPU or CPU Spaces
try:
    # Preferred: let HF decide device placement (works for GPU-enabled Spaces)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
except Exception:
    # Fallback: force CPU (slower but safe).
    # low_cpu_mem_usage streams weights instead of materializing two copies,
    # which matters on the small RAM of a free CPU Space.
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float32, low_cpu_mem_usage=True)
# Create the generation pipeline once at import time so every request reuses it.
# device=0 pins the first CUDA device when available; -1 means CPU.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
# -----------------------
# Helper: robust section splitter
# -----------------------
def split_report(text):
    """Divide a generated report into two markdown panels.

    The left panel holds sections 1-4 (summary / analysis / plan / alerts),
    the right panel holds sections 5-6 (tabular mapping + AI insights).
    Several marker spellings are recognized so the split survives small
    formatting drift in the model output.

    Returns:
        (left, right) tuple of stripped strings; right is "" when no
        recognizable marker is present.
    """
    text = text.strip()
    # Phrases that signal the start of the tabular/insights half.
    section_markers = (
        "5. Tabular Mapping",
        "5. Tabular",
        "Tabular Mapping",
        "Tabular & AI Insights",
        "π Tabular",
        "## 5",
    )
    # Earliest occurrence of any marker wins.
    hits = [p for p in (text.find(m) for m in section_markers) if p != -1]
    split_at = min(hits) if hits else None
    if split_at is None:
        # Fallback: section 6's heading, most specific phrase first.
        for phrase in ("Enhanced AI Insights", "Enhanced AI"):
            pos = text.find(phrase)
            if pos != -1:
                split_at = pos
                break
    if split_at is None:
        # No split marker anywhere -> everything goes to the left panel.
        return text, ""
    return text[:split_at].strip(), text[split_at:].strip()
# -----------------------
# The analyze function
# -----------------------
def analyze(
    albumin, creatinine, glucose, crp, mcv, rdw, alp,
    wbc, lymph, age, gender, height, weight
):
    """Generate the two-panel medical report from demographics + lab values.

    Builds a strict six-section prompt around the Levine biomarker panel,
    runs the shared text-generation pipeline, strips any prompt echo, and
    splits the result into the two dashboard panels.

    Args:
        albumin..lymph: the 9 biomarker values (numbers from the Gradio form).
        age, gender, height, weight: patient demographics (height cm, weight kg).

    Returns:
        (left_markdown, right_markdown) for the summary and tabular panels.
    """
    # Compute BMI; any non-numeric or zero height degrades to "N/A"
    # instead of raising into the Gradio handler.
    try:
        height = float(height)
        weight = float(weight)
        bmi = round(weight / ((height / 100) ** 2), 2) if height > 0 else "N/A"
    except Exception:
        bmi = "N/A"
    # -------------------------
    # System prompt (enforce 6 headings)
    # -------------------------
    system_prompt = (
        "You are a professional AI Medical Assistant.\n"
        "You are analyzing patient demographics (age, height, weight) and the Levine biomarker panel.\n\n"
        "STRICT RULES:\n"
        "- Use ONLY the 9 biomarkers (Albumin, Creatinine, Glucose, CRP, MCV, RDW, ALP, WBC, Lymphocytes) + Age/Height/Weight.\n"
        "- Do NOT use or invent other labs (cholesterol, ferritin, vitamin D, etc.).\n"
        "- If data missing: explicitly write 'Not available from current biomarkers.'\n"
        "- Always cover ALL SIX SECTIONS with detail:\n"
        " 1. Executive Summary\n"
        " 2. System-Specific Analysis\n"
        " 3. Personalized Action Plan\n"
        " 4. Interaction Alerts\n"
        " 5. Tabular Mapping\n"
        " 6. Enhanced AI Insights & Longitudinal Risk\n"
        "- Use Markdown formatting for readability.\n"
        "- Keep tone professional, clear, and client-friendly.\n"
        "- Tables must be clean Markdown tables.\n"
    )
    # Patient input block
    patient_input = (
        f"Patient Profile:\n"
        f"- Age: {age}\n"
        f"- Gender: {gender}\n"
        f"- Height: {height} cm\n"
        f"- Weight: {weight} kg\n"
        f"- BMI: {bmi}\n\n"
        "Lab Values:\n"
        f"- Albumin: {albumin} g/dL\n"
        f"- Creatinine: {creatinine} mg/dL\n"
        f"- Glucose: {glucose} mg/dL\n"
        f"- CRP: {crp} mg/L\n"
        f"- MCV: {mcv} fL\n"
        f"- RDW: {rdw} %\n"
        f"- ALP: {alp} U/L\n"
        f"- WBC: {wbc} K/uL\n"
        f"- Lymphocytes: {lymph} %\n"
    )
    prompt = system_prompt + "\n" + patient_input
    # -------------------------
    # Generate deterministically (greedy decoding)
    # -------------------------
    # NOTE: with do_sample=False, transformers ignores temperature/top_p and
    # warns ("`temperature` is set ... but `do_sample` is False"), so those
    # kwargs are deliberately not passed — greedy output is unchanged.
    gen = pipe(
        prompt,
        max_new_tokens=3000,
        do_sample=False,          # deterministic greedy decoding
        repetition_penalty=1.1,   # reduce repetition
        return_full_text=False,
    )
    # Extract text; key varies across pipeline versions, so try both.
    generated = gen[0].get("generated_text") or gen[0].get("text") or ""
    generated = generated.strip()
    # Remove possible echoes of the prompt (some models repeat their input).
    for chunk in [patient_input, system_prompt]:
        if chunk.strip() in generated:
            generated = generated.split(chunk.strip())[-1].strip()
    # Split into panels
    left_md, right_md = split_report(generated)
    # Fallback if the model produced essentially nothing usable.
    if len(left_md) < 50 and len(right_md) < 50:
        return (
            "β οΈ Model response too short. Please re-run.\n\n**Patient Profile:**\n" + patient_input,
            ""
        )
    return left_md, right_md
# -----------------------
# Build Gradio app
# -----------------------
# Two-column input form (demographics/blood panel | chemistry panel) above a
# two-column output area that mirrors analyze()'s (left, right) return value.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# π₯ AI Medical Biomarker Dashboard")
    gr.Markdown("Enter lab values and demographics β Report is generated in two panels (Summary & Table/Insights).")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### π€ Demographics")
            age = gr.Number(label="Age", value=45)
            gender = gr.Dropdown(["Male", "Female"], label="Gender", value="Male")
            height = gr.Number(label="Height (cm)", value=174)
            weight = gr.Number(label="Weight (kg)", value=75)
            gr.Markdown("### π©Έ Blood Panel")
            wbc = gr.Number(label="WBC (K/uL)", value=6.5)
            lymph = gr.Number(label="Lymphocytes (%)", value=30)
            mcv = gr.Number(label="MCV (fL)", value=88)
            rdw = gr.Number(label="RDW (%)", value=13)
        with gr.Column(scale=1):
            gr.Markdown("### 𧬠Chemistry Panel")
            albumin = gr.Number(label="Albumin (g/dL)", value=4.2)
            creatinine = gr.Number(label="Creatinine (mg/dL)", value=0.9)
            glucose = gr.Number(label="Glucose (mg/dL)", value=92)
            crp = gr.Number(label="CRP (mg/L)", value=1.0)
            alp = gr.Number(label="ALP (U/L)", value=70)
    analyze_btn = gr.Button("π¬ Generate Report", variant="primary")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### π Summary & Action Plan")
            left_output = gr.Markdown(value="Press *Generate Report* to create the analysis.")
        with gr.Column(scale=1):
            gr.Markdown("### π Tabular & AI Insights")
            right_output = gr.Markdown(value="Tabular mapping and enhanced insights will appear here.")
    # Connect button to function.
    # NOTE: input order here must match analyze()'s positional signature
    # (biomarkers first, then demographics) — it intentionally differs from
    # the visual layout order above.
    analyze_btn.click(
        fn=analyze,
        inputs=[albumin, creatinine, glucose, crp, mcv, rdw, alp, wbc, lymph, age, gender, height, weight],
        outputs=[left_output, right_output]
    )
# -------------------------
# Launch app with error visibility
# -------------------------
# Entry point: bind to all interfaces on the port Spaces injects via $PORT
# (default 7860 for local runs).
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=int(os.environ.get("PORT", 7860)),
        show_error=True,  # π enables full error trace in logs
        share=False  # keep private; set True only for public links
    )
|