# Spaces: Sleeping
# (Hugging Face Space status header — page-scrape residue, not application code)
# app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import os
import torch
import re

MODEL_ID = "Muhammadidrees/MedicalInsights"

# -----------------------
# Load tokenizer + model safely (GPU or CPU)
# -----------------------
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Try a few loading strategies so this works on GPU or CPU Spaces.
try:
    # Preferred: plain load with HF defaults (works for GPU-enabled Spaces).
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
except Exception:
    # Fallback: CPU-friendly settings (slower but safe).
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )

# Create the generation pipeline, pinned to GPU 0 when CUDA is available.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)
# -----------------------
# Helper: robust section splitter
# -----------------------
def split_report(text):
    """
    Split model output into left (sections 1-4) and right (sections 5-6).

    Several alternative markers are recognized so the split survives small
    formatting differences in the model output; the earliest marker wins.
    If no marker is found at all, everything goes to the left panel.
    """
    # Normalize surrounding whitespace.
    text = text.strip()

    # Markers that signal the start of the tabular/insights section.
    markers = (
        "5. Tabular Mapping",
        "5. Tabular",
        "Tabular Mapping",
        "Tabular & AI Insights",
        "π Tabular",
        "## 5",
    )

    # Collect every marker hit and take the earliest occurrence.
    hits = [pos for pos in (text.find(m) for m in markers) if pos != -1]
    split_at = min(hits) if hits else -1

    if split_at == -1:
        # Fallback markers, tried in order of specificity.
        for fallback in ("Enhanced AI Insights", "Enhanced AI"):
            split_at = text.find(fallback)
            if split_at != -1:
                break

    if split_at == -1:
        # Couldn't find any split marker -> put everything in left.
        return text, ""

    return text[:split_at].strip(), text[split_at:].strip()
# -----------------------
# The analyze function
# -----------------------
def analyze(
    albumin, creatinine, glucose, crp, mcv, rdw, alp,
    wbc, lymph, age, gender, height, weight
):
    """
    Build a prompt from demographics plus the 9 Levine biomarkers, run the
    module-level text-generation pipeline, and return a (left_md, right_md)
    pair of Markdown strings for the two dashboard panels.
    """
    # Validate/constrain inputs: coerce age to int when possible; otherwise
    # keep the raw value (the prompt tolerates non-integer text).
    try:
        age = int(age)
    except (TypeError, ValueError):
        pass
    # BMI from height (cm) and weight (kg); "N/A" on bad input or zero height
    # (the height > 0 guard also prevents division by zero).
    try:
        height = float(height)
        weight = float(weight)
        bmi = round(weight / ((height / 100) ** 2), 2) if height > 0 else "N/A"
    except (TypeError, ValueError):
        bmi = "N/A"
    system_prompt = (
        "You are a professional AI Medical Assistant.\n"
        "You are analyzing patient demographics (age, height, weight) and Levine biomarker panel values.\n\n"
        "The Levine biomarker panel includes:\n"
        "- Albumin\n"
        "- Creatinine\n"
        "- Glucose\n"
        "- C-reactive protein (CRP)\n"
        "- Mean Cell Volume (MCV)\n"
        "- Red Cell Distribution Width (RDW)\n"
        "- Alkaline Phosphatase (ALP)\n"
        "- White Blood Cell count (WBC)\n"
        "- Lymphocyte percentage\n\n"
        "STRICT RULES:\n"
        "- Use ONLY the 9 biomarkers above + age, height, weight.\n"
        "- DO NOT use or invent other lab results (e.g., cholesterol, vitamin D, ferritin, ALT, AST, urine results).\n"
        "- If a section cannot be addressed with available data, explicitly state: 'Not available from current biomarkers.'\n"
        "- Do not give absolute longevity scores. Instead, summarize trends (e.g., 'No major abnormalities suggesting elevated short-term risk.').\n"
        "- Nutrient status (Iron, B12, Folate) can only be suggested as possible IF supported by MCV + RDW patterns, but never stated as confirmed.\n"
        "- Interpret ALP cautiously: mention bone vs liver as possible sources, but highlight that more tests would be required to confirm.\n"
        "- Always highlight limitations where applicable.\n\n"
        "OUTPUT FORMAT (strict, structured, and client-friendly):\n\n"
        "1. Executive Summary\n"
        " - Top Priority Issues (based only on provided biomarkers)\n"
        " - Key Strengths\n\n"
        "2. System-Specific Analysis\n"
        " - Blood Health (MCV, RDW, Lymphocytes, WBC)\n"
        " - Protein & Liver Health (Albumin, ALP)\n"
        " - Kidney Health (Creatinine)\n"
        " - Metabolic Health (Glucose, CRP)\n"
        " - Anthropometrics (Age, Height, Weight, BMI)\n"
        " - Other systems: Always state 'Not available from current biomarkers.' if data missing\n\n"
        "3. Personalized Action Plan\n"
        " - Medical (tests/consults related only to biomarkers β e.g., repeat CBC, iron studies if anemia suspected)\n"
        " - Nutrition (diet & supplements grounded ONLY in biomarker findings β e.g., protein intake if albumin low, anti-inflammatory foods if CRP elevated)\n"
        " - Lifestyle (hydration, exercise, sleep β general guidance contextualized by BMI and biomarkers)\n"
        " - Testing (only mention ferritin, B12, folate, GGT, etc. as follow-up β but clarify these are NOT part of current data)\n\n"
        "4. Interaction Alerts\n"
        " - Describe ONLY interactions among provided biomarkers (e.g., RDW with MCV for anemia trends, ALP bone/liver origin, WBC with CRP for infection/inflammation)\n\n"
        "5. Tabular Mapping\n"
        " - Present a Markdown table: Biomarker β Value β Status (Low/Normal/High) β AI-Inferred Insight β Client-Friendly Message\n"
        " - Include ONLY the 9 Levine biomarkers, no extras\n\n"
        "6. Enhanced AI Insights & Longitudinal Risk\n"
        " - Subclinical nutrient predictions ONLY if patterns (MCV + RDW) suggest it β state as possible, not confirmed\n"
        " - ALP interpretation limited to bone vs liver origin (uncertain without further tests)\n"
        " - WBC & lymphocyte balance for immunity\n"
        " - Risk framing: Highlight if biomarkers suggest resilience or potential stress, but avoid absolute longevity claims\n\n"
        "STYLE REQUIREMENTS:\n"
        "- Use clear section headings and bullet points.\n"
        "- Keep language professional but client-friendly.\n"
        "- Format tables cleanly in Markdown.\n"
        "- Present output beautifully, like a polished medical summary.\n"
    )
    patient_input = (
        f"Patient Profile:\n"
        f"- Age: {age}\n"
        f"- Gender: {gender}\n"
        f"- Height: {height} cm\n"
        f"- Weight: {weight} kg\n"
        f"- BMI: {bmi}\n\n"
        "Lab Values:\n"
        f"- Albumin: {albumin} g/dL\n"
        f"- Creatinine: {creatinine} mg/dL\n"
        f"- Glucose: {glucose} mg/dL\n"
        f"- CRP: {crp} mg/L\n"
        f"- MCV: {mcv} fL\n"
        f"- RDW: {rdw} %\n"
        f"- ALP: {alp} U/L\n"
        f"- WBC: {wbc} K/uL\n"
        f"- Lymphocytes: {lymph} %\n"
    )
    prompt = system_prompt + "\n" + patient_input
    # Generate — keep generation parameters conservative for Spaces hardware.
    gen = pipe(prompt,
               max_new_tokens=2500,
               do_sample=True,
               temperature=0.001,
               top_p=0.9,
               return_full_text=False)
    # Extract generated text (pipeline output key varies across versions).
    generated = gen[0].get("generated_text") or gen[0].get("text") or str(gen[0])
    generated = generated.strip()
    # Clean: some models echo the prompt — strip any echoed patient/system text.
    if patient_input.strip() in generated:
        generated = generated.split(patient_input.strip())[-1].strip()
    # Also remove repeated instructions.
    if system_prompt.strip() in generated:
        generated = generated.split(system_prompt.strip())[-1].strip()
    # Split into left/right panels.
    left_md, right_md = split_report(generated)
    # If the model output is empty or too short, return a helpful fallback.
    if len(left_md) < 50 and len(right_md) < 50:
        fallback = (
            "β οΈ The model returned an unexpectedly short response. Try re-running the report.\n\n"
            "**Patient Profile:**\n" + patient_input
        )
        return fallback, ""
    return left_md, right_md
# -----------------------
# Build Gradio app
# -----------------------
# Layout: two input columns (demographics/blood panel vs. chemistry panel),
# a generate button wired to analyze(), and two Markdown output panels.
# NOTE(review): original indentation was lost in extraction; the button is
# assumed to sit at Blocks level between the input and output rows — confirm
# against the deployed Space.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# π₯ AI Medical Biomarker Dashboard")
    gr.Markdown("Enter lab values and demographics β Report is generated in two panels (Summary & Table/Insights).")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### π€ Demographics")
            age = gr.Number(label="Age", value=45)
            gender = gr.Dropdown(["Male", "Female"], label="Gender", value="Male")
            height = gr.Number(label="Height (cm)", value=174)
            weight = gr.Number(label="Weight (kg)", value=75)
            gr.Markdown("### π©Έ Blood Panel")
            wbc = gr.Number(label="WBC (K/uL)", value=6.5)
            lymph = gr.Number(label="Lymphocytes (%)", value=30)
            mcv = gr.Number(label="MCV (fL)", value=88)
            rdw = gr.Number(label="RDW (%)", value=13)
        with gr.Column(scale=1):
            gr.Markdown("### 𧬠Chemistry Panel")
            albumin = gr.Number(label="Albumin (g/dL)", value=4.2)
            creatinine = gr.Number(label="Creatinine (mg/dL)", value=0.9)
            glucose = gr.Number(label="Glucose (mg/dL)", value=92)
            crp = gr.Number(label="CRP (mg/L)", value=1.0)
            alp = gr.Number(label="ALP (U/L)", value=70)
    analyze_btn = gr.Button("π¬ Generate Report", variant="primary")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### π Summary & Action Plan")
            left_output = gr.Markdown(value="Press *Generate Report* to create the analysis.")
        with gr.Column(scale=1):
            gr.Markdown("### π Tabular & AI Insights")
            right_output = gr.Markdown(value="Tabular mapping and enhanced insights will appear here.")
    # Wire the button: inputs must stay in the exact order of analyze()'s
    # parameters (albumin ... weight); outputs feed the two Markdown panels.
    analyze_btn.click(
        fn=analyze,
        inputs=[albumin, creatinine, glucose, crp, mcv, rdw, alp, wbc, lymph, age, gender, height, weight],
        outputs=[left_output, right_output]
    )
    gr.Markdown("*β οΈ Disclaimer: This AI output is for educational purposes only and not a substitute for professional medical advice.*")
# Launch (HF Spaces expects this pattern)
if __name__ == "__main__":
    # Bind on all interfaces; honor the PORT env var, defaulting to 7860.
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)