# Medical Insights AI — Hugging Face Spaces app (Gradio front-end for a
# fine-tuned causal LM that produces structured clinical assessments).
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import os
# --- Model setup ---------------------------------------------------------
# Load the fine-tuned causal LM and tokenizer from the Hugging Face Hub.
MODEL_ID = "Muhammadidrees/MedicalInsights"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Disk offload directory: accelerate spills layers here when RAM/VRAM is tight.
offload_path = os.path.join(os.getcwd(), "offload")
os.makedirs(offload_path, exist_ok=True)

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",            # let accelerate place layers on available devices
    offload_folder=offload_path,  # spill weights to disk to keep RAM usage low
    low_cpu_mem_usage=True,       # avoid materializing the full state dict twice
    trust_remote_code=True,       # model repo may ship custom modeling code
)

# Shared text-generation pipeline used by the Gradio callback below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Function to build structured input and query the LLM
def analyze(
    albumin, creatinine, glucose, crp, mcv, rdw, alp,
    wbc, lymph, age, gender, height, weight, bmi,
):
    """Build a structured clinical prompt from the form fields and query the LLM.

    Parameters are the raw values from the Gradio inputs: lab values and
    demographics as numbers, plus ``gender`` as the dropdown string.
    Returns the model's generated assessment text (completion only).
    """
    # System-style instruction describing the expected output structure.
    system_prompt = (
        "You are an advanced AI medical assistant. "
        "Analyze the patient’s biomarkers and demographics. "
        "Provide a structured assessment including: "
        "patient_profile, lab_results, risk_assessment, clinical_impression, recommendations. "
    )

    # Structured patient profile rendered into the prompt.
    patient_input = f"""
Patient Profile:
- Age: {age}
- Gender: {gender}
- Height: {height} cm
- Weight: {weight} kg
- BMI: {bmi}
Lab Values:
- Albumin: {albumin} g/dL
- Creatinine: {creatinine} mg/dL
- Glucose: {glucose} mg/dL
- C-Reactive Protein: {crp} mg/L
- Mean Cell Volume: {mcv} fL
- Red Cell Distribution Width: {rdw} %
- Alkaline Phosphatase: {alp} U/L
- White Blood Cell Count: {wbc} K/uL
- Lymphocyte Percentage: {lymph} %
"""
    prompt = system_prompt + "\n" + patient_input

    # return_full_text=False: the pipeline's default output is prompt +
    # completion, which would echo the whole prompt into the output textbox.
    result = pipe(
        prompt,
        max_new_tokens=400,
        do_sample=True,
        temperature=0.6,
        return_full_text=False,
    )
    return result[0]["generated_text"]
# --- Gradio UI -----------------------------------------------------------
# Two-column form: each Row pairs one lab value with one demographic field.
with gr.Blocks() as demo:
    gr.Markdown("## 🧪 Medical Insights AI — Enter Patient Data")
    with gr.Row():
        albumin = gr.Number(label="Albumin (g/dL)")
        wbc = gr.Number(label="White Blood Cell Count (K/uL)")
    with gr.Row():
        creatinine = gr.Number(label="Creatinine (mg/dL)")
        lymph = gr.Number(label="Lymphocyte Percentage (%)")
    with gr.Row():
        glucose = gr.Number(label="Glucose (mg/dL)")
        age = gr.Number(label="Age (years)")
    with gr.Row():
        crp = gr.Number(label="C-Reactive Protein (mg/L)")
        gender = gr.Dropdown(choices=["Male", "Female"], label="Gender")
    with gr.Row():
        mcv = gr.Number(label="Mean Cell Volume (fL)")
        height = gr.Number(label="Height (cm)")
    with gr.Row():
        rdw = gr.Number(label="Red Cell Distribution Width (%)")
        weight = gr.Number(label="Weight (kg)")
    with gr.Row():
        alp = gr.Number(label="Alkaline Phosphatase (U/L)")
        bmi = gr.Number(label="BMI")

    analyze_btn = gr.Button("🔎 Analyze")
    output = gr.Textbox(label="AI Medical Assessment", lines=12)

    # Input order must match the signature of analyze().
    analyze_btn.click(
        fn=analyze,
        inputs=[albumin, creatinine, glucose, crp, mcv, rdw, alp,
                wbc, lymph, age, gender, height, weight, bmi],
        outputs=output,
    )

# NOTE: removed a stray " |" scraping artifact after launch() — it was a
# syntax error in the original file.
demo.launch()