Muhammadidrees committed on
Commit
9506f55
·
verified ·
1 Parent(s): 3cbdd87

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -102
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  import os
@@ -6,7 +7,7 @@ import torch
6
  MODEL_ID = "Muhammadidrees/MedicalInsights"
7
 
8
  # -----------------------
9
- # Load tokenizer + model safely (GPU or CPU)
10
  # -----------------------
11
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
12
 
@@ -17,99 +18,86 @@ except Exception:
17
  MODEL_ID, torch_dtype=torch.float32, low_cpu_mem_usage=True
18
  )
19
 
20
- pipe = pipeline(
21
- "text-generation",
22
- model=model,
23
- tokenizer=tokenizer,
24
- device=0 if torch.cuda.is_available() else -1,
25
- )
26
 
27
  # -----------------------
28
- # Lookup Table for Biomarkers
29
  # -----------------------
30
  REFERENCE_RANGES = {
31
- "Albumin": {"low": 3.5, "high": 5.5, "unit": "g/dL"},
32
- "Creatinine_Male": {"low": 0.7, "high": 1.3, "unit": "mg/dL"},
33
- "Creatinine_Female": {"low": 0.6, "high": 1.1, "unit": "mg/dL"},
34
- "Glucose": {"low": 70, "high": 100, "unit": "mg/dL"},
35
- "CRP": {"low": 0.3, "high": 10, "unit": "mg/L"},
36
- "MCV": {"low": 80, "high": 100, "unit": "fL"},
37
- "RDW": {"low": 11, "high": 15, "unit": "%"},
38
- "WBC": {"low": 4000, "high": 11000, "unit": "/µL"},
39
- "Lymphocytes": {"low": 20, "high": 40, "unit": "%"},
40
- "ALP": {"low": 44, "high": 147, "unit": "U/L"},
41
  }
42
 
43
- def classify(value, biomarker, gender="Male"):
44
- """Classify biomarker as Low, Normal, or High using lookup table."""
45
- if biomarker == "Creatinine":
46
- ref = REFERENCE_RANGES[f"Creatinine_{gender}"]
47
- else:
48
- ref = REFERENCE_RANGES[biomarker]
49
-
50
- if value < ref["low"]:
51
- return "Low"
52
- elif value > ref["high"]:
53
- return "High"
54
- else:
55
- return "Normal"
56
-
57
- # -----------------------
58
- # Splitter helper
59
- # -----------------------
60
- def split_report(text):
61
- text = text.strip()
62
- markers = ["5. Tabular Mapping", "Tabular Mapping", "📊 Tabular", "## 5"]
63
- idx = None
64
- for m in markers:
65
- pos = text.find(m)
66
- if pos != -1 and (idx is None or pos < idx):
67
- idx = pos
68
- if idx is None:
69
- return text, ""
70
- return text[:idx].strip(), text[idx:].strip()
71
-
72
- # -----------------------
73
- # Post-processing cleaner (remove repeated lines)
74
- # -----------------------
75
- def clean_output(text):
76
- lines = text.split("\n")
77
- cleaned = []
78
- prev_line = None
79
- for line in lines:
80
- if line.strip() and line != prev_line: # remove duplicates
81
- cleaned.append(line)
82
- prev_line = line
83
- return "\n".join(cleaned)
84
 
85
  # -----------------------
86
- # Analyze Function
87
  # -----------------------
88
  def analyze(albumin, creatinine, glucose, crp, mcv, rdw, alp,
89
  wbc, lymph, age, gender, height, weight):
90
 
91
- # Calculate BMI
 
 
 
 
 
92
  try:
93
  height = float(height)
94
  weight = float(weight)
95
- bmi = round(weight / ((height / 100) ** 2), 2)
96
  except Exception:
97
  bmi = "N/A"
98
 
99
- # Classify biomarkers via lookup
100
  statuses = {
101
- "Albumin": classify(albumin, "Albumin", gender),
102
- "Creatinine": classify(creatinine, "Creatinine", gender),
103
- "Glucose": classify(glucose, "Glucose", gender),
104
- "CRP": classify(crp, "CRP", gender),
105
- "MCV": classify(mcv, "MCV", gender),
106
- "RDW": classify(rdw, "RDW", gender),
107
- "WBC": classify(wbc * 1000, "WBC", gender), # convert K/uL to /µL
108
- "Lymphocytes": classify(lymph, "Lymphocytes", gender),
109
- "ALP": classify(alp, "ALP", gender),
110
  }
111
 
112
- # Structured context for LLM
113
  patient_input = (
114
  f"Patient Profile:\n"
115
  f"- Age: {age}\n"
@@ -117,7 +105,7 @@ def analyze(albumin, creatinine, glucose, crp, mcv, rdw, alp,
117
  f"- Height: {height} cm\n"
118
  f"- Weight: {weight} kg\n"
119
  f"- BMI: {bmi}\n\n"
120
- "Biomarker Results (Pre-classified):\n"
121
  )
122
  for biomarker, value in {
123
  "Albumin": albumin,
@@ -130,49 +118,52 @@ def analyze(albumin, creatinine, glucose, crp, mcv, rdw, alp,
130
  "WBC": wbc,
131
  "Lymphocytes": lymph,
132
  }.items():
133
- unit = REFERENCE_RANGES["Creatinine_Male"]["unit"] if biomarker == "Creatinine" else REFERENCE_RANGES[biomarker]["unit"]
134
- patient_input += f"- {biomarker}: {value} {unit} → {statuses[biomarker]}\n"
135
 
 
136
  system_prompt = (
137
- "You are a professional AI Medical Assistant.\n"
138
- "Generate a structured medical report ONLY using the 9 Levine biomarkers, age, height, and weight.\n"
139
- "Follow this structure:\n\n"
140
- "1. Executive Summary\n"
141
- "2. System-Specific Analysis\n"
142
- "3. Personalized Action Plan\n"
143
- "4. Interaction Alerts\n"
144
- "5. Tabular Mapping (with 4 columns: Biomarker | Value | Status | AI-Inferred Insight)\n"
145
- "6. Enhanced AI Insights & Longitudinal Risk\n\n"
146
- "Rules:\n"
147
- "- Always include all 9 biomarkers in the table.\n"
148
- "- Do not repeat lines or add unrelated labs.\n"
149
- "- Keep responses concise, professional, and readable.\n"
 
 
 
 
 
 
150
  )
151
 
152
  prompt = system_prompt + "\n" + patient_input
153
 
154
- # ---- FIXED PIPE CALL ----
155
  gen = pipe(
156
  prompt,
157
  max_new_tokens=1200,
158
- do_sample=True, # controlled sampling
159
- temperature=0.3,
160
- top_p=0.9,
161
- repetition_penalty=1.2, # prevent loops
162
- eos_token_id=tokenizer.eos_token_id,
163
- return_full_text=False,
164
  )
165
 
166
- generated = clean_output(gen[0]["generated_text"].strip())
167
- left, right = split_report(generated)
168
- return left, right
169
 
170
  # -----------------------
171
- # Gradio UI
172
  # -----------------------
173
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
174
  gr.Markdown("# 🏥 AI Medical Biomarker Dashboard")
175
- gr.Markdown("Enter lab values and demographics — Report is generated in two panels (Summary & Table/Insights).")
176
 
177
  with gr.Row():
178
  with gr.Column(scale=1):
@@ -213,4 +204,9 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
213
  )
214
 
215
  if __name__ == "__main__":
216
- demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))
 
 
 
 
 
 
1
+ # app.py
2
  import gradio as gr
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
  import os
 
7
  MODEL_ID = "Muhammadidrees/MedicalInsights"
8
 
9
  # -----------------------
10
+ # Load tokenizer + model
11
  # -----------------------
12
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
13
 
 
18
  MODEL_ID, torch_dtype=torch.float32, low_cpu_mem_usage=True
19
  )
20
 
21
# Run generation on the first GPU when available, otherwise fall back to CPU
# (device=-1 is the transformers convention for CPU).
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=device,
)
 
 
 
 
23
 
24
# -----------------------
# Reference ranges lookup
# -----------------------
# Gender-independent biomarkers map to a (low, high) tuple; gender-dependent
# ones (Creatinine) map gender -> (low, high).
REFERENCE_RANGES = {
    "Albumin": (3.5, 5.5),                                      # g/dL
    "Creatinine": {"Male": (0.7, 1.3), "Female": (0.6, 1.1)},   # mg/dL
    "Glucose": (70, 100),                                       # mg/dL
    "CRP": (0.3, 10),                                           # mg/L
    "MCV": (80, 100),                                           # fL
    "RDW": (11, 15),                                            # %
    "WBC": (4, 11),                                             # K/uL
    "Lymphocytes": (20, 40),                                    # %
    "ALP": (44, 147),                                           # U/L
}

def classify_biomarker(name, value, gender=None):
    """Classify *value* for biomarker *name* as "Low", "Normal", or "High".

    Looks the range up in REFERENCE_RANGES; gender-specific entries
    (e.g. "Creatinine") are resolved via *gender*. Returns "Unknown" for a
    missing value, an unrecognized biomarker, an unresolvable gender, or a
    non-numeric value — it never raises, so the UI callback stays safe.
    """
    if value is None:
        return "Unknown"

    ref = REFERENCE_RANGES.get(name)
    if ref is None:
        return "Unknown"

    # Gender-specific ranges are stored as a dict. Resolve them with .get()
    # even when gender is missing: the previous code fell through to
    # `low, high = ref` in that case, unpacking the dict's *keys* and then
    # failing the numeric comparison with a spurious "Error: ..." result.
    if isinstance(ref, dict):
        low, high = ref.get(gender, (None, None))
    else:
        low, high = ref

    if low is None or high is None:
        return "Unknown"

    try:
        if value < low:
            return "Low"
        if value > high:
            return "High"
        return "Normal"
    except TypeError:
        # Non-numeric value (e.g. a raw string from the form); keep the
        # contract of returning a status string rather than raising.
        return "Unknown"
 
 
 
 
 
 
 
 
 
 
 
 
 
67
 
68
  # -----------------------
69
+ # The analyze function
70
  # -----------------------
71
  def analyze(albumin, creatinine, glucose, crp, mcv, rdw, alp,
72
  wbc, lymph, age, gender, height, weight):
73
 
74
+ # Safe conversions
75
+ try:
76
+ age = int(age)
77
+ except Exception:
78
+ age = "N/A"
79
+
80
  try:
81
  height = float(height)
82
  weight = float(weight)
83
+ bmi = round(weight / ((height / 100) ** 2), 2) if height > 0 else "N/A"
84
  except Exception:
85
  bmi = "N/A"
86
 
87
+ # Classify each biomarker
88
  statuses = {
89
+ "Albumin": classify_biomarker("Albumin", albumin, gender),
90
+ "Creatinine": classify_biomarker("Creatinine", creatinine, gender),
91
+ "Glucose": classify_biomarker("Glucose", glucose),
92
+ "CRP": classify_biomarker("CRP", crp),
93
+ "MCV": classify_biomarker("MCV", mcv),
94
+ "RDW": classify_biomarker("RDW", rdw),
95
+ "WBC": classify_biomarker("WBC", wbc),
96
+ "Lymphocytes": classify_biomarker("Lymphocytes", lymph),
97
+ "ALP": classify_biomarker("ALP", alp),
98
  }
99
 
100
+ # Build structured patient input for LLM
101
  patient_input = (
102
  f"Patient Profile:\n"
103
  f"- Age: {age}\n"
 
105
  f"- Height: {height} cm\n"
106
  f"- Weight: {weight} kg\n"
107
  f"- BMI: {bmi}\n\n"
108
+ "Biomarker Results:\n"
109
  )
110
  for biomarker, value in {
111
  "Albumin": albumin,
 
118
  "WBC": wbc,
119
  "Lymphocytes": lymph,
120
  }.items():
121
+ patient_input += f"- {biomarker}: {value} ({statuses[biomarker]})\n"
 
122
 
123
+ # System prompt
124
  system_prompt = (
125
+ "You are 'Medical Insights AI', a trusted medical assistant.\n"
126
+ "The biomarker classifications (Low / Normal / High) are already provided.\n"
127
+ "Do not recompute them — just use them to generate:\n\n"
128
+ "*Executive Summary*\n"
129
+ "- Top Priority Issues\n"
130
+ "- Key Strengths\n\n"
131
+ "*System-Specific Analysis*\n"
132
+ "- Blood Health\n"
133
+ "- Protein & Liver Health\n"
134
+ "- Kidney Health\n"
135
+ "- Metabolic Health\n"
136
+ "- Anthropometrics\n"
137
+ "- Other Systems\n\n"
138
+ "*Personalized Action Plan*\n"
139
+ "- Medical, Nutrition, Lifestyle, Testing\n\n"
140
+ "*Interaction Alerts*\n\n"
141
+ "*Tabular Mapping*\n"
142
+ "- Table with Biomarker | Reference Range | Value | Status | Insight\n\n"
143
+ "*Enhanced AI Insights & Longitudinal Risk*\n"
144
  )
145
 
146
  prompt = system_prompt + "\n" + patient_input
147
 
 
148
  gen = pipe(
149
  prompt,
150
  max_new_tokens=1200,
151
+ do_sample=False,
152
+ temperature=0.0,
153
+ top_p=1.0,
154
+ repetition_penalty=1.0,
155
+ early_stopping=True,
156
+ return_full_text=False
157
  )
158
 
159
+ generated = gen[0].get("generated_text") or gen[0].get("text") or str(gen[0])
160
+ return generated, generated # left and right panels (split optional)
 
161
 
162
  # -----------------------
163
+ # Build Gradio app
164
  # -----------------------
165
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
166
  gr.Markdown("# 🏥 AI Medical Biomarker Dashboard")
 
167
 
168
  with gr.Row():
169
  with gr.Column(scale=1):
 
204
  )
205
 
206
# -----------------------
# Entry point
# -----------------------
if __name__ == "__main__":
    # Bind on all interfaces and honor the PORT env var (Hugging Face
    # Spaces injects it); default to Gradio's usual 7860 locally.
    port = int(os.environ.get("PORT", 7860))
    demo.launch(
        server_name="0.0.0.0",
        server_port=port,
        show_error=True,  # surface tracebacks in the UI on Hugging Face
        debug=True,
    )