Muhammadidrees committed on
Commit
ec1a12b
·
verified ·
1 Parent(s): b16a1fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +143 -86
app.py CHANGED
@@ -2,7 +2,6 @@ import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
3
  import torch
4
  import time
5
- from typing import List, Dict, Generator
6
 
7
  # =======================================================
8
  # Load Model
@@ -29,39 +28,52 @@ print(f"Model device: {next(model.parameters()).device}")
29
 
30
 
31
  # =======================================================
32
- # Response Generation
33
  # =======================================================
34
- def generate_doctor_response(user_message: str) -> Generator[str, None, None]:
35
- """Generate medical advice response with streaming output."""
 
36
  if not user_message.strip():
37
- yield "⚠️ Please describe your symptoms or ask a question."
 
38
  return
39
 
40
- # Enhanced prompt - asks ONE relevant follow-up question
41
- prompt = f"""You are a compassionate medical AI assistant. Provide helpful, accurate medical information.
 
 
 
 
 
 
 
 
 
42
 
43
  Guidelines:
44
- - Respond directly without role labels like "Doctor:" or "Assistant:"
45
- - Be concise (2-3 sentences)
46
- - Provide helpful information about the symptoms
47
- - Ask ONE relevant follow-up question to better understand the condition
48
- - Include disclaimer for serious symptoms: "βš•οΈ Please consult a healthcare professional for proper diagnosis."
 
49
 
50
- User's question: {user_message}
51
 
52
- Response:"""
53
 
 
54
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
55
 
56
  gen_config = GenerationConfig(
57
  temperature=0.7,
58
- top_p=0.9,
59
  top_k=50,
60
  do_sample=True,
61
- max_new_tokens=350,
62
  pad_token_id=tokenizer.pad_token_id,
63
  eos_token_id=tokenizer.eos_token_id,
64
- repetition_penalty=1.15,
65
  no_repeat_ngram_size=3
66
  )
67
 
@@ -73,121 +85,166 @@ Response:"""
73
  generated_ids = output_ids[0][input_len:]
74
  response = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
75
 
76
- response = clean_response(response)
77
-
78
- # Stream response
79
- for i in range(0, len(response), 3):
80
- chunk = response[:i + 3]
81
- yield chunk + "β–Œ"
82
- time.sleep(0.012)
83
 
84
- yield response
 
 
 
 
 
 
 
 
 
85
 
86
 
87
- def clean_response(response: str) -> str:
88
- """Clean and format the model's response."""
89
  # Remove common prefixes
90
- prefixes = ["assistant:", "doctor:", "response:", "answer:"]
91
  response_lower = response.lower()
92
  for prefix in prefixes:
93
  if response_lower.startswith(prefix):
94
  response = response[len(prefix):].strip()
95
  break
96
 
97
- # Limit length
98
- sentences = response.split('. ')
99
- if len(sentences) > 4:
100
- response = '. '.join(sentences[:4]) + '.'
 
 
 
101
 
 
102
  if response and response[-1] not in '.!?':
103
  response += '.'
104
 
105
- if len(response.strip()) < 15:
106
- response = "I understand your concern. Could you please provide more details about your symptoms?"
 
107
 
108
- return response
109
-
110
-
111
- # =======================================================
112
- # Chat Handler
113
- # =======================================================
114
- def respond(message: str, history: List[Dict]) -> tuple:
115
- """Handle user message and generate response."""
116
- user_message = message.strip()
117
- if not user_message:
118
- return "", history
119
-
120
- history.append({"role": "user", "content": user_message})
121
- history.append({"role": "assistant", "content": ""})
122
-
123
- for partial_response in generate_doctor_response(user_message):
124
- history[-1]["content"] = partial_response
125
- yield "", history
126
-
127
- return "", history
128
-
129
-
130
- def clear_chat():
131
- """Clear the chat history."""
132
- return []
133
 
134
 
135
  # =======================================================
136
  # Gradio Interface
137
  # =======================================================
138
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
139
-
140
- gr.Markdown("""
141
- # 🩺 AI Medical Assistant
 
 
 
 
 
 
142
 
143
- Get medical information and guidance. This AI will ask relevant follow-up questions to better understand your condition.
144
-
145
- ⚠️ **Disclaimer:** For informational purposes only. Always consult healthcare professionals for medical advice.
 
 
146
  """)
147
 
148
  chatbot = gr.Chatbot(
149
- label="πŸ’¬ Medical Consultation",
150
  type='messages',
151
  avatar_images=(
152
- "https://cdn-icons-png.flaticon.com/512/706/706830.png",
153
- "https://cdn-icons-png.flaticon.com/512/3774/3774299.png"
154
  ),
155
- height=500,
156
  show_copy_button=True
157
  )
158
 
159
  with gr.Row():
160
  user_input = gr.Textbox(
161
- placeholder="Describe your symptoms...",
162
- label="πŸ’­ Your Message",
163
- lines=2,
164
  scale=4
165
  )
166
 
167
  with gr.Row():
168
- send_btn = gr.Button("πŸ’¬ Send", variant="primary", scale=1)
169
- clear_btn = gr.Button("🧹 Clear", variant="secondary", scale=1)
170
 
 
171
  gr.Examples(
172
  examples=[
173
- "I have a fever of 102Β°F and body aches",
174
- "I've been having headaches for a week",
175
- "I feel extremely tired all the time",
176
- "I have a sore throat and cough"
 
 
177
  ],
178
  inputs=user_input,
179
  )
180
 
181
- # Event handlers
182
- send_btn.click(respond, [user_input, chatbot], [user_input, chatbot], queue=True)
183
- user_input.submit(respond, [user_input, chatbot], [user_input, chatbot], queue=True)
184
- clear_btn.click(clear_chat, outputs=chatbot, queue=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
 
187
  # =======================================================
188
- # Launch
189
  # =======================================================
190
  if __name__ == "__main__":
191
- print("πŸš€ Starting AI Medical Assistant...")
 
 
192
  demo.queue(max_size=20)
193
- demo.launch(share=True, show_error=True)
 
 
 
 
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
3
  import torch
4
  import time
 
5
 
6
  # =======================================================
7
  # Load Model
 
28
 
29
 
30
  # =======================================================
31
+ # Generate Comprehensive Doctor Response
32
  # =======================================================
33
+ def generate_doctor_response(history):
34
+ user_message = history[-1]["content"]
35
+
36
  if not user_message.strip():
37
+ history.append({"role": "assistant", "content": "⚠️ Please describe your symptoms or ask a question."})
38
+ yield history
39
  return
40
 
41
+ # Enhanced Medical Prompt - Comprehensive Doctor Approach
42
+ prompt = f"""You are an experienced and compassionate medical doctor providing a comprehensive consultation.
43
+
44
+ Based on the patient's concern, provide a detailed response that includes:
45
+
46
+ 1. **Assessment**: Acknowledge their symptoms and provide initial medical assessment
47
+ 2. **Possible Causes**: Explain potential causes or conditions
48
+ 3. **Medications**: Recommend appropriate over-the-counter or prescription medications (with dosages when relevant)
49
+ 4. **Nutrition & Diet**: Suggest specific foods, nutrients, or dietary changes that can help
50
+ 5. **Lifestyle Modifications**: Recommend lifestyle changes, exercises, rest, or habits to adopt
51
+ 6. **Follow-up**: Advise when to see a doctor or what warning signs to watch for
52
 
53
  Guidelines:
54
+ - Do NOT use labels like "Doctor:" or "Patient:" in your response
55
+ - Be professional, empathetic, and thorough like a real doctor
56
+ - Provide specific, actionable recommendations
57
+ - Use medical terminology but explain it simply
58
+ - Structure your response clearly with the categories above
59
+ - End with: "βš•οΈ *Please consult a healthcare provider for proper diagnosis and personalized treatment plan.*"
60
 
61
+ Patient's concern: {user_message}
62
 
63
+ Comprehensive Medical Response:"""
64
 
65
+ # Tokenize input
66
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
67
 
68
  gen_config = GenerationConfig(
69
  temperature=0.7,
70
+ top_p=0.92,
71
  top_k=50,
72
  do_sample=True,
73
+ max_new_tokens=600,
74
  pad_token_id=tokenizer.pad_token_id,
75
  eos_token_id=tokenizer.eos_token_id,
76
+ repetition_penalty=1.18,
77
  no_repeat_ngram_size=3
78
  )
79
 
 
85
  generated_ids = output_ids[0][input_len:]
86
  response = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
87
 
88
+ # Clean response
89
+ response = clean_medical_response(response)
 
 
 
 
 
90
 
91
+ # Stream response token by token
92
+ history.append({"role": "assistant", "content": ""})
93
+ for i in range(0, len(response), 5):
94
+ chunk = response[:i + 5]
95
+ history[-1]["content"] = chunk + "β–Œ"
96
+ yield history.copy()
97
+ time.sleep(0.01)
98
+
99
+ history[-1]["content"] = response
100
+ yield history
101
 
102
 
103
def clean_medical_response(response: str) -> str:
    """Clean and format the model's raw medical response.

    Steps:
      1. Strip a single leading role/heading prefix the model sometimes
         echoes back (e.g. "Doctor:", "Comprehensive Medical Response:").
      2. Drop any remaining lines that start with a role label.
      3. If the cleaned text is too short to be useful, return a canned
         request for more symptom detail instead.
      4. Ensure the text ends with sentence punctuation.
      5. Append the standard medical disclaimer when it is missing.

    Args:
        response: Raw text decoded from the model output.

    Returns:
        A non-empty, stripped response string.
    """
    # Remove a single leading prefix (first match wins).
    prefixes = ["assistant:", "doctor:", "response:", "comprehensive medical response:", "medical response:"]
    response_lower = response.lower()
    for prefix in prefixes:
        if response_lower.startswith(prefix):
            response = response[len(prefix):].strip()
            break

    # Remove any remaining lines prefixed with a role label.
    response = '\n'.join(
        line for line in response.split('\n')
        if not line.lower().strip().startswith(('doctor:', 'assistant:', 'patient:'))
    )

    # Fallback for very short responses. This must run BEFORE the disclaimer
    # is appended: previously the disclaimer alone pushed even an empty
    # response over the 30-char threshold, so the fallback never fired.
    if len(response.strip()) < 30:
        return "I understand your concern. To provide you with comprehensive medical guidance including medications, diet, and lifestyle recommendations, could you please describe your symptoms in more detail? For example, when did they start, how severe are they, and have you noticed any triggers?"

    # Ensure proper sentence ending.
    if response and response[-1] not in '.!?':
        response += '.'

    # Add disclaimer if not already present in some form.
    if 'βš•οΈ' not in response and 'consult' not in response.lower():
        response += '\n\nβš•οΈ *Please consult a healthcare provider for proper diagnosis and personalized treatment plan.*'

    return response.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
 
135
 
136
# =======================================================
# Gradio Interface
# =======================================================
# Builds the whole UI inside one Blocks context: styled header, chat
# window, symptom textbox, action buttons, examples, info panel, the
# per-turn handler, and the event bindings.
with gr.Blocks(theme=gr.themes.Soft(), css="""
.medical-header {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 20px;
    border-radius: 10px;
    color: white;
    text-align: center;
    margin-bottom: 20px;
}
""") as demo:

    # Page banner, styled by the .medical-header CSS above.
    gr.HTML("""
<div class="medical-header">
    <h1>πŸ₯ AI Medical Doctor Consultation</h1>
    <p>Comprehensive Medical Guidance β€’ Medications β€’ Nutrition β€’ Lifestyle</p>
</div>
    """)

    # Conversation widget. type='messages' means history is a list of
    # {"role": ..., "content": ...} dicts, matching what respond() builds.
    chatbot = gr.Chatbot(
        label="πŸ’¬ Doctor-Patient Consultation",
        type='messages',
        avatar_images=(
            "https://cdn-icons-png.flaticon.com/512/706/706830.png",  # Patient
            "https://cdn-icons-png.flaticon.com/512/3774/3774299.png"  # Doctor
        ),
        height=550,
        show_copy_button=True
    )

    with gr.Row():
        # Free-text symptom entry; cleared by respond() on every yield.
        user_input = gr.Textbox(
            placeholder="Describe your symptoms in detail (e.g., 'I have fever, headache, and body pain for 3 days')...",
            label="🧍 Describe Your Symptoms",
            lines=3,
            scale=4
        )

    with gr.Row():
        send_btn = gr.Button("πŸ’¬ Consult Doctor", variant="primary", scale=1, size="lg")
        clear_btn = gr.Button("🧹 New Consultation", scale=1, size="lg")

    gr.Markdown("### πŸ’‘ Example Consultations")
    # Clicking an example fills the textbox; it does not auto-submit.
    gr.Examples(
        examples=[
            "I have a fever of 102Β°F, headache, and body aches for 2 days. What should I do?",
            "I've been having persistent headaches and feeling tired. Need advice on diet and lifestyle.",
            "I have acidity and stomach pain after eating. What medications and diet should I follow?",
            "I'm feeling stressed and anxious. Suggest lifestyle changes and natural remedies.",
            "I have high blood pressure. What diet and lifestyle changes should I make?",
            "I caught a cold with sore throat and cough. What treatment do you recommend?",
        ],
        inputs=user_input,
    )

    # Static capabilities / disclaimer panel.
    gr.Markdown("""
---
### 🩺 What This AI Doctor Provides:

βœ… **Medical Assessment** - Initial diagnosis and condition evaluation
βœ… **Medication Recommendations** - Appropriate medicines with dosage guidance
βœ… **Nutrition & Diet Plans** - Foods and nutrients to help recovery
βœ… **Lifestyle Modifications** - Exercise, sleep, stress management tips
βœ… **Follow-up Advice** - When to see a doctor and warning signs

⚠️ **Important Medical Disclaimer:**
This AI provides general medical information for educational purposes only. It is NOT a substitute for professional medical advice, diagnosis, or treatment. Always seek the advice of your physician or qualified healthcare provider with any questions about a medical condition. Never disregard professional medical advice or delay seeking it because of something you have read here.

🚨 **Emergency:** If you are experiencing a medical emergency, call emergency services immediately.
    """)

    # =======================================================
    # Respond Function
    # =======================================================
    def respond(message, history):
        """Handle one user turn as a streaming generator.

        Appends the user message to the visible chat history, streams the
        assistant reply chunk by chunk, and yields ("", history) pairs so
        the textbox stays cleared while the chat updates.

        NOTE(review): on a blank message this `return`s from a generator
        without yielding, so the UI gets no update for that event —
        confirm Gradio tolerates an empty generator here.
        """
        user_message = message.strip()
        if not user_message:
            return "", history

        # Show user message in chat
        history.append({"role": "user", "content": user_message})

        # Model sees only current message (stateless for consistent behavior)
        temp_history = [{"role": "user", "content": user_message}]

        # Create the assistant bubble on the first streamed chunk, then keep
        # overwriting its content with each partial until the stream ends.
        for updated_history in generate_doctor_response(temp_history):
            if len(history) == 0 or history[-1]["role"] != "assistant":
                history.append({"role": "assistant", "content": updated_history[-1]["content"]})
            else:
                history[-1]["content"] = updated_history[-1]["content"]
            yield "", history

    # =======================================================
    # Button & Input Bindings
    # =======================================================
    # Both the button and Enter-in-textbox trigger the same handler.
    send_btn.click(respond, [user_input, chatbot], [user_input, chatbot])
    user_input.submit(respond, [user_input, chatbot], [user_input, chatbot])
    # Clearing bypasses the queue so it is instant even mid-generation.
    clear_btn.click(lambda: [], None, chatbot, queue=False)
236
 
237
 
238
# =======================================================
# Launch App
# =======================================================
if __name__ == "__main__":
    # Startup banner for server logs.
    print("="*60)
    print("πŸ₯ AI Medical Doctor Consultation System Starting...")
    print("="*60)
    # Bound the request queue so streaming stays responsive under load.
    demo.queue(max_size=20)
    demo.launch(
        share=True,             # also create a temporary public share link
        show_error=True,        # surface server-side exceptions in the UI
        server_name="0.0.0.0"   # bind all interfaces (needed in containers/Spaces)
    )