Update app.py
app.py CHANGED
@@ -3,11 +3,6 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
 import torch
 import time
 
-# =======================================================
-# Global session state for multi-step questioning
-# =======================================================
-session_answers = {}
-
 # =======================================================
 # Load Model
 # =======================================================
@@ -32,10 +27,9 @@ print(f"Device map: {model.hf_device_map}")
 print(f"Model device: {next(model.parameters()).device}")
 
 # =======================================================
-# Generate Doctor Response
+# Generate Doctor Response (Stateless)
 # =======================================================
 def generate_doctor_response(history):
-    global session_answers
     user_message = history[-1]["content"]
 
     if not user_message.strip():
@@ -43,41 +37,34 @@ def generate_doctor_response(history):
         yield history
         return
 
-    #
+    # Single-turn prompt (no past context)
     prompt = """# 🩺 You are a highly knowledgeable Medical Expert
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    recent_history = history[-10:-1] if len(history) > 10 else history[:-1]
-    for msg in recent_history:
-        role = "Patient" if msg["role"] == "user" else "Doctor"
-        content = msg['content'].replace("⚕️ *Note: This is AI-generated information*", "").strip()
-        prompt += f"{role}: {content}\n"
-    prompt += f"Patient: {user_message}\nDoctor:"
+**Roles:**
+- **Doctor:** Diagnose symptoms, ask relevant follow-up questions, and provide medical advice.
+- **Nutritionist:** Give diet and lifestyle recommendations.
+- **Medical Teacher:** Explain complex medical terms and conditions in simple, understandable language.
+
+**Guidelines:**
+1. Ask **only one follow-up question at a time** if necessary.
+2. Provide advice after collecting enough information.
+3. Be **empathetic, professional, and conversational**.
+4. Include disclaimers when needed:
+   ⚕️ *This is AI-generated information and not a substitute for professional medical advice. Please consult a healthcare provider for proper diagnosis and treatment.*
+5. Keep answers concise, clear, and actionable.
+
+Respond naturally to the following patient message:
+
+Patient: """ + user_message + "\nDoctor:"
 
     # Tokenize input
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 
-    # Generation configuration for concise, interactive answers
     gen_config = GenerationConfig(
         temperature=0.7,
         top_p=0.9,
         do_sample=True,
-        max_new_tokens=500,
+        max_new_tokens=500,
         pad_token_id=tokenizer.pad_token_id,
         eos_token_id=tokenizer.eos_token_id,
         repetition_penalty=1.2
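Note: as a self-contained sketch of what the new single-turn prompt assembly reduces to. SYSTEM_PROMPT abbreviates the full Roles/Guidelines block in the hunk above, and build_prompt is an illustrative name, not a function in app.py:

```python
# Illustrative sketch only: mirrors the single-turn prompt shape from the diff.
SYSTEM_PROMPT = """# 🩺 You are a highly knowledgeable Medical Expert

**Guidelines:**
1. Ask **only one follow-up question at a time** if necessary.
2. Keep answers concise, clear, and actionable.

Respond naturally to the following patient message:
"""

def build_prompt(user_message: str) -> str:
    # No chat history is concatenated: the model sees exactly one turn.
    return SYSTEM_PROMPT + "\nPatient: " + user_message + "\nDoctor:"

print(build_prompt("I have had a mild headache for two days."))
```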
@@ -91,27 +78,25 @@ def generate_doctor_response(history):
     generated_ids = output_ids[0][input_len:]
     response = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
 
-    #
+    # Keep concise output
     response = ". ".join(response.split(". ")[:3]).strip()
     if response.lower().startswith("doctor:"):
         response = response[7:].strip()
     if len(response) < 10:
         response = "I understand your concern. Could you please provide more details about your symptoms?"
 
-    # Add assistant placeholder for streaming
-    history.append({"role": "assistant", "content": ""})
-
     # Stream response token by token
+    history.append({"role": "assistant", "content": ""})
     for i in range(0, len(response), 4):
-        chunk = response[:i+4]
+        chunk = response[:i + 4]
         history[-1]["content"] = chunk + "▌"
         yield history.copy()
         time.sleep(0.015)
 
-    # Final response
     history[-1]["content"] = response
     yield history
 
+
 # =======================================================
 # Gradio Interface
 # =======================================================
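Note: the streaming block above animates the reply by yielding ever-longer prefixes capped with a "▌" cursor, then the bare text. A minimal stand-alone sketch of that pattern; stream_with_cursor is a hypothetical helper, not part of the file:

```python
import time

def stream_with_cursor(response: str, step: int = 4, delay: float = 0.015):
    # Yield growing prefixes with a typing cursor, then the final text,
    # the same prefix-chunking pattern the diff applies to history[-1].
    for i in range(0, len(response), step):
        yield response[:i + step] + "▌"
        time.sleep(delay)
    yield response  # final frame, cursor removed

for frame in stream_with_cursor("Stay hydrated and rest."):
    print(frame)
```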
@@ -151,20 +136,38 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         label="💡 Example Questions"
     )
 
+    # =======================================================
+    # Respond Function — Model forgets, Chat UI remembers
+    # =======================================================
     def respond(message, history):
-
-
-        if not message.strip():
+        user_message = message.strip()
+        if not user_message:
             return "", history
-        history.append({"role": "user", "content": message})
-        for updated_history in generate_doctor_response(history):
-            yield "", updated_history
 
+        # Show user message in chat
+        history.append({"role": "user", "content": user_message})
+
+        # Model sees only current message (no memory)
+        temp_history = [{"role": "user", "content": user_message}]
+
+        for updated_history in generate_doctor_response(temp_history):
+            if len(history) == 0 or history[-1]["role"] != "assistant":
+                history.append({"role": "assistant", "content": updated_history[-1]["content"]})
+            else:
+                history[-1]["content"] = updated_history[-1]["content"]
+            yield "", history
+
+    # =======================================================
+    # Button & Input Bindings
+    # =======================================================
     send_btn.click(respond, [user_input, chatbot], [user_input, chatbot])
     user_input.submit(respond, [user_input, chatbot], [user_input, chatbot])
     clear_btn.click(lambda: [], None, chatbot, queue=False)
 
-
+
+# =======================================================
+# Launch App
+# =======================================================
 if __name__ == "__main__":
     demo.queue()
     demo.launch(share=True)
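Note: the net effect of this commit is that the Gradio Chatbot keeps the full transcript while the model is called with only the current turn. A model-free simulation of that contract; fake_generate is a stub standing in for generate_doctor_response, and all names here are illustrative:

```python
def fake_generate(temp_history):
    # Stub for generate_doctor_response: append one assistant turn and yield.
    temp_history.append({"role": "assistant", "content": "Noted."})
    yield temp_history

def respond(message, history):
    history.append({"role": "user", "content": message})    # chat UI remembers
    temp_history = [{"role": "user", "content": message}]   # model forgets
    for updated in fake_generate(temp_history):
        if not history or history[-1]["role"] != "assistant":
            history.append({"role": "assistant", "content": updated[-1]["content"]})
        else:
            history[-1]["content"] = updated[-1]["content"]
        yield "", history

ui_history = []
for _ in respond("I feel dizzy.", ui_history):
    pass
print(ui_history)  # user turn plus assistant turn, both retained by the UI
```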