import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import torch
import time
# =======================================================
# Load Model
# =======================================================
model_name = "augtoma/qCammel-13"
print("Loading tokenizer and model...")
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True,
    low_cpu_mem_usage=True
)
model.eval()
print("Model loaded successfully!")
print(f"Device map: {model.hf_device_map}")
print(f"Model device: {next(model.parameters()).device}")
# =======================================================
# Generate Doctor Response - Interactive Medical Consultation
# =======================================================
def generate_doctor_response(history):
    user_message = history[-1]["content"]
    if not user_message.strip():
        history.append({"role": "assistant", "content": "How can I help you today?"})
        yield history
        return
    # Build conversation context from history
    conversation_context = ""
    if len(history) > 1:
        # Include previous exchanges for context
        for msg in history[:-1]:
            if msg["role"] == "user":
                conversation_context += f"PATIENT: {msg['content']}\n"
            else:
                conversation_context += f"DOCTOR: {msg['content']}\n"
    # Medical conversation prompt - like real doctor-patient interaction
    prompt = f"""You are an experienced medical doctor conducting a patient consultation. Have a natural, interactive conversation where you:
- Ask relevant follow-up questions to understand symptoms better
- Gather medical history (medications, lifestyle, family history)
- Provide medical assessment and recommendations
- Suggest medications with dosages when appropriate
- Give diet and lifestyle advice
- Explain what tests or next steps are needed
Respond naturally as a caring doctor would. Keep responses concise (2-4 sentences). Ask ONE specific follow-up question when you need more information.
Previous conversation:
{conversation_context}
PATIENT: {user_message}
DOCTOR:"""
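    # NOTE (assumption): qCammel-13 is a Llama-2 13B derivative, and no chat
    # template is applied here; the PATIENT:/DOCTOR: framing above is simply
    # this app's own convention, not a format the model requires.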
    # Tokenize input
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
    gen_config = GenerationConfig(
        temperature=0.75,
        top_p=0.92,
        top_k=45,
        do_sample=True,
        max_new_tokens=250,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
        repetition_penalty=1.2,
        no_repeat_ngram_size=3
    )
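    # The sampling settings above trade variety for focus: moderate
    # temperature/top_p/top_k keep answers conversational, while
    # repetition_penalty and no_repeat_ngram_size suppress the looping
    # phrases that the cleanup function below also guards against.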
    input_len = inputs["input_ids"].shape[1]
    with torch.no_grad():
        output_ids = model.generate(**inputs, generation_config=gen_config)
    generated_ids = output_ids[0][input_len:]
    response = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
    # Clean response
    response = clean_doctor_response(response)
    # Stream the response a few characters at a time, with a cursor marker
    history.append({"role": "assistant", "content": ""})
    for i in range(0, len(response), 4):
        chunk = response[:i + 4]
        history[-1]["content"] = chunk + "▌"
        yield history.copy()
        time.sleep(0.012)
    history[-1]["content"] = response
    yield history
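# NOTE (assumption / possible refinement, not in the original app): the loop
# above only simulates streaming; model.generate() blocks until the full reply
# exists. For real token streaming, transformers' TextIteratorStreamer could
# be used instead, roughly like this (untested sketch):
#
#   from threading import Thread
#   from transformers import TextIteratorStreamer
#
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
#   Thread(target=model.generate,
#          kwargs={**inputs, "generation_config": gen_config, "streamer": streamer}).start()
#   partial = ""
#   for new_text in streamer:
#       partial += new_text
#       history[-1]["content"] = partial
#       yield history.copy()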
def clean_doctor_response(response: str) -> str:
    """Clean the doctor's response to be natural and conversational."""
    # Remove role labels if present
    prefixes_to_remove = ["doctor:", "assistant:", "response:", "patient:"]
    response_lower = response.lower()
    for prefix in prefixes_to_remove:
        if response_lower.startswith(prefix):
            response = response[len(prefix):].strip()
            break
    # Stop at repetitive patterns or gibberish
    stop_phrases = ["accordingly", "respectively", "speaking correctly", "faithfully yours"]
    for phrase in stop_phrases:
        if phrase in response.lower():
            # Find first occurrence and cut there
            idx = response.lower().find(phrase)
            response = response[:idx].strip()
            break
    # Limit to a reasonable number of sentences (6 max)
    sentences = [s.strip() + '.' for s in response.split('.') if s.strip()]
    if len(sentences) > 6:
        response = ' '.join(sentences[:6])
    else:
        response = ' '.join(sentences)
    # Remove incomplete sentences at the end
    if response and response[-1] not in '.!?':
        last_period = response.rfind('.')
        if last_period > 0:
            response = response[:last_period + 1]
    # Clean up extra spaces
    response = ' '.join(response.split())
    # Fallback for very short or empty responses
    if len(response.strip()) < 20:
        response = "Could you tell me more about your symptoms? When did they start?"
    return response.strip()
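# Illustrative example: clean_doctor_response("DOCTOR: Take rest. Drink fluids")
# returns "Take rest. Drink fluids." -- the role label is stripped and the
# final sentence is closed with a period.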
# =======================================================
# Gradio Interface
# =======================================================
with gr.Blocks(theme=gr.themes.Soft(), css="""
    .medical-header {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        padding: 20px;
        border-radius: 10px;
        color: white;
        text-align: center;
        margin-bottom: 20px;
    }
""") as demo:
    gr.HTML("""
    <div class="medical-header">
        <h1>🏥 AI Doctor Consultation</h1>
        <p>Interactive Medical Conversation • Just Like Visiting Your Doctor</p>
    </div>
    """)
    gr.Markdown("""
    ### 💬 How This Works:
    - Describe your symptoms or health concerns
    - The AI doctor will ask questions to understand your condition
    - You'll get medical advice, medication suggestions, and lifestyle recommendations
    - Have a natural back-and-forth conversation, just like a real doctor visit
    """)
    chatbot = gr.Chatbot(
        label="💬 Doctor-Patient Consultation",
        type='messages',
        avatar_images=(
            "https://cdn-icons-png.flaticon.com/512/706/706830.png",   # Patient
            "https://cdn-icons-png.flaticon.com/512/3774/3774299.png"  # Doctor
        ),
        height=500,
        show_copy_button=True
    )
    with gr.Row():
        user_input = gr.Textbox(
            placeholder="Describe your symptoms or answer the doctor's questions...",
            label="🧑 Patient (You)",
            lines=2,
            scale=4
        )
    with gr.Row():
        send_btn = gr.Button("💬 Send", variant="primary", scale=1)
        clear_btn = gr.Button("🔄 New Consultation", scale=1)
    gr.Markdown("### 💡 Example Conversations")
    gr.Examples(
        examples=[
            "Hi Doctor, I've been having fever and body aches for 2 days",
            "I have numbness in my toes and difficulty walking",
            "I've been feeling very tired all the time lately",
            "I have chest pain and shortness of breath",
            "I get headaches almost every day",
            "I have stomach pain after eating"
        ],
        inputs=user_input,
    )
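    # Note: with only `inputs=` set, clicking an example fills the textbox but
    # does not auto-submit; the user still presses Send.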
    gr.Markdown("""
    ---
    ⚠️ **Medical Disclaimer:** This AI provides general medical information for educational purposes.
    It is NOT a substitute for professional medical advice. Always consult a qualified healthcare
    provider for diagnosis and treatment. In case of emergency, call emergency services immediately.
    """)
    # =======================================================
    # Respond Function with Context Memory
    # =======================================================
    def respond(message, history):
        user_message = message.strip()
        if not user_message:
            # Must yield (not return a value): this callback is a generator,
            # so Gradio only picks up yielded outputs
            yield "", history
            return
        # Add user message to history
        history.append({"role": "user", "content": user_message})
        # Generate and stream the doctor's reply with full conversation context
        for updated_history in generate_doctor_response(history):
            yield "", updated_history
    # =======================================================
    # Button & Input Bindings
    # =======================================================
    send_btn.click(respond, [user_input, chatbot], [user_input, chatbot])
    user_input.submit(respond, [user_input, chatbot], [user_input, chatbot])
    clear_btn.click(lambda: [], None, chatbot, queue=False)
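    # Because respond() is a generator, Gradio re-renders the chatbot on each
    # yield, producing the typing effect (this relies on the queue enabled via
    # demo.queue() below).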
# =======================================================
# Launch App
# =======================================================
if __name__ == "__main__":
print("="*60)
print("π₯ AI Doctor Consultation System Starting...")
print(" Interactive medical conversation with context memory")
print("="*60)
demo.queue(max_size=20)
demo.launch(
share=True,
show_error=True,
server_name="0.0.0.0"
) |