|
|
import re

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face model id for the qCammel-13 clinical-dialogue LLaMA variant.
model_name = "augtoma/qCammel-13"

print("Loading tokenizer and model...")

tokenizer = AutoTokenizer.from_pretrained(model_name)
# LLaMA-family tokenizers ship without a pad token; reuse EOS so padding /
# truncation during generation does not raise.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",          # shard layers across available devices
    torch_dtype=torch.float16,  # half precision halves the memory footprint
    trust_remote_code=True,
    low_cpu_mem_usage=True,
)
model.eval()  # inference only — disable dropout etc.

# BUG FIX: the original string literal was split across two physical lines
# with no continuation, which is a SyntaxError in Python.
print("✅ Model loaded successfully!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Per-process consultation state. doctor_response() mutates this in place as
# the interview advances; reset() rebuilds it for a new consultation.
session = dict(
    name=None,
    age=None,
    gender=None,
    symptoms=None,
    duration=None,
    stage="intro",
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def extract_name(text):
    """Heuristically pull a patient name out of a free-form reply.

    Strips common conversational fillers ("my name is", "i'm", "yes", ...)
    as whole words/phrases only, so names that merely *contain* them as
    substrings (e.g. "Chris", "Denise") are left intact.  The original
    chained ``str.replace`` calls mangled such names ("Chris" -> "Chr").

    Args:
        text: Raw user reply, e.g. "yes, my name is John".

    Returns:
        Title-cased name, or "Patient" when nothing usable remains.
    """
    cleaned = text.lower()
    # Word-boundary removal: only drop a filler when it stands alone.
    for filler in ("my name is", "i am", "i'm", "yes", "name", "is"):
        cleaned = re.sub(r"\b" + re.escape(filler) + r"\b", " ", cleaned)
    # Collapse the gaps left behind by the removals.
    cleaned = " ".join(cleaned.split())
    return cleaned.title() if cleaned else "Patient"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def doctor_response(user_message):
    """Drive the staged consultation and return Dr. Aiden's next message.

    The conversation advances through the global ``session`` dict one stage
    at a time: intro -> ask_name -> ask_age -> ask_gender -> ask_symptoms ->
    ask_duration -> consult.  Only the final "consult" stage invokes the
    language model; every earlier stage returns a scripted prompt and
    records the patient's answer.

    Args:
        user_message: Raw text typed by the patient.

    Returns:
        The doctor's reply as a string.
    """
    global session
    user_message = user_message.strip()

    if session["stage"] == "intro":
        session["stage"] = "ask_name"
        return "π¨ββοΈ Hello! Iβm Dr. Aiden. May I know your name, please?"

    elif session["stage"] == "ask_name":
        session["name"] = extract_name(user_message)
        session["stage"] = "ask_age"
        return f"Nice to meet you, {session['name']}! How old are you?"

    elif session["stage"] == "ask_age":
        # Accept the first standalone number in the reply ("I am 25 years").
        for w in user_message.split():
            if w.isdigit():
                session["age"] = int(w)
                session["stage"] = "ask_gender"
                return f"Got it, {session['name']}. Are you male or female?"
        return "Please tell me your age in numbers, like 20 or 25."

    elif session["stage"] == "ask_gender":
        # BUG FIX: "female" must be checked BEFORE "male" — the substring
        # "male" occurs inside "female", so the original order classified
        # every female patient as male.
        if "female" in user_message.lower():
            session["gender"] = "female"
        elif "male" in user_message.lower():
            session["gender"] = "male"
        else:
            return "Could you please specify whether you are male or female?"
        session["stage"] = "ask_symptoms"
        return f"Thanks, {session['name']}! So you're a {session['age']}-year-old {session['gender']}. What symptoms are you experiencing?"

    elif session["stage"] == "ask_symptoms":
        session["symptoms"] = user_message
        session["stage"] = "ask_duration"
        return "Since when have you been feeling this way?"

    elif session["stage"] == "ask_duration":
        session["duration"] = user_message
        session["stage"] = "consult"
        return "Got it. Are you taking any medications or treatments currently?"

    elif session["stage"] == "consult":
        # All intake answers collected — every further message goes through
        # the LLM with the full patient context. Stage stays "consult" so
        # the conversation continues until reset() is called.
        name = session["name"]
        age = session["age"]
        gender = session["gender"]
        symptoms = session["symptoms"]
        duration = session["duration"]

        prompt = f"""
You are Dr. Aiden β a warm, friendly, and professional doctor having an interview-style consultation.
The patient is a {age}-year-old {gender} named {name}.
They have been feeling {symptoms} for {duration}.
They said: "{user_message}"

Respond like a real doctor β show empathy, analyze the symptoms, suggest likely causes, give simple medication and home care advice.

Include:
1. Acknowledge their discomfort.
2. Explain possible causes in simple terms.
3. Recommend over-the-counter medicines (if safe).
4. Suggest food, hydration, and rest tips.
5. Warn when to visit a real doctor.
6. End with gentle reassurance.

Doctor:"""

        # Cap the prompt at the model's context window; tensors must live on
        # the same device as the (device_map="auto") model.
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
        gen_cfg = GenerationConfig(
            temperature=0.7,
            top_p=0.9,
            max_new_tokens=350,
            repetition_penalty=1.1,   # discourage looping phrases
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

        with torch.no_grad():
            output = model.generate(**inputs, generation_config=gen_cfg)

        # decode() returns prompt + completion; keep only the text after the
        # final "Doctor:" marker.
        output_text = tokenizer.decode(output[0], skip_special_tokens=True)
        output_text = output_text.split("Doctor:")[-1].strip()
        if not output_text.endswith((".", "!", "?")):
            output_text += "."
        output_text += "\n\nβοΈ *Note: This advice is AI-generated and not a substitute for professional medical care.*"

        return output_text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---- Gradio UI: chat window, input box, and wiring -------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Static banner shown above the chat.
    gr.HTML("""
    <div style="text-align:center; background-color:#4C7DFF; color:white; padding:20px; border-radius:10px;">
    <h1>π₯ Doctor Consultation with Dr. Aiden</h1>
    <p>AI-powered doctor interview β step-by-step and caring conversation</p>
    </div>
    """)

    chatbot = gr.Chatbot(
        type='messages',
        height=550,
        label="π¨ββοΈ Chat with Dr. Aiden",
        avatar_images=(
            "https://cdn-icons-png.flaticon.com/512/706/706830.png",
            "https://cdn-icons-png.flaticon.com/512/3774/3774299.png",
        ),
    )

    user_input = gr.Textbox(
        label="Your Message",
        lines=2,
        placeholder="Say 'Hi Doctor' to start your consultation...",
    )
    send_btn = gr.Button("π¬ Send", variant="primary")
    clear_btn = gr.Button("π§Ή New Consultation")

    def _on_message(message, history):
        """Append the user turn and Dr. Aiden's reply; clear the textbox."""
        history = list(history) if history else []
        reply = doctor_response(message)
        history.extend((
            {"role": "user", "content": message},
            {"role": "assistant", "content": reply},
        ))
        return "", history

    def _new_consultation():
        """Reset the global interview state and empty the chat window."""
        global session
        session = {key: None for key in ("name", "age", "gender", "symptoms", "duration")}
        session["stage"] = "intro"
        return []

    # Both the button and Enter in the textbox send a message.
    send_btn.click(_on_message, [user_input, chatbot], [user_input, chatbot])
    user_input.submit(_on_message, [user_input, chatbot], [user_input, chatbot])
    clear_btn.click(_new_consultation, None, chatbot, queue=False)
|
|
|
|
|
# Script entry point: start the Gradio app when run directly.
if __name__ == "__main__":
    print("π₯ Launching Dr. Aiden...")
    # Enable request queueing so concurrent users don't collide on the model.
    demo.queue()
    # share=True asks Gradio for a temporary public URL in addition to the
    # local server.
    demo.launch(share=True)
|
|
|