# Hugging Face Spaces page residue (author: Muhammadidrees, commit f02275d verified,
# "Update app.py", raw/history/blame, 6.41 kB) — converted to a comment so the module parses.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import torch
# =======================================================
# Load Model
# =======================================================
MODEL_ID = "augtoma/qCammel-13"

print("Loading tokenizer and model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Llama-family tokenizers often ship without a pad token; reuse EOS so
# padded batches don't crash generate().
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",          # place layers on whatever devices are available
    torch_dtype=torch.float16,  # half precision to fit a 13B model in memory
    trust_remote_code=True,
    low_cpu_mem_usage=True,
)
model.eval()  # inference only — disable dropout / training-mode layers
print("✅ Model loaded successfully!")
# =======================================================
# Global Memory for Doctor Flow
# =======================================================
# Intake state machine for the consultation flow.
# NOTE(review): one module-level dict means every visitor shares a single
# session — fine for a demo, wrong for concurrent users.
session = dict(name=None, age=None, gender=None, stage="intro")
# =======================================================
# Helper: Clean name input
# =======================================================
def extract_name(text):
    """Extract a patient's name from a free-form reply.

    Drops common filler words ("yes", "my name is", "i'm", ...) on whole-word
    boundaries. The original used substring `.replace()`, which mangled names
    that merely *contain* a filler substring (e.g. "Chris" -> "Chr" because it
    contains "is"); word-level filtering avoids that.

    Args:
        text: raw user reply, e.g. "my name is John".

    Returns:
        The remaining words, title-cased ("John"), or "Patient" if nothing
        usable is left.
    """
    fillers = {"yes", "yeah", "i", "am", "i'm", "my", "name", "is", "it's"}
    # Lowercase, split on whitespace, strip surrounding punctuation per word.
    words = [w.strip(".,!?") for w in text.lower().split()]
    kept = [w for w in words if w and w not in fillers]
    return " ".join(kept).title() if kept else "Patient"
# =======================================================
# Generate Doctor Response
# =======================================================
def doctor_response(user_message):
    """Return the next doctor reply for the staged consultation flow.

    Walks the module-level ``session`` state machine:
    intro -> ask_name -> ask_age -> ask_gender -> consult.
    The first four stages are scripted intake questions; the ``consult``
    stage builds a persona prompt and generates a reply with the LLM.

    Fixes vs. the original:
    - gender detection checked ``"male" in text`` before ``"female"``, but
      "male" is a substring of "female", so female patients were always
      classified as male;
    - ``do_sample=True`` is set so temperature/top_p actually take effect
      (GenerationConfig defaults to greedy decoding and ignores them);
    - an unknown stage now restarts the flow instead of returning None.

    Args:
        user_message: raw text the patient typed.

    Returns:
        The doctor's reply as a string (never None).
    """
    global session
    user_message = user_message.strip()

    # Step 1: Greeting
    if session["stage"] == "intro":
        session["stage"] = "ask_name"
        return "👨‍⚕️ Hello! I’m Dr. Aiden. May I know your name, please?"

    # Step 2: Get Name
    elif session["stage"] == "ask_name":
        session["name"] = extract_name(user_message)
        session["stage"] = "ask_age"
        return f"Nice to meet you, {session['name']}! How old are you?"

    # Step 3: Get Age — accept the first standalone number in the reply.
    elif session["stage"] == "ask_age":
        for w in user_message.split():
            if w.isdigit():
                session["age"] = int(w)
                session["stage"] = "ask_gender"
                return f"Got it, {session['name']}. Are you male or female?"
        return "Please tell me your age in numbers, like 20 or 25."

    # Step 4: Get Gender
    elif session["stage"] == "ask_gender":
        reply = user_message.lower()
        # Check "female" FIRST: "male" is a substring of "female", so testing
        # "male" first misclassifies every female patient.
        if "female" in reply:
            session["gender"] = "female"
        elif "male" in reply:
            session["gender"] = "male"
        else:
            return "Could you please specify whether you are male or female?"
        session["stage"] = "consult"
        return f"Thanks, {session['name']}! So you're a {session['age']}-year-old {session['gender']}. What brings you in today?"

    # Step 5: Medical Consultation Mode — free-form LLM generation.
    elif session["stage"] == "consult":
        name = session["name"]
        age = session["age"]
        gender = session["gender"]
        prompt = f"""
You are Dr. Aiden — a friendly, empathetic, and experienced doctor.
You are chatting casually with a {age}-year-old {gender} patient named {name}.
The patient is describing their health concern.
Respond like a real doctor would — empathetic, natural, and informative.
Include in your response:
- Acknowledge the patient’s concern
- Possible causes (in simple words)
- Basic medicine or remedy (if applicable)
- Rest, hydration, and diet advice
- When to visit a real doctor
- End with reassurance
Patient: {user_message}
Doctor:"""
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
        gen_cfg = GenerationConfig(
            do_sample=True,  # required: temperature/top_p are ignored under greedy decoding
            temperature=0.7,
            top_p=0.9,
            max_new_tokens=350,
            repetition_penalty=1.12,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
        with torch.no_grad():
            output = model.generate(**inputs, generation_config=gen_cfg)
        output_text = tokenizer.decode(output[0], skip_special_tokens=True).strip()
        # decode() returns prompt + continuation; keep only the text after the
        # last "Doctor:" marker (the model's actual reply).
        output_text = output_text.split("Doctor:")[-1].strip()
        # Final cleanup: ensure terminal punctuation, then append disclaimer.
        if not output_text.endswith((".", "!", "?")):
            output_text += "."
        output_text += "\n\n⚕️ *Note: This advice is AI-generated and not a substitute for professional medical care.*"
        return output_text

    # Defensive fallback: unknown stage — restart the intake flow rather than
    # silently returning None (the original had no else branch).
    session = {"name": None, "age": None, "gender": None, "stage": "ask_name"}
    return "👨‍⚕️ Hello! I’m Dr. Aiden. May I know your name, please?"
# =======================================================
# Gradio Interface
# =======================================================
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Header banner.
    gr.HTML("""
<div style="text-align:center; background-color:#4C7DFF; color:white; padding:20px; border-radius:10px;">
<h1>💙 Your Consultation with Dr. Aiden</h1>
<p>Empathetic • Knowledgeable • Natural — Your AI Medical Advisor</p>
</div>
""")

    # Transcript widget using OpenAI-style {"role", "content"} message dicts.
    chatbot = gr.Chatbot(
        label="👨‍⚕️ Chat with Dr. Aiden",
        height=550,
        type='messages',
        avatar_images=(
            "https://cdn-icons-png.flaticon.com/512/706/706830.png",
            "https://cdn-icons-png.flaticon.com/512/3774/3774299.png",
        ),
    )
    user_input = gr.Textbox(
        placeholder="Say 'Hi Doctor' to start your consultation...",
        label="Your Message",
        lines=2,
    )
    send_btn = gr.Button("💬 Send", variant="primary")
    clear_btn = gr.Button("🧹 New Consultation")

    def _on_message(text, transcript):
        """Append the user turn plus the doctor's reply; clear the textbox."""
        transcript = [] if transcript is None else transcript
        reply = doctor_response(text)
        transcript.extend([
            {"role": "user", "content": text},
            {"role": "assistant", "content": reply},
        ])
        return "", transcript

    def _on_reset():
        """Start a fresh consultation: wipe intake state and the transcript."""
        global session
        session = {"name": None, "age": None, "gender": None, "stage": "intro"}
        return []

    # Both the button and pressing Enter submit the message.
    send_btn.click(_on_message, [user_input, chatbot], [user_input, chatbot])
    user_input.submit(_on_message, [user_input, chatbot], [user_input, chatbot])
    clear_btn.click(_on_reset, None, chatbot, queue=False)
# =======================================================
# Launch
# =======================================================
def _main():
    """Start the Gradio app with request queuing and a public share link."""
    print("🏥 Launching Dr. Aiden...")
    demo.queue()
    demo.launch(share=True)


if __name__ == "__main__":
    _main()