import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import torch
import time
# =======================================================
# Load Model
# =======================================================
model_name = "augtoma/qCammel-13"
print("Loading tokenizer and model...")
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True,
    low_cpu_mem_usage=True
)
model.eval()
print("β
Model loaded successfully!")
# =======================================================
# Global Memory for Doctor Flow
# =======================================================
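# Simple state machine: intro -> ask_name -> ask_age -> ask_gender -> consult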
session = {"name": None, "age": None, "gender": None, "stage": "intro"}
# =======================================================
# Generate Doctor Response
# =======================================================
def doctor_response(user_message):
    global session
    user_message = user_message.strip()
    # Step 1: Greeting
    if session["stage"] == "intro":
        session["stage"] = "ask_name"
        return "👨‍⚕️ Hello! I'm Dr. Aiden. May I know your name, please?"
    # Step 2: Get Name
    elif session["stage"] == "ask_name":
        session["name"] = user_message.split()[0].capitalize()
        session["stage"] = "ask_age"
        return f"Nice to meet you, {session['name']}! How old are you?"
    # Step 3: Get Age
    elif session["stage"] == "ask_age":
        words = user_message.split()
        for w in words:
            if w.isdigit():
                session["age"] = int(w)
                session["stage"] = "ask_gender"
                return f"Got it, {session['name']}. Are you male or female?"
        return "Please tell me your age in numbers, like 25 or 30."
    # Step 4: Get Gender
    elif session["stage"] == "ask_gender":
        # Check "female" first: "male" is a substring of "female" and would match both.
        if "female" in user_message.lower():
            session["gender"] = "female"
        elif "male" in user_message.lower():
            session["gender"] = "male"
        else:
            return "Could you please specify whether you are male or female?"
        session["stage"] = "consult"
        return f"Thanks, {session['name']}! So you're a {session['age']}-year-old {session['gender']}. What brings you in today?"
    # Step 5: Medical Consultation Mode
    elif session["stage"] == "consult":
        name = session["name"]
        age = session["age"]
        gender = session["gender"]
        prompt = f"""
You are Dr. Aiden, a warm, professional, and conversational doctor talking naturally with a patient.
Patient Info:
- Name: {name}
- Age: {age}
- Gender: {gender}
Speak in a caring and natural tone (like a friendly doctor in a private clinic).
Include in your response:
1. Acknowledgement of their symptoms
2. Possible causes (simple explanation)
3. Simple medicines with dosage (if applicable)
4. Food, rest, and hydration advice
5. When to see a real doctor
6. Short closing reassurance
Patient: {user_message}
Doctor:"""
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)
        gen_cfg = GenerationConfig(
            do_sample=True,  # required for temperature/top_p to take effect
            temperature=0.7,
            top_p=0.9,
            max_new_tokens=450,
            repetition_penalty=1.15,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id
        )
        with torch.no_grad():
            output = model.generate(**inputs, generation_config=gen_cfg)
        # Decode only the newly generated tokens so the prompt is not echoed back
        new_tokens = output[0][inputs["input_ids"].shape[1]:]
        output_text = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
        output_text = output_text.replace("Doctor:", "").replace("Patient:", "").strip()
        # Final cleanup
        if not output_text.endswith((".", "!", "?")):
            output_text += "."
        output_text += "\n\n⚠️ *Note: This advice is AI-generated and not a substitute for professional medical care.*"
        return output_text
# =======================================================
# Gradio Interface
# =======================================================
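# The Chatbot below uses the "messages" format: history is a list of
# {"role": ..., "content": ...} dicts, which is what respond() appends.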
with gr.Blocks(theme=gr.themes.Soft()) as demo:
gr.HTML("""
<div style="text-align:center; background-color:#4C7DFF; color:white; padding:20px; border-radius:10px;">
<h1>π Your Consultation with Dr. Aiden</h1>
<p>Empathetic β’ Knowledgeable β’ Natural β Your AI Medical Advisor</p>
</div>
""")
chatbot = gr.Chatbot(
label="π¨ββοΈ Chat with Dr. Aiden",
height=550,
type='messages',
avatar_images=(
"https://cdn-icons-png.flaticon.com/512/706/706830.png",
"https://cdn-icons-png.flaticon.com/512/3774/3774299.png"
)
)
user_input = gr.Textbox(placeholder="Say 'Hi Doctor' to start your consultation...", label="Your Message", lines=2)
send_btn = gr.Button("π¬ Send", variant="primary")
clear_btn = gr.Button("π§Ή New Consultation")
def respond(message, history):
if history is None:
history = []
response = doctor_response(message)
history.append({"role": "user", "content": message})
history.append({"role": "assistant", "content": response})
return "", history
def reset():
global session
session = {"name": None, "age": None, "gender": None, "stage": "intro"}
return []
send_btn.click(respond, [user_input, chatbot], [user_input, chatbot])
user_input.submit(respond, [user_input, chatbot], [user_input, chatbot])
clear_btn.click(reset, None, chatbot, queue=False)
# =======================================================
# Launch
# =======================================================
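# share=True asks Gradio for a temporary public URL in addition to the local server;
# drop it if you only need local access.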
if __name__ == "__main__":
print("π₯ Launching Dr. Aiden...")
demo.queue()
demo.launch(share=True)