import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import torch
import time

# =======================================================
# Load Model
# =======================================================
model_name = "augtoma/qCammel-13"

print("Loading tokenizer and model...")
tokenizer = AutoTokenizer.from_pretrained(model_name)
# LLaMA-family tokenizers ship without a pad token; reuse EOS so that
# padding during tokenization/generation does not crash.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True,
    low_cpu_mem_usage=True,
)
model.eval()
print("✅ Model loaded successfully!")

# =======================================================
# Global Memory for Doctor Flow
# =======================================================
# NOTE(review): this module-level dict is shared by ALL concurrent users of
# the app; true per-user state would require gr.State. Kept as a global to
# preserve the existing single-user behavior.
session = {"name": None, "age": None, "gender": None, "stage": "intro"}


def doctor_response(user_message):
    """Drive the staged intake dialogue, then answer medically via the LLM.

    Stages advance intro -> ask_name -> ask_age -> ask_gender -> consult;
    intake answers are stored in the global ``session`` dict. In the
    ``consult`` stage the user message is embedded in a doctor persona
    prompt and a model completion is returned.

    Args:
        user_message: Raw text typed by the user.

    Returns:
        The doctor's reply as a string (never ``None``).
    """
    global session
    user_message = user_message.strip()

    # Step 1: Greeting
    if session["stage"] == "intro":
        session["stage"] = "ask_name"
        return "👨‍⚕️ Hello! I’m Dr. Aiden. May I know your name, please?"

    # Step 2: Get Name
    elif session["stage"] == "ask_name":
        # BUG FIX: guard empty/whitespace input — split()[0] on an empty
        # message raised IndexError in the original.
        if not user_message:
            return "I didn't catch that — could you tell me your name, please?"
        session["name"] = user_message.split()[0].capitalize()
        session["stage"] = "ask_age"
        return f"Nice to meet you, {session['name']}! How old are you?"

    # Step 3: Get Age (first numeric token wins)
    elif session["stage"] == "ask_age":
        for word in user_message.split():
            if word.isdigit():
                session["age"] = int(word)
                session["stage"] = "ask_gender"
                return f"Got it, {session['name']}. Are you male or female?"
        return "Please tell me your age in numbers, like 25 or 30."

    # Step 4: Get Gender
    # NOTE: "female" contains "male", so check "male" via the more specific
    # branch order below ("male" in a message saying "female" also matches,
    # which is why "female" must not be tested first the other way around —
    # kept the original precedence: plain "male" wins).
    elif session["stage"] == "ask_gender":
        if "male" in user_message.lower():
            session["gender"] = "male"
        elif "female" in user_message.lower():
            session["gender"] = "female"
        else:
            return "Could you please specify whether you are male or female?"
        session["stage"] = "consult"
        return (
            f"Thanks, {session['name']}! So you're a {session['age']}-year-old "
            f"{session['gender']}. What brings you in today?"
        )

    # Step 5: Medical Consultation Mode
    elif session["stage"] == "consult":
        name = session["name"]
        age = session["age"]
        gender = session["gender"]

        prompt = f"""
You are Dr. Aiden — a warm, professional, and conversational doctor talking naturally with a patient.

Patient Info:
- Name: {name}
- Age: {age}
- Gender: {gender}

Speak in a caring and natural tone (like a friendly doctor in a private clinic).
Include in your response:
1. Acknowledgement of their symptoms
2. Possible causes (simple explanation)
3. Simple medicines with dosage (if applicable)
4. Food, rest, and hydration advice
5. When to see a real doctor
6. Short closing reassurance

Patient: {user_message}
Doctor:"""

        inputs = tokenizer(
            prompt, return_tensors="pt", truncation=True, max_length=2048
        ).to(model.device)

        gen_cfg = GenerationConfig(
            # BUG FIX: do_sample=True is required for temperature/top_p to
            # take effect; the original silently fell back to greedy search.
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            max_new_tokens=450,
            repetition_penalty=1.15,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

        with torch.no_grad():
            output = model.generate(**inputs, generation_config=gen_cfg)

        # BUG FIX: decode only the NEWLY generated tokens. Decoding
        # output[0] in full echoed the entire system prompt (patient info,
        # numbered instructions) back to the user.
        prompt_len = inputs["input_ids"].shape[-1]
        output_text = tokenizer.decode(
            output[0][prompt_len:], skip_special_tokens=True
        ).strip()
        output_text = output_text.replace("Doctor:", "").replace("Patient:", "").strip()

        # Final cleanup: ensure the reply ends with terminal punctuation.
        if not output_text.endswith((".", "!", "?")):
            output_text += "."

        output_text += (
            "\n\n⚕️ *Note: This advice is AI-generated and not a substitute "
            "for professional medical care.*"
        )
        return output_text

    # BUG FIX: unknown stage previously returned None, which rendered as
    # "None" in the chat; recover by restarting the intake flow.
    session["stage"] = "ask_name"
    return "👨‍⚕️ Hello! I’m Dr. Aiden. May I know your name, please?"


# =======================================================
# Gradio Interface
# =======================================================
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # NOTE(review): the original HTML markup was garbled in the source;
    # reconstructed as a minimal centered header using the surviving text.
    gr.HTML(
        """
        <div style="text-align:center;">
            <h1>💙 Your Consultation with Dr. Aiden</h1>
            <p>Empathetic • Knowledgeable • Natural — Your AI Medical Advisor</p>
        </div>
        """
    )

    chatbot = gr.Chatbot(
        label="👨‍⚕️ Chat with Dr. Aiden",
        height=550,
        type="messages",
        avatar_images=(
            "https://cdn-icons-png.flaticon.com/512/706/706830.png",
            "https://cdn-icons-png.flaticon.com/512/3774/3774299.png",
        ),
    )
    user_input = gr.Textbox(
        placeholder="Say 'Hi Doctor' to start your consultation...",
        label="Your Message",
        lines=2,
    )
    send_btn = gr.Button("💬 Send", variant="primary")
    clear_btn = gr.Button("🧹 New Consultation")

    def respond(message, history):
        """Append the user turn and the doctor's reply to the chat history."""
        if history is None:
            history = []
        # Robustness: ignore empty submissions instead of advancing the flow.
        if not message or not message.strip():
            return "", history
        response = doctor_response(message)
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": response})
        return "", history

    def reset():
        """Wipe the intake state and clear the chat for a new consultation."""
        global session
        session = {"name": None, "age": None, "gender": None, "stage": "intro"}
        return []

    send_btn.click(respond, [user_input, chatbot], [user_input, chatbot])
    user_input.submit(respond, [user_input, chatbot], [user_input, chatbot])
    clear_btn.click(reset, None, chatbot, queue=False)

# =======================================================
# Launch
# =======================================================
if __name__ == "__main__":
    print("🏥 Launching Dr. Aiden...")
    demo.queue()
    demo.launch(share=True)