import gradio as gr from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig import torch # ======================================================= # Load Model # ======================================================= model_name = "augtoma/qCammel-13" print("Loading tokenizer and model...") tokenizer = AutoTokenizer.from_pretrained(model_name) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True, low_cpu_mem_usage=True ) model.eval() print("โ Model loaded successfully!") # ======================================================= # Global Session Memory # ======================================================= session = { "name": None, "age": None, "gender": None, "symptoms": None, "duration": None, "stage": "intro" } # ======================================================= # Helper: Extract Name # ======================================================= def extract_name(text): text = text.lower().replace("yes", "").replace("i am", "").replace("i'm", "") text = text.replace("my name is", "").replace("name", "").replace("is", "").strip() return text.title() if text else "Patient" # ======================================================= # Doctor Response Logic # ======================================================= def doctor_response(user_message): global session user_message = user_message.strip() # Intro if session["stage"] == "intro": session["stage"] = "ask_name" return "๐จโโ๏ธ Hello! Iโm Dr. Aiden. May I know your name, please?" # Ask Name elif session["stage"] == "ask_name": session["name"] = extract_name(user_message) session["stage"] = "ask_age" return f"Nice to meet you, {session['name']}! How old are you?" 
# Ask Age elif session["stage"] == "ask_age": words = user_message.split() for w in words: if w.isdigit(): session["age"] = int(w) session["stage"] = "ask_gender" return f"Got it, {session['name']}. Are you male or female?" return "Please tell me your age in numbers, like 20 or 25." # Ask Gender elif session["stage"] == "ask_gender": if "male" in user_message.lower(): session["gender"] = "male" elif "female" in user_message.lower(): session["gender"] = "female" else: return "Could you please specify whether you are male or female?" session["stage"] = "ask_symptoms" return f"Thanks, {session['name']}! So you're a {session['age']}-year-old {session['gender']}. What symptoms are you experiencing?" # Ask Symptoms elif session["stage"] == "ask_symptoms": session["symptoms"] = user_message session["stage"] = "ask_duration" return "Since when have you been feeling this way?" # Ask Duration elif session["stage"] == "ask_duration": session["duration"] = user_message session["stage"] = "consult" return "Got it. Are you taking any medications or treatments currently?" # Consultation elif session["stage"] == "consult": name = session["name"] age = session["age"] gender = session["gender"] symptoms = session["symptoms"] duration = session["duration"] prompt = f""" You are Dr. Aiden โ a warm, friendly, and professional doctor having an interview-style consultation. The patient is a {age}-year-old {gender} named {name}. They have been feeling {symptoms} for {duration}. They said: "{user_message}" Respond like a real doctor โ show empathy, analyze the symptoms, suggest likely causes, give simple medication and home care advice. Include: 1. Acknowledge their discomfort. 2. Explain possible causes in simple terms. 3. Recommend over-the-counter medicines (if safe). 4. Suggest food, hydration, and rest tips. 5. Warn when to visit a real doctor. 6. End with gentle reassurance. 
Doctor:""" inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device) gen_cfg = GenerationConfig( temperature=0.7, top_p=0.9, max_new_tokens=350, repetition_penalty=1.1, pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id ) with torch.no_grad(): output = model.generate(**inputs, generation_config=gen_cfg) output_text = tokenizer.decode(output[0], skip_special_tokens=True) output_text = output_text.split("Doctor:")[-1].strip() if not output_text.endswith((".", "!", "?")): output_text += "." output_text += "\n\nโ๏ธ *Note: This advice is AI-generated and not a substitute for professional medical care.*" return output_text # ======================================================= # Gradio Interface # ======================================================= with gr.Blocks(theme=gr.themes.Soft()) as demo: gr.HTML("""
AI-powered doctor interview — step-by-step and caring conversation