Update app.py
Browse files
app.py
CHANGED
|
@@ -1,7 +1,6 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
|
| 3 |
import torch
|
| 4 |
-
import time
|
| 5 |
|
| 6 |
# =======================================================
|
| 7 |
# Load Model
|
|
@@ -21,7 +20,6 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
| 21 |
low_cpu_mem_usage=True
|
| 22 |
)
|
| 23 |
model.eval()
|
| 24 |
-
|
| 25 |
print("✅ Model loaded successfully!")
|
| 26 |
|
| 27 |
|
|
@@ -31,6 +29,18 @@ print("✅ Model loaded successfully!")
|
|
| 31 |
session = {"name": None, "age": None, "gender": None, "stage": "intro"}
|
| 32 |
|
| 33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
# =======================================================
|
| 35 |
# Generate Doctor Response
|
| 36 |
# =======================================================
|
|
@@ -45,7 +55,7 @@ def doctor_response(user_message):
|
|
| 45 |
|
| 46 |
# Step 2: Get Name
|
| 47 |
elif session["stage"] == "ask_name":
|
| 48 |
-
session["name"] = user_message
|
| 49 |
session["stage"] = "ask_age"
|
| 50 |
return f"Nice to meet you, {session['name']}! How old are you?"
|
| 51 |
|
|
@@ -57,7 +67,7 @@ def doctor_response(user_message):
|
|
| 57 |
session["age"] = int(w)
|
| 58 |
session["stage"] = "ask_gender"
|
| 59 |
return f"Got it, {session['name']}. Are you male or female?"
|
| 60 |
-
return "Please tell me your age in numbers, like
|
| 61 |
|
| 62 |
# Step 4: Get Gender
|
| 63 |
elif session["stage"] == "ask_gender":
|
|
@@ -78,22 +88,19 @@ def doctor_response(user_message):
|
|
| 78 |
gender = session["gender"]
|
| 79 |
|
| 80 |
prompt = f"""
|
| 81 |
-
You are Dr. Aiden — a
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
- Name: {name}
|
| 85 |
-
- Age: {age}
|
| 86 |
-
- Gender: {gender}
|
| 87 |
|
| 88 |
-
|
| 89 |
|
| 90 |
Include in your response:
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
|
| 98 |
Patient: {user_message}
|
| 99 |
Doctor:"""
|
|
@@ -102,8 +109,8 @@ Doctor:"""
|
|
| 102 |
gen_cfg = GenerationConfig(
|
| 103 |
temperature=0.7,
|
| 104 |
top_p=0.9,
|
| 105 |
-
max_new_tokens=
|
| 106 |
-
repetition_penalty=1.
|
| 107 |
pad_token_id=tokenizer.pad_token_id,
|
| 108 |
eos_token_id=tokenizer.eos_token_id
|
| 109 |
)
|
|
@@ -112,7 +119,7 @@ Doctor:"""
|
|
| 112 |
output = model.generate(**inputs, generation_config=gen_cfg)
|
| 113 |
|
| 114 |
output_text = tokenizer.decode(output[0], skip_special_tokens=True).strip()
|
| 115 |
-
output_text = output_text.
|
| 116 |
|
| 117 |
# Final cleanup
|
| 118 |
if not output_text.endswith((".", "!", "?")):
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
|
| 3 |
import torch
|
|
|
|
| 4 |
|
| 5 |
# =======================================================
|
| 6 |
# Load Model
|
|
|
|
| 20 |
low_cpu_mem_usage=True
|
| 21 |
)
|
| 22 |
model.eval()
|
|
|
|
| 23 |
print("✅ Model loaded successfully!")
|
| 24 |
|
| 25 |
|
|
|
|
| 29 |
session = {"name": None, "age": None, "gender": None, "stage": "intro"}
|
| 30 |
|
| 31 |
|
| 32 |
+
# =======================================================
|
| 33 |
+
# Helper: Clean name input
|
| 34 |
+
# =======================================================
|
| 35 |
+
def extract_name(text):
    """Extract a clean patient name from free-form chat input.

    Drops common filler words ("yes", "i am", "my name is", "it's", ...)
    so an answer like "yes my name is chris" yields just the name.

    Filtering is done at whole-word level rather than by substring
    replacement: the previous `str.replace()` approach corrupted any name
    that merely *contains* a filler ("Chris" -> "Chr" via "is",
    "Yesenia" -> "enia" via "yes"). It also removes "my", which the old
    comment promised but the old code never did.

    Args:
        text: Raw user message (any casing, may be empty).

    Returns:
        The remaining words title-cased, or "Patient" when nothing
        usable is left after filtering.
    """
    fillers = {"yes", "i", "am", "i'm", "my", "name", "is", "it's"}
    # Keep only words that are not fillers; comparison is case-insensitive.
    kept = [word for word in text.split() if word.lower() not in fillers]
    cleaned = " ".join(kept).strip()
    # Title-case for display; fall back to a neutral placeholder.
    return cleaned.title() if cleaned else "Patient"
|
| 42 |
+
|
| 43 |
+
|
| 44 |
# =======================================================
|
| 45 |
# Generate Doctor Response
|
| 46 |
# =======================================================
|
|
|
|
| 55 |
|
| 56 |
# Step 2: Get Name
|
| 57 |
elif session["stage"] == "ask_name":
|
| 58 |
+
session["name"] = extract_name(user_message)
|
| 59 |
session["stage"] = "ask_age"
|
| 60 |
return f"Nice to meet you, {session['name']}! How old are you?"
|
| 61 |
|
|
|
|
| 67 |
session["age"] = int(w)
|
| 68 |
session["stage"] = "ask_gender"
|
| 69 |
return f"Got it, {session['name']}. Are you male or female?"
|
| 70 |
+
return "Please tell me your age in numbers, like 20 or 25."
|
| 71 |
|
| 72 |
# Step 4: Get Gender
|
| 73 |
elif session["stage"] == "ask_gender":
|
|
|
|
| 88 |
gender = session["gender"]
|
| 89 |
|
| 90 |
prompt = f"""
|
| 91 |
+
You are Dr. Aiden — a friendly, empathetic, and experienced doctor.
|
| 92 |
+
You are chatting casually with a {age}-year-old {gender} patient named {name}.
|
| 93 |
+
The patient is describing their health concern.
|
|
|
|
|
|
|
|
|
|
| 94 |
|
| 95 |
+
Respond like a real doctor would — empathetic, natural, and informative.
|
| 96 |
|
| 97 |
Include in your response:
|
| 98 |
+
- Acknowledge the patient’s concern
|
| 99 |
+
- Possible causes (in simple words)
|
| 100 |
+
- Basic medicine or remedy (if applicable)
|
| 101 |
+
- Rest, hydration, and diet advice
|
| 102 |
+
- When to visit a real doctor
|
| 103 |
+
- End with reassurance
|
| 104 |
|
| 105 |
Patient: {user_message}
|
| 106 |
Doctor:"""
|
|
|
|
| 109 |
gen_cfg = GenerationConfig(
|
| 110 |
temperature=0.7,
|
| 111 |
top_p=0.9,
|
| 112 |
+
max_new_tokens=350,
|
| 113 |
+
repetition_penalty=1.12,
|
| 114 |
pad_token_id=tokenizer.pad_token_id,
|
| 115 |
eos_token_id=tokenizer.eos_token_id
|
| 116 |
)
|
|
|
|
| 119 |
output = model.generate(**inputs, generation_config=gen_cfg)
|
| 120 |
|
| 121 |
output_text = tokenizer.decode(output[0], skip_special_tokens=True).strip()
|
| 122 |
+
output_text = output_text.split("Doctor:")[-1].strip()
|
| 123 |
|
| 124 |
# Final cleanup
|
| 125 |
if not output_text.endswith((".", "!", "?")):
|