# MedSoft-Ro — app.py (Hugging Face Space; author: safiaa02)
import gradio as gr
from transformers import pipeline

# Model bootstrap: all three pipelines are loaded once at import time, so
# startup is slow (model downloads) but individual requests are fast.
print("🔄 Loading models...")

# 1️⃣ Speech recognition — transcribes the recorded audio clip to text.
whisper = pipeline("automatic-speech-recognition", model="openai/whisper-small")

# 2️⃣ Emotion detection — single-label text classifier over the user's message.
emotion_model = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")

# 3️⃣ Conversational model (lightweight + chat tuned)
dialogue_model = pipeline(
    "text-generation",
    model="microsoft/Phi-3-mini-4k-instruct",
    torch_dtype="auto",  # let transformers pick the best available dtype
    device_map="auto"    # place weights on GPU when present, else CPU
)

# Rolling list of (user_text, assistant_reply) pairs, shared across requests.
conversation_history = []
def chat_response(message, audio):
    """Process one user turn (typed text or recorded audio) and reply.

    Parameters
    ----------
    message : str | None
        Text typed into the textbox.
    audio : str | None
        Filepath of a recorded clip (``gr.Audio(type="filepath")``).

    Returns
    -------
    tuple[list[tuple[str, str]], str]
        The updated (user, assistant) history for the Chatbot component,
        plus an empty string used to clear the textbox.
    """
    global conversation_history

    # Voice input wins when both a recording and typed text are supplied.
    if audio is not None:
        user_text = whisper(audio)["text"]
    else:
        user_text = message.strip() if message else ""

    # Ignore empty turns so blanks never enter the history.
    if not user_text:
        return conversation_history, ""

    # Classify the user's dominant emotion. Previously this value was
    # computed and then discarded; it is now fed into the prompt below.
    emotion = emotion_model(user_text)[0]["label"]

    # ✨ Refined system prompt — avoids meta or role-playing outputs
    system_prompt = (
        "System: You are a compassionate, professional mental health support agent. "
        "Your goal is to listen actively, validate the user's emotions, and provide "
        "calm reassurance or safe coping suggestions. "
        "Speak in a gentle, supportive, and respectful tone—like a trained counselor, "
        "but do not diagnose or prescribe. "
        "Respond in 2–3 short sentences, showing empathy and understanding. "
        "Do NOT mention being an AI or assistant, and never describe your tasks. "
        "Keep responses concise (2–4 sentences), focused on empathy and emotional safety.\n\n"
    )

    # Build a short rolling context from at most the last six exchanges.
    context = system_prompt
    for past_user, past_bot in conversation_history[-6:]:
        context += f"User: {past_user}\nAssistant: {past_bot}\n"
    # Surface the detected emotion so the model can tailor its tone.
    context += f"(The user currently seems to feel: {emotion}.)\n"
    context += f"User: {user_text}\nAssistant:"

    # do_sample=True is required for temperature/top_p to take effect;
    # without it the pipeline greedy-decodes and silently ignores both.
    response = dialogue_model(
        context,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )

    # Keep only the newly generated assistant turn, and cut off any
    # hallucinated follow-up "User:" turns the model may have appended.
    reply = response[0]["generated_text"].split("Assistant:")[-1]
    reply = reply.split("User:")[0].strip()

    # Update history
    conversation_history.append((user_text, reply))
    return conversation_history, ""
def reset():
    """Forget every stored turn and blank the chatbot display.

    Rebinds the module-level ``conversation_history`` to a fresh empty
    list and returns an empty history for the Chatbot component.
    """
    global conversation_history
    conversation_history = []
    return []
# 🌙 Dark WhatsApp-like interface
# Custom CSS injected into the Gradio page: dark backgrounds, WhatsApp-green
# accents for user bubbles and the primary button, and a fixed-height,
# scrollable chat pane (#chatbox).
dark_css = """
body {background-color: #121212;}
.gradio-container {background-color: #121212 !important; color: #e0e0e0;}
#chatbox .message.user {background-color: #075E54; color: #fff;}
#chatbox .message.bot {background-color: #262d31; color: #e0e0e0;}
#chatbox {background-color: #1e1e1e; border-radius: 10px; height: 550px; overflow-y: auto;}
.footer-row {display: flex; gap: .5rem; align-items: center;}
button.primary {background-color: #25D366 !important; color: black !important;}
button.secondary {background-color: #3c3c3c !important; color: #eee !important;}
"""
# Assemble the UI. The scraped source had lost all indentation, which makes
# the `with` bodies syntactically invalid; restored here.
with gr.Blocks(css=dark_css, title="🖤 MindSoft | Dark Mode Companion") as demo:
    gr.HTML("<h2 style='text-align:center; color:#25D366;'>💬 MindSoft — Your Compassionate Companion</h2>")
    # NOTE(review): bubble_full_width is deprecated/removed in recent Gradio
    # releases — confirm the Space's pinned Gradio version supports it.
    chatbot = gr.Chatbot(elem_id="chatbox", label="", height=550, bubble_full_width=False)

    with gr.Row(elem_classes="footer-row"):
        audio = gr.Audio(type="filepath", label="", interactive=True)
        txt = gr.Textbox(placeholder="Type a message...", show_label=False, scale=8)
        send = gr.Button("Send", variant="primary", scale=1)
        clear = gr.Button("Reset", variant="secondary", scale=1)

    # Both the Send button and Enter in the textbox submit one turn;
    # chat_response returns (history, "") so the textbox is cleared too.
    send.click(chat_response, [txt, audio], [chatbot, txt])
    txt.submit(chat_response, [txt, audio], [chatbot, txt])
    clear.click(reset, None, chatbot)

demo.launch()