# Mental Health Companion — Gradio app (uploaded by Poojasreeh, commit 9ee7e2a).
import gradio as gr
import requests
import os
# Use Hugging Face Inference API for instant deployment.
# Hosted inference endpoint for the BioMistral-7B biomedical LLM; requests
# against it are authenticated with the HF_TOKEN env var at call time.
API_URL = "https://api-inference.huggingface.co/models/BioMistral/BioMistral-7B"
def mental_health_chat(user_message, history):
    """Generate a supportive reply to *user_message* via the Inference API.

    Parameters
    ----------
    user_message : str
        The user's latest message.
    history : list
        Chat history; currently unused but kept for the Gradio chat
        callback signature.

    Returns
    -------
    str
        The model's reply, or a supportive fallback message when the API
        is unreachable, still loading, or not configured.
    """
    if not user_message.strip():
        return "Please share what's on your mind. I'm here to listen and support you."

    # Get API token from environment variable (set in Hugging Face Spaces).
    # Read lazily so the app can start even when the token is missing.
    api_token = os.environ.get('HF_TOKEN')
    if not api_token:
        return "API configuration needed. Please set HF_TOKEN environment variable."

    headers = {"Authorization": f"Bearer {api_token}"}

    # Instruction-formatted prompt for the Mistral-family chat template.
    prompt = f"""<s>[INST] You are a compassionate mental health assistant with medical knowledge.
Provide empathetic, supportive, and clinically-informed responses.
Be understanding, non-judgmental, and focus on active listening.
User: {user_message} [/INST] Assistant:"""

    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 256,
            "temperature": 0.7,
            "top_p": 0.9,
            "do_sample": True,
            "repetition_penalty": 1.1
        },
        "options": {
            # Block until the model is warm instead of failing immediately.
            "wait_for_model": True
        }
    }

    try:
        # Bounded timeout: without it a stalled connection hangs the Gradio
        # worker indefinitely. The try covers only the lines that can raise.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        result = response.json()
    except Exception:
        # Network or JSON-decoding failure: degrade gracefully for the user.
        return "I'm here to listen and support you. Could you share what's on your mind?"

    # On failure the API returns a dict with an 'error' key; guard with
    # isinstance so a list/str response isn't probed with `in` by mistake.
    if isinstance(result, dict) and 'error' in result:
        if "loading" in str(result['error']).lower():
            return "The AI model is loading. Please try again in 30-60 seconds."
        return "I'm here to support you. Could you tell me more about what you're experiencing?"

    # On success the API returns a list of generations.
    if isinstance(result, list) and len(result) > 0:
        generated_text = result[0].get('generated_text', '')
        # Keep only the text after the final "Assistant:" marker.
        return generated_text.split("Assistant:")[-1].strip()

    return "Thank you for sharing. I'm here to listen and provide support."
# Custom CSS for better styling: pins the emergency-helpline card to the
# bottom-right corner (#helpline-info) and caps the overall app width.
# Injected into the page via gr.Blocks(css=custom_css) below.
custom_css = """
#helpline-info {
position: fixed;
bottom: 10px;
right: 10px;
background: #f8f9fa;
padding: 12px;
border-radius: 8px;
border: 1px solid #dee2e6;
font-size: 12px;
max-width: 200px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
z-index: 1000;
}
.gradio-container {
max-width: 800px !important;
}
"""
# Build the Gradio UI: a chat transcript, an input row, starter examples,
# and a pinned helpline card, all wired to the BioMistral chat function.
with gr.Blocks(css=custom_css, title="Mental Health Companion") as demo:
    # App header.
    gr.Markdown("""
# 🧠 Mental Health Companion
### Powered by BioMistral - Medical AI Assistant
*Using Hugging Face Inference API for fast responses*
""")

    # Conversation transcript.
    chat_window = gr.Chatbot(
        label="Conversation",
        height=400,
        show_copy_button=True
    )

    # Message entry and send button share one row.
    with gr.Row():
        user_box = gr.Textbox(
            placeholder="Share what's on your mind...",
            label="Your Message",
            scale=8,
            container=False
        )
        submit_btn = gr.Button("Send", variant="primary", scale=1)

    with gr.Row():
        reset_btn = gr.Button("Clear Conversation", variant="secondary")

    # Clickable starter prompts that pre-fill the textbox.
    gr.Examples(
        examples=[
            ["I've been feeling really overwhelmed lately"],
            ["How can I manage anxiety and stress?"],
            ["I'm having trouble sleeping and it's affecting my mood"],
            ["What are some healthy coping mechanisms?"],
            ["I've been feeling isolated and lonely recently"]
        ],
        inputs=user_box
    )

    # Emergency helpline card, positioned bottom-right by #helpline-info CSS.
    gr.HTML("""
<div id="helpline-info">
<strong>🆘 Emergency Helplines:</strong><br>
• National Suicide Prevention: 988<br>
• Crisis Text Line: TEXT HOME to 741741<br>
• SAMHSA Helpline: 1-800-662-4357
</div>
""")

    def respond(message, chat_history):
        """Send *message* to the model and append the exchange to history."""
        if not message.strip():
            # Ignore empty submissions; leave the transcript untouched.
            return "", chat_history
        reply = mental_health_chat(message, chat_history)
        chat_history.append((message, reply))
        return "", chat_history

    # Wire events: Enter key and the Send button both submit; Clear resets.
    submit_btn.click(respond, [user_box, chat_window], [user_box, chat_window])
    user_box.submit(respond, [user_box, chat_window], [user_box, chat_window])
    reset_btn.click(lambda: None, None, chat_window, queue=False)
# Start the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()