Spaces:
Sleeping
Sleeping
Upload 2 files
Browse files- app.py +136 -0
- requirements.txt +14 -0
app.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Mental Health Support Chatbot
|
| 3 |
+
Built with Gradio + HuggingFace InferenceClient (correct chat API)
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import gradio as gr
|
| 8 |
+
from huggingface_hub import InferenceClient
|
| 9 |
+
from dotenv import load_dotenv
|
| 10 |
+
# Load .env file
|
| 11 |
+
load_dotenv()
|
| 12 |
+
|
| 13 |
+
HF_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
|
| 14 |
+
|
| 15 |
+
|
# System + therapy prompt: defines the bot's persona, tone rules, and the
# fixed crisis-response message. NOTE: the literal marker line
# "ONLY reply this message:" is load-bearing — generate_reply() splits this
# string on it to extract the crisis reply. Do not reword that line.
SYSTEM_PROMPT = """
You are a calm, compassionate Indian mental-health support companion.
You listen patiently, respond warmly, and provide emotional safety.

Tone guidelines:
- Gentle, caring, like a supportive Indian friend
- Validate feelings, never judge
- Encourage grounding: deep breathing, water, journaling, walk, sunlight
- Avoid diagnosing, medication, medical claims
- Encourage professional help when needed
- Replies short, caring, human

If the user expresses self-harm or suicidal thoughts:

ONLY reply this message:

"I'm really sorry you're feeling this pain. You are not alone, and your feelings matter.
Please reach out to someone right now - your safety is most important.

📞 **India Suicide Prevention Helplines**
• AASRA: +91-9820996549 (24×7)
• Fortis Stress Helpline: +91-8376804102
• iCall: +91-9152987821
• Snehi: +91-9582208181

More verified helplines: https://mhcare.in/

If possible, talk to a trusted family member or friend nearby.
Your life is precious, and help is available. I'm here with you."

(Stop conversation after this message.)
"""

# Initialize HF chat client. HF_TOKEN is read from the environment / .env
# above; if it is None the client falls back to huggingface_hub's own
# token resolution — presumably the Space provides one (verify in settings).
client = InferenceClient(
    model="meta-llama/Llama-3.1-8B-Instruct",
    token=HF_TOKEN
)
|
# Crisis detection
def is_crisis(text):
    """Return True when *text* contains a self-harm / suicide keyword.

    Matching is a simple case-insensitive substring scan; callers use this
    to short-circuit the model call with a fixed helpline message.
    """
    lowered = text.lower()
    for phrase in ("suicide", "kill myself", "end my life",
                   "can't live", "self harm", "harm myself"):
        if phrase in lowered:
            return True
    return False
|
| 59 |
+
|
| 60 |
+
# β
Chat model call
|
| 61 |
+
def generate_reply(history, user_message):
|
| 62 |
+
# Crisis check
|
| 63 |
+
if is_crisis(user_message):
|
| 64 |
+
return SYSTEM_PROMPT.split("ONLY reply this message:")[1].strip()
|
| 65 |
+
|
| 66 |
+
messages = [{"role": "system", "content": SYSTEM_PROMPT}]
|
| 67 |
+
|
| 68 |
+
# Handle both tuple & dict history formats
|
| 69 |
+
for h in history:
|
| 70 |
+
if isinstance(h, dict):
|
| 71 |
+
messages.append(h)
|
| 72 |
+
else:
|
| 73 |
+
messages.append({"role": "user", "content": h[0]})
|
| 74 |
+
messages.append({"role": "assistant", "content": h[1]})
|
| 75 |
+
|
| 76 |
+
messages.append({"role": "user", "content": user_message})
|
| 77 |
+
|
| 78 |
+
resp = client.chat_completion(
|
| 79 |
+
messages=messages,
|
| 80 |
+
max_tokens=250,
|
| 81 |
+
temperature=0.7,
|
| 82 |
+
top_p=0.9
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
return resp.choices[0].message["content"]
|
# Gradio handler
def chat_response(message, history):
    """Handle one chat turn: get a reply and return the full history
    in Gradio "messages" (role/content dict) format."""
    history = history or []

    reply = generate_reply(history, message)

    # Normalise any legacy (user, bot) tuples into role/content dicts.
    formatted = []
    for turn in history:
        if isinstance(turn, dict):
            formatted.append(turn)
        else:
            formatted.extend((
                {"role": "user", "content": turn[0]},
                {"role": "assistant", "content": turn[1]},
            ))

    formatted += [
        {"role": "user", "content": message},
        {"role": "assistant", "content": reply},
    ]
    return formatted
|
| 107 |
+
|
| 108 |
+
def clear_chat():
    """Return a fresh empty history list (wired to the Clear Chat button)."""
    return []
|
| 110 |
+
|
# UI: layout plus event wiring for the chat interface.
with gr.Blocks(title="🧠 Mental Health Support Buddy", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🧠 Mental Wellness Chatbot
    Here to listen. Here to support. You're not alone 💙
    > ⚠️ Not a substitute for professional therapy
    """)

    # type="messages" -> history is a list of role/content dicts,
    # matching what chat_response returns.
    chatbot = gr.Chatbot(type="messages", height=450)
    txt = gr.Textbox(label="How are you feeling today?", placeholder="Share your thoughts...")

    with gr.Row():
        send = gr.Button("Send", variant="primary")
        clear = gr.Button("Clear Chat")

    # Two handlers per trigger: one generates the reply, one clears the
    # textbox. Gradio captures input values at event time, so the clear
    # does not race with chat_response reading txt.
    send.click(chat_response, [txt, chatbot], chatbot)
    send.click(lambda: "", None, txt)

    # Pressing Enter in the textbox mirrors the Send button.
    txt.submit(chat_response, [txt, chatbot], chatbot)
    txt.submit(lambda: "", None, txt)

    clear.click(clear_chat, None, chatbot)

# Run app (share=True builds a public tunnel when run locally; on a
# hosted Space it is ignored).
if __name__ == "__main__":
    demo.launch(share=True)
requirements.txt
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core
|
| 2 |
+
gradio==4.44.1
|
| 3 |
+
huggingface-hub>=0.17.2
|
| 4 |
+
python-dotenv>=1.0.1
|
| 5 |
+
|
| 6 |
+
# Optional heavy deps - not imported by app.py; remove torch/transformers to slim the build
|
| 7 |
+
torch>=2.1.0
|
| 8 |
+
transformers>=4.35.0
|
| 9 |
+
|
| 10 |
+
# Networking / HTTP requests
|
| 11 |
+
requests>=2.31.0
|
| 12 |
+
|
| 13 |
+
# For typing & utils
|
| 14 |
+
typing-extensions>=4.7.1
|