phxdev committed on
Commit
fe49dd8
·
verified ·
1 Parent(s): c8f78c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +111 -243
app.py CHANGED
@@ -3,8 +3,6 @@ import torch
3
  from peft import PeftModel
4
  from transformers import AutoModelForCausalLM, AutoTokenizer
5
  import warnings
6
- import time
7
- import spaces
8
 
9
  warnings.filterwarnings('ignore')
10
 
@@ -14,19 +12,17 @@ tokenizer = None
14
  device = None
15
 
16
  def load_model():
17
- """Load the psychology-tuned model with error handling"""
18
  global model, tokenizer, device
19
 
20
  try:
21
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
22
- print(f"Loading model on {device}...")
23
 
24
- # Load tokenizer
25
  tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
26
  if tokenizer.pad_token is None:
27
  tokenizer.pad_token = tokenizer.eos_token
28
 
29
- # Load base model
30
  base_model = AutoModelForCausalLM.from_pretrained(
31
  "Qwen/Qwen2.5-0.5B",
32
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
@@ -34,66 +30,43 @@ def load_model():
34
  trust_remote_code=True
35
  )
36
 
37
- # Load PEFT adapter
38
  model = PeftModel.from_pretrained(base_model, "phxdev/psychology-qwen-0.5b")
39
  model = model.merge_and_unload()
40
 
41
  if not torch.cuda.is_available():
42
  model = model.to(device)
43
 
44
- print("✅ Model loaded successfully!")
45
  return True
46
 
47
  except Exception as e:
48
  print(f"❌ Error loading model: {e}")
49
  return False
50
 
51
- @spaces.GPU
52
- def generate_response(
53
- message: str,
54
- history: list,
55
- temperature: float = 0.8,
56
- max_tokens: int = 300,
57
- prompt_style: str = "Therapeutic"
58
- ) -> str:
59
- """Generate psychology-focused response"""
 
 
 
 
 
60
 
61
  if model is None:
62
- return "⚠️ Model is still loading. Please wait a moment and try again."
63
 
64
- # Define prompt templates
65
- prompt_templates = {
66
- "Therapeutic": """You're a supportive therapist in session. The client just said: "{message}"
67
-
68
- Respond with empathy and practical guidance. Start with validation, then give 2-3 specific strategies they can try:""",
69
-
70
- "Supportive Friend": """You're a caring friend who studied psychology. Someone you care about just told you: "{message}"
71
-
72
- Give them warm, understanding support with practical advice:""",
73
-
74
- "Crisis Support": """You are a crisis counselor. Someone is reaching out for support. They said: "{message}"
75
-
76
- Provide immediate, caring support and grounding techniques:""",
77
-
78
- "Anxiety Coach": """You're an anxiety specialist. Help them manage their anxiety with evidence-based techniques.
79
-
80
- They're struggling with: "{message}"
81
 
82
- Here are specific techniques they can try right now:""",
83
-
84
- "Mindfulness Guide": """You're a mindfulness teacher. Guide them toward present-moment awareness and self-compassion.
85
-
86
- They shared: "{message}"
87
-
88
- Offer mindful perspective and a gentle practice:"""
89
- }
90
-
91
- # Select and format prompt
92
- template = prompt_templates.get(prompt_style, prompt_templates["Therapeutic"])
93
- formatted_prompt = template.format(message=message)
94
 
95
  try:
96
- # Tokenize
97
  inputs = tokenizer(
98
  formatted_prompt,
99
  return_tensors="pt",
@@ -101,12 +74,11 @@ Offer mindful perspective and a gentle practice:"""
101
  max_length=512
102
  ).to(device)
103
 
104
- # Generate
105
  with torch.no_grad():
106
  outputs = model.generate(
107
  **inputs,
108
- max_length=len(inputs.input_ids[0]) + max_tokens,
109
- temperature=temperature,
110
  top_p=0.9,
111
  do_sample=True,
112
  repetition_penalty=1.1,
@@ -114,11 +86,9 @@ Offer mindful perspective and a gentle practice:"""
114
  eos_token_id=tokenizer.eos_token_id,
115
  )
116
 
117
- # Decode
118
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
119
  generated_text = response[len(formatted_prompt):].strip()
120
 
121
- # Clean up response
122
  if generated_text.startswith('"') and generated_text.endswith('"'):
123
  generated_text = generated_text[1:-1]
124
 
@@ -127,211 +97,109 @@ Offer mindful perspective and a gentle practice:"""
127
  except Exception as e:
128
  return f"⚠️ Error generating response: {str(e)}"
129
 
130
- def create_interface():
131
- """Create the main Gradio interface"""
 
 
 
 
132
 
133
- # Custom CSS for modern, calming design
134
- custom_css = """
135
- .main-header {
136
- text-align: center;
137
- padding: 2rem;
138
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
139
- color: white;
140
- border-radius: 10px;
141
- margin-bottom: 2rem;
142
- }
143
- .disclaimer-box {
144
- background-color: #f8f9fa;
145
- border-left: 4px solid #17a2b8;
146
- padding: 1rem;
147
- margin: 1rem 0;
148
- border-radius: 5px;
149
- }
 
 
 
 
 
150
  """
 
151
 
152
- with gr.Blocks(css=custom_css, title="Psychology AI Assistant", theme=gr.themes.Soft()) as demo:
153
-
154
- # Header
155
- gr.HTML("""
156
- <div class="main-header">
157
- <h1>🧠 Psychology AI Assistant</h1>
158
- <p>A supportive AI trained in psychology and mental health</p>
159
- <p><em>Powered by fine-tuned Qwen 2.5-0.5B</em></p>
160
- </div>
161
- """)
162
-
163
- # Disclaimer
164
- gr.HTML("""
165
- <div class="disclaimer-box">
166
- <strong>⚠️ Important Disclaimer:</strong> This AI assistant provides supportive guidance based on psychological principles,
167
- but it is not a replacement for professional therapy or medical advice. If you're experiencing a mental health crisis,
168
- please contact a mental health professional or crisis hotline immediately.
169
- <br><br>
170
- <strong>Crisis Resources:</strong>
171
- <ul>
172
- <li><strong>988 Suicide & Crisis Lifeline:</strong> Call or text 988</li>
173
- <li><strong>Crisis Text Line:</strong> Text HOME to 741741</li>
174
- <li><strong>Emergency:</strong> Call 911 or your local emergency number</li>
175
- </ul>
176
- </div>
177
- """)
178
-
179
- with gr.Row():
180
- with gr.Column(scale=3):
181
- # Main chat interface
182
- chatbot = gr.Chatbot(
183
- height=500,
184
- placeholder="👋 Hi! I'm here to provide supportive guidance and practical strategies for mental wellness. What's on your mind today?",
185
- avatar_images=("🧑‍💼", "🧠")
186
- )
187
-
188
- with gr.Row():
189
- message_input = gr.Textbox(
190
- placeholder="Share what's on your mind...",
191
- container=False,
192
- scale=4,
193
- lines=2
194
- )
195
- send_btn = gr.Button("Send", variant="primary", scale=1)
196
-
197
- # Simplified example prompts
198
- gr.HTML("<h3>💡 Try these conversation starters:</h3>")
199
-
200
- with gr.Row():
201
- example1 = gr.Button("I feel overwhelmed with work and don't know how to manage everything", size="sm")
202
- example2 = gr.Button("I can't stop worrying about things that might go wrong", size="sm")
203
-
204
- with gr.Row():
205
- example3 = gr.Button("I have trouble setting boundaries with people", size="sm")
206
- example4 = gr.Button("I constantly criticize myself and focus on my mistakes", size="sm")
207
-
208
- with gr.Row():
209
- example5 = gr.Button("I'm going through a major life transition and feel lost", size="sm")
210
- example6 = gr.Button("I have panic attacks before important meetings", size="sm")
211
-
212
- with gr.Column(scale=1):
213
- # Settings panel
214
- gr.HTML("<h3>⚙️ Settings</h3>")
215
-
216
- prompt_style = gr.Dropdown(
217
- choices=["Therapeutic", "Supportive Friend", "Crisis Support", "Anxiety Coach", "Mindfulness Guide"],
218
- value="Therapeutic",
219
- label="Response Style",
220
- info="Choose the type of support you prefer"
221
- )
222
-
223
- temperature = gr.Slider(
224
- minimum=0.1,
225
- maximum=1.0,
226
- value=0.8,
227
- step=0.1,
228
- label="Creativity",
229
- info="Higher = more creative responses"
230
- )
231
-
232
- max_tokens = gr.Slider(
233
- minimum=100,
234
- maximum=500,
235
- value=300,
236
- step=50,
237
- label="Response Length",
238
- info="Maximum response length"
239
- )
240
-
241
- clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary")
242
-
243
- # Model info
244
- gr.HTML("""
245
- <div style="margin-top: 2rem; padding: 1rem; background-color: #f8f9fa; border-radius: 5px;">
246
- <h4>📊 Model Info</h4>
247
- <p><strong>Base Model:</strong> Qwen/Qwen2.5-0.5B</p>
248
- <p><strong>Fine-tuned:</strong> phxdev/psychology-qwen-0.5b</p>
249
- <p><strong>Specialization:</strong> Psychology & Mental Health</p>
250
- <p><strong>Training:</strong> PEFT/LoRA</p>
251
- </div>
252
- """)
253
-
254
- # Chat functionality
255
- def respond(message, history, temp, max_tok, style):
256
- if not message.strip():
257
- return history, ""
258
-
259
- # Add user message
260
- history = history + [[message, None]]
261
-
262
- # Generate response
263
- bot_response = generate_response(message, history, temp, max_tok, style)
264
-
265
- # Add bot response
266
- history[-1][1] = bot_response
267
-
268
- return history, ""
269
-
270
- def clear_chat():
271
- return [], ""
272
-
273
- def set_example(text):
274
- return text
275
-
276
- # Event handlers
277
- send_btn.click(
278
- respond,
279
- inputs=[message_input, chatbot, temperature, max_tokens, prompt_style],
280
- outputs=[chatbot, message_input]
281
- )
282
-
283
- message_input.submit(
284
- respond,
285
- inputs=[message_input, chatbot, temperature, max_tokens, prompt_style],
286
- outputs=[chatbot, message_input]
287
- )
288
-
289
- clear_btn.click(clear_chat, outputs=[chatbot, message_input])
290
-
291
- # Example button handlers (simplified)
292
- example1.click(set_example, inputs=[gr.State("I feel overwhelmed with work and don't know how to manage everything")], outputs=[message_input])
293
- example2.click(set_example, inputs=[gr.State("I can't stop worrying about things that might go wrong")], outputs=[message_input])
294
- example3.click(set_example, inputs=[gr.State("I have trouble setting boundaries with people")], outputs=[message_input])
295
- example4.click(set_example, inputs=[gr.State("I constantly criticize myself and focus on my mistakes")], outputs=[message_input])
296
- example5.click(set_example, inputs=[gr.State("I'm going through a major life transition and feel lost")], outputs=[message_input])
297
- example6.click(set_example, inputs=[gr.State("I have panic attacks before important meetings")], outputs=[message_input])
298
-
299
- # Footer
300
- gr.HTML("""
301
- <div style="text-align: center; margin-top: 2rem; padding: 1rem; color: #666;">
302
- <p>Built with ❤️ using Gradio • Fine-tuned by @phxdev</p>
303
- <p><strong>For Mark. For Josh. For everyone who needs someone.</strong></p>
304
- </div>
305
- """)
306
 
307
- return demo
 
 
 
 
 
 
 
 
 
308
 
309
- # Initialize the model
310
- print("🚀 Starting Psychology AI Assistant...")
 
 
 
 
 
 
 
 
 
311
  model_loaded = load_model()
312
 
313
  if model_loaded:
314
- print("✅ Model loaded successfully!")
315
- demo = create_interface()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316
 
317
  if __name__ == "__main__":
318
  demo.launch(
319
- share=True,
320
- server_name="0.0.0.0",
 
321
  server_port=7860,
322
  show_error=True
323
  )
 
324
  else:
325
- print("❌ Failed to load model. Creating error interface...")
326
-
327
- with gr.Blocks() as demo:
328
- gr.HTML("""
329
- <div style="text-align: center; padding: 2rem;">
330
- <h1>⚠️ Model Loading Error</h1>
331
- <p>Sorry, there was an issue loading the psychology model.</p>
332
- <p>Please try refreshing the page or contact support.</p>
333
- </div>
334
- """)
335
 
336
  if __name__ == "__main__":
337
  demo.launch()
 
3
  from peft import PeftModel
4
  from transformers import AutoModelForCausalLM, AutoTokenizer
5
  import warnings
 
 
6
 
7
  warnings.filterwarnings('ignore')
8
 
 
12
  device = None
13
 
14
def load_model():
    """Load the psychology-tuned Qwen model and its LoRA adapter.

    Populates the module-level ``model``, ``tokenizer`` and ``device``
    globals as a side effect.

    Returns:
        bool: True when the model is ready for inference, False on any
        loading failure (the error is printed, never raised).
    """
    global model, tokenizer, device

    try:
        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda" if use_cuda else "cpu")
        print(f"Loading psychology model on {device}...")

        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
        # Qwen ships without a pad token; alias it to EOS so padding works.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        # fp16 only pays off on GPU; CPU inference stays in fp32.
        dtype = torch.float16 if use_cuda else torch.float32
        base_model = AutoModelForCausalLM.from_pretrained(
            "Qwen/Qwen2.5-0.5B",
            torch_dtype=dtype,
            # NOTE(review): the diff hides one kwarg here (original line 29,
            # likely device_map) — confirm against the full file.
            trust_remote_code=True
        )

        # Apply the psychology LoRA adapter, then fold it into the base
        # weights so generation runs on a plain merged model.
        peft_model = PeftModel.from_pretrained(base_model, "phxdev/psychology-qwen-0.5b")
        model = peft_model.merge_and_unload()

        if not use_cuda:
            model = model.to(device)

        print("✅ Psychology model loaded successfully!")
        return True
    except Exception as e:
        print(f"❌ Error loading model: {e}")
        return False
45
 
46
def psychology_support(message: str) -> str:
    """
    Provide empathetic psychology support and mental health guidance.

    Runs the fine-tuned psychology model over a fixed therapeutic prompt
    template and returns the generated guidance.

    Args:
        message (str): The user's concern, struggle, or question about
            mental health.

    Returns:
        str: Empathetic, evidence-based psychological guidance, or a
        user-facing warning string if the model is still loading or
        generation fails.
    """
    if model is None:
        return "⚠️ Psychology model is loading. Please try again in a moment."

    # Therapeutic prompt template
    formatted_prompt = f"""You're a supportive therapist in session. The client just said: "{message}"

Respond with empathy and practical guidance. Start with validation, then give 2-3 specific strategies they can try:"""

    try:
        # truncation=True makes max_length an actual cap on the prompt;
        # without it the tokenizer ignores max_length entirely.
        inputs = tokenizer(
            formatted_prompt,
            return_tensors="pt",
            truncation=True,
            max_length=512
        ).to(device)

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                # max_new_tokens caps only the generated continuation —
                # clearer and safer than max_length=prompt_len + 300.
                max_new_tokens=300,
                temperature=0.8,
                top_p=0.9,
                do_sample=True,
                repetition_penalty=1.1,
                # pad token was aliased to EOS at load time; passing it
                # explicitly silences the open-ended-generation warning.
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )

        # Strip the echoed prompt so only the model's reply remains.
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        generated_text = response[len(formatted_prompt):].strip()

        # Drop a single pair of wrapping quotes the model sometimes emits.
        if generated_text.startswith('"') and generated_text.endswith('"'):
            generated_text = generated_text[1:-1]

        # NOTE(review): the diff hides the original return lines (new file
        # lines 95-96) — plain return of the cleaned text reconstructed;
        # confirm against the full file.
        return generated_text

    except Exception as e:
        return f"⚠️ Error generating response: {str(e)}"
99
 
100
def crisis_resources() -> str:
    """Return immediate crisis-intervention resources and emergency contacts.

    The text is a static, pre-formatted block covering US suicide/crisis
    hotlines, specialized support lines, and emergency guidance.

    Returns:
        str: Formatted list of crisis resources and emergency contacts.
    """
    # Static text — no model call involved, so this tool is always
    # available even while the language model is still loading.
    resources = """🆘 IMMEDIATE CRISIS RESOURCES:

**If you're in immediate danger, call 911 or your local emergency number.**

**24/7 Crisis Support:**
• 988 Suicide & Crisis Lifeline: Call or text 988 (US)
• Crisis Text Line: Text HOME to 741741 (US)
• International Association for Suicide Prevention: https://iasp.info/resources/Crisis_Centres/

**Specialized Support:**
• National Domestic Violence Hotline: 1-800-799-7233
• SAMHSA National Helpline: 1-800-662-4357 (mental health & substance abuse)
• Trans Lifeline: 877-565-8860
• LGBT National Hotline: 1-888-843-4564
• Veterans Crisis Line: 1-800-273-8255

You are not alone. Help is available 24/7."""
    return resources
127
+
128
def mindfulness_exercise() -> str:
    """
    Get a guided mindfulness or grounding exercise for immediate stress relief.

    Provides step-by-step mindfulness and grounding exercises that can be done
    anywhere to help manage anxiety, panic, or overwhelming emotions.

    Returns:
        str: Step-by-step guided mindfulness or grounding exercise
    """
    # Fix: the first Box Breathing item was missing its "• " bullet,
    # inconsistent with the other four items in the same list.
    return """🧘 5-MINUTE GROUNDING EXERCISE:

**Right now, notice:**
1. **5 things you can SEE** around you (colors, shapes, objects)
2. **4 things you can TOUCH** (chair, table, your clothes, air temperature)
3. **3 things you can HEAR** (traffic, breathing, birds, AC)
4. **2 things you can SMELL** (coffee, soap, air freshener)
5. **1 thing you can TASTE** (gum, coffee, or just your mouth)

**Breathe slowly:** In for 4 counts, hold for 4, out for 6.

**Box Breathing:**
• Inhale for 4 counts
• Hold for 4 counts
• Exhale for 4 counts
• Hold empty for 4 counts
• Repeat 4-6 times

You are safe. You are here. This moment will pass."""
158
+
159
# Initialize the model once at import time — Gradio / HF Spaces expects a
# module-level `demo` object to exist regardless of how the file is run.
print("🚀 Starting Psychology Personal Agent...")
model_loaded = load_model()

if model_loaded:
    print("✅ Creating Personal Agent interface...")

    # Primary interface: free-text concern in, model-generated support out.
    demo = gr.Interface(
        fn=psychology_support,
        inputs=gr.Textbox(
            label="What's on your mind?",
            placeholder="Share your thoughts, concerns, or struggles...",
            lines=3
        ),
        outputs=gr.Textbox(label="Personal Agent Response"),
        title="🧠 Personal Agent - Psychology Support",
        description="Specialized mental health support • For Mark. For Josh. For everyone who needs someone.",
        examples=[
            "I've been having panic attacks at work and don't know what to do",
            "I can't stop worrying about things that might go wrong",
            "I feel like I'm not good enough at my job",
            "I'm going through a major life transition and feel lost",
            "I have trouble setting boundaries with people"
        ]
    )
else:
    print("❌ Failed to load psychology model")
    # Fallback interface so the Space still serves a page instead of crashing.
    demo = gr.Interface(
        fn=lambda x: "⚠️ Model loading error. Please try again later.",
        inputs="text",
        outputs="text",
        title="⚠️ Personal Agent - Loading Error"
    )

# Fix: a single launch guard replaces the duplicated `__main__` guard that
# lived inside each branch; the fallback previously launched with default
# settings (wrong port, no MCP), so both paths now share one config.
if __name__ == "__main__":
    demo.launch(
        mcp_server=True,  # MCP enabled
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )