avtak committed on
Commit
213f41f
·
verified ·
1 Parent(s): fd5498f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -177
app.py CHANGED
@@ -9,7 +9,7 @@ hf_token = os.getenv("HF_TOKEN")
9
  if hf_token:
10
  login(token=hf_token)
11
 
12
- # --- 2. LOAD TOOL ---
13
  print("Loading Mental-Longformer...")
14
  model_name = "avtak/erisk-longformer-depression-v1"
15
  classifier = pipeline("text-classification", model=model_name, truncation=True, max_length=4096, top_k=None)
@@ -23,6 +23,7 @@ def get_crisis_resources(location: str = "Global") -> str:
23
  "Malaysia": "πŸ‡²πŸ‡Ύ **Malaysia:** Befrienders KL: 03-76272929 | Talian Kasih: 15999",
24
  "Global": "🌍 **International:** [befrienders.org](https://www.befrienders.org)"
25
  }
 
26
  for key in resources:
27
  if location and key.lower() in location.lower():
28
  return resources[key]
@@ -30,238 +31,159 @@ def get_crisis_resources(location: str = "Global") -> str:
30
 
31
  def detect_depression_risk(text: str) -> dict:
32
  """Analyzes text using Mental-Longformer (eRisk 2025)."""
33
-
34
- # --- THESIS LOGIC: AGGREGATION (CRITICAL) ---
35
  # We replace single newlines with double newlines so the model sees distinct posts
36
- # This matches your Master's Thesis data preparation method.
37
- processed_text = text.replace("\n", "\n\n")
38
 
39
  results = classifier(processed_text)[0]
40
  prob = next((r['score'] for r in results if r['label'] == 'LABEL_1'), 0)
41
 
42
- # Thesis Thresholds (Figure 4.15)
43
- if prob < 0.40:
44
- level = "Low Risk"
45
- biomarker = "Healthy External Focus"
46
- desc = "Matches 'Isolated Control' group. High lexical diversity, focus on hobbies/events."
47
- color = "#10b981" # Green
48
- elif 0.40 <= prob < 0.60:
49
- level = "Moderate Risk"
50
- biomarker = "Echo Chamber Interaction"
51
- desc = "Matches 'Interactive Non-Depressed' group. Engaging in support forums but likely not clinically depressed (Supportive Responder)."
52
- color = "#f59e0b" # Yellow
53
- else:
54
- level = "High Risk"
55
- biomarker = "Nocturnal & High-Effort"
56
- desc = "Matches 'Depressed' cohort. Indicators: Nocturnal posting spikes (00-05 UTC), high-effort/low-frequency posting."
57
- color = "#ef4444" # Red
58
 
59
- return {
60
- "probability": prob,
61
- "risk_level": level,
62
- "biomarker": biomarker,
63
- "description": desc,
64
- "color": color,
65
- "word_count": len(processed_text.split())
66
- }
67
-
68
- # --- 4. AGENT LOGIC (Dual State Management) ---
69
-
70
- def generate_response(api_history, risk_context, provider):
71
- """Generates response using Sponsor API."""
72
- if not risk_context:
73
- risk_context = {"risk_level": "Unknown", "probability": 0.0, "description": "No analysis run yet."}
74
 
 
 
 
 
 
75
  client = None
76
- model_id = None
77
 
78
- if provider == "SambaNova":
79
- client = OpenAI(base_url="https://api.sambanova.ai/v1", api_key=os.getenv("SAMBANOVA_API_KEY"))
80
- model_id = "Meta-Llama-3.3-70B-Instruct"
81
- else:
82
- client = OpenAI(base_url="https://api.tokenfactory.nebius.com/v1/", api_key=os.getenv("NEBIUS_API_KEY"))
83
- model_id = "moonshotai/Kimi-K2-Thinking"
84
-
85
  system_prompt = f"""
86
- You are 'Dr. Longformer', a specialized Clinical AI Assistant based on Hassan's 2025 Thesis.
87
- CURRENT USER CONTEXT:
88
- - Analyzed Risk: {risk_context['risk_level']} ({risk_context['probability']:.1%})
89
- - Detected Pattern: {risk_context['description']}
90
- YOUR GOAL: Provide supportive, scientifically-grounded chat. Max 2 sentences.
 
 
 
 
 
 
 
 
91
  """
92
 
93
- messages = [{"role": "system", "content": system_prompt}]
94
- messages.extend(api_history)
95
-
96
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  response = client.chat.completions.create(
98
- model=model_id,
99
- messages=messages,
100
- temperature=0.7,
 
 
 
101
  max_tokens=300
102
  )
103
  return response.choices[0].message.content
104
- except Exception as e:
105
- return f"⚠️ Error with {provider}: {str(e)}"
106
 
107
- # --- 5. UI ORCHESTRATION ---
 
108
 
109
- def run_analysis(text, location, provider):
110
- if not text.strip(): return None, [], [], None
 
111
 
112
- # 1. Run Tool
113
- data = detect_depression_risk(text)
114
  resources = get_crisis_resources(location)
 
115
 
116
- # 2. Visual Dashboard
117
- html_dashboard = f"""
118
- <div style="padding: 20px; border-radius: 12px; background-color: {data['color']}15; border: 1px solid {data['color']};">
119
- <div style="display: flex; justify-content: space-between; align-items: center;">
120
- <h2 style="color: {data['color']}; margin: 0;">{data['risk_level']}</h2>
121
- <h3 style="margin: 0;">{data['probability']*100:.1f}% Match</h3>
122
- </div>
123
- <div style="background-color: #e5e7eb; border-radius: 9999px; height: 10px; margin-top: 10px; width: 100%;">
124
- <div style="background-color: {data['color']}; height: 10px; border-radius: 9999px; width: {data['probability']*100}%;"></div>
125
- </div>
126
- <div style="margin-top: 20px;">
127
- <p><strong>🧠 Thesis Reflection:</strong> {data['biomarker']}</p>
128
- <p style="opacity: 0.8; font-size: 0.9em;">{data['description']}</p>
129
- </div>
130
- <div style="margin-top: 15px; padding-top: 15px; border-top: 1px solid #ccc;">
131
- <p><strong>πŸ†˜ Local Resources ({location}):</strong></p>
132
- <pre style="font-family: inherit; white-space: pre-wrap; margin: 0;">{resources}</pre>
133
- </div>
134
- </div>
135
- """
136
-
137
- # 3. Agent Greeting
138
- # Format for API: List of Dicts
139
- api_history = [{"role": "user", "content": "I just ran the analysis. Please explain my results."}]
140
- ai_msg = generate_response(api_history, data, provider)
141
- api_history.append({"role": "assistant", "content": ai_msg})
142
 
143
- # Format for UI: List of Tuples [(None, AI_Message)]
144
- # CRITICAL FIX: This standard format works on ALL Gradio versions
145
- ui_history = [(None, ai_msg)]
146
-
147
- return html_dashboard, ui_history, api_history, data
148
-
149
- def user_chat(user_message, ui_history, api_history, risk_data, provider):
150
- if not user_message: return ui_history, api_history, ""
151
-
152
- # 1. Update API History
153
- api_history.append({"role": "user", "content": user_message})
154
-
155
- # 2. Update UI History (User message only first)
156
- ui_history.append([user_message, None])
157
-
158
- # 3. Generate Response
159
- ai_msg = generate_response(api_history, risk_data, provider)
160
-
161
- # 4. Update API History
162
- api_history.append({"role": "assistant", "content": ai_msg})
163
 
164
- # 5. Update UI History (Add AI response)
165
- ui_history[-1][1] = ai_msg
166
 
167
- return ui_history, api_history, ""
 
 
168
 
169
- # --- 6. EXAMPLES (RESTORED) ---
170
  example_low = """The new update for the Linux kernel (6.8) finally addressed the driver issues I was seeing with Realtek cards. I read the changelog on kernel.org and noticed they patched the module specific to the rtl8821ce chipset.
171
-
172
  On a different note, the defensive stats for the basketball team have improved significantly. Allowing 15% fewer points per possession is a game changer.
173
-
174
  I also spent some time woodworking. I'm building a walnut coffee table and the grain is tricky to plane. I might switch to a polyurethane finish for durability."""
175
 
176
  example_mod = """Things have been pretty busy at work. We're in the middle of a big project and deadlines are tight. Staying a bit later than usual but that's just how it goes sometimes. Team is handling it well overall.
177
-
178
  Sleep could be better. Been staying up too late scrolling my phone. Need to work on that. Usually feel okay once I get moving in the morning though.
179
-
180
- Went hiking with some friends last weekend which was nice. Good to get outside and move around. We're talking about doing another trip next month. Weather should be better by then."""
181
 
182
  example_high = """I don't know why I even bother getting out of bed anymore. I slept for 12 hours yesterday and I'm still exhausted. It feels like my limbs weigh a thousand pounds.
183
-
184
  I've been avoiding my friends for weeks. They keep texting me, but I can't bring myself to reply. The thought of socializing is terrifying.
185
-
186
  Everything feels like a shade of grey. I can't concentrate on my work. I feel like I'm drowning while everyone else is breathing fine. I was up until 4 AM again last night just staring at the ceiling."""
187
 
188
- # --- 7. UI LAYOUT ---
189
  with gr.Blocks(title="Depression Risk Agent") as demo:
190
  gr.Markdown("# 🧠 Early Depression Detection Agent (MCP)")
191
- gr.Markdown("Agentic system using **Mental-Longformer** (Tool) + **SambaNova/Nebius** (Reasoning).")
192
- gr.Markdown("⚑ **Powered by:** [SambaNova](https://sambanova.ai/) (Llama 3.3) & [Nebius](https://nebius.com/) (Kimi K2)")
193
-
194
- # Internal State
195
- risk_state = gr.State(None)
196
- api_state = gr.State([]) # Stores [{"role":...}] for the LLM
197
 
198
  with gr.Row():
199
- # LEFT: INPUT
200
  with gr.Column(scale=1):
 
201
  input_text = gr.Textbox(
202
- label="User Timeline (Paste posts here)",
203
- lines=8,
204
- placeholder="[Post 1] ...\n\n[Post 2] ...",
205
- value=example_high
206
  )
207
 
208
- # EXAMPLES ROW
209
- gr.Markdown("### πŸ” Try Thesis Patterns")
210
  with gr.Row():
211
- btn_low = gr.Button("🟒 Low Risk", size="sm")
212
- btn_mod = gr.Button("🟑 Moderate", size="sm")
213
- btn_high = gr.Button("πŸ”΄ High Risk", size="sm")
214
 
215
- # SETTINGS ROW
216
- gr.Markdown("### βš™οΈ Settings")
217
  with gr.Row():
218
- loc_drop = gr.Dropdown(["Global", "US", "Malaysia"], value="Malaysia", label="Crisis Resource Region")
219
- prov_drop = gr.Dropdown(["SambaNova", "Nebius"], value="SambaNova", label="Agent Brain")
 
220
 
221
- analyze_btn = gr.Button("πŸš€ Run Clinical Analysis", variant="primary", size="lg")
222
 
223
- with gr.Accordion("πŸ”§ MCP Tools Exposed", open=False):
224
- gr.Markdown("- `detect_depression_risk`\n- `get_crisis_resources`")
225
-
226
- # RIGHT: DASHBOARD & CHAT
227
  with gr.Column(scale=1):
228
- dashboard = gr.HTML(label="Clinical Dashboard")
229
-
230
- # FIXED: REMOVED type="messages" to prevent crash. Uses standard Tuples.
231
- chatbot = gr.Chatbot(label="Agent Chat", height=400)
232
 
233
- msg_input = gr.Textbox(label="Chat with Agent", placeholder="Ask about your results or get advice...")
234
- send_btn = gr.Button("Send Message")
 
 
 
 
235
 
236
- # WIRING
237
 
238
- # 1. Analyze Button -> Updates Dashboard, Chatbot, Risk State
239
- analyze_btn.click(
240
- run_analysis,
241
- inputs=[input_text, loc_drop, prov_drop],
242
- outputs=[dashboard, chatbot, api_state, risk_state]
243
- )
244
-
245
- # 2. Example Buttons
246
  btn_low.click(lambda: example_low, None, input_text)
247
  btn_mod.click(lambda: example_mod, None, input_text)
248
  btn_high.click(lambda: example_high, None, input_text)
249
-
250
- # 3. Chat Interactions
251
- # Note: We pass 'chatbot' as both input and output (it holds the history in 'tuples' format)
252
- send_btn.click(
253
- user_chat,
254
- inputs=[msg_input, chatbot, api_state, risk_state, prov_drop],
255
- outputs=[chatbot, api_state, msg_input]
256
- )
257
- msg_input.submit(
258
- user_chat,
259
- inputs=[msg_input, chatbot, api_state, risk_state, prov_drop],
260
- outputs=[chatbot, api_state, msg_input]
261
- )
262
 
263
  if __name__ == "__main__":
264
- demo.launch(
265
- mcp_server=True,
266
- theme=gr.themes.Soft()
267
- )
 
9
  if hf_token:
10
  login(token=hf_token)
11
 
12
+ # --- 2. TOOL LOADING ---
13
  print("Loading Mental-Longformer...")
14
  model_name = "avtak/erisk-longformer-depression-v1"
15
  classifier = pipeline("text-classification", model=model_name, truncation=True, max_length=4096, top_k=None)
 
23
  "Malaysia": "πŸ‡²πŸ‡Ύ **Malaysia:** Befrienders KL: 03-76272929 | Talian Kasih: 15999",
24
  "Global": "🌍 **International:** [befrienders.org](https://www.befrienders.org)"
25
  }
26
+ # Fallback logic
27
  for key in resources:
28
  if location and key.lower() in location.lower():
29
  return resources[key]
 
31
 
32
  def detect_depression_risk(text: str) -> dict:
33
  """Analyzes text using Mental-Longformer (eRisk 2025)."""
34
+ # Thesis Aggregation Logic:
 
35
  # We replace single newlines with double newlines so the model sees distinct posts
36
+ processed_text = text.replace("\n", "\n\n")
 
37
 
38
  results = classifier(processed_text)[0]
39
  prob = next((r['score'] for r in results if r['label'] == 'LABEL_1'), 0)
40
 
41
+ if prob < 0.40: level = "Low Risk (Isolated Pattern)"
42
+ elif 0.40 <= prob < 0.60: level = "Moderate Risk (Interactive Echo Chamber Pattern)"
43
+ else: level = "High Risk (Depressed Pattern)"
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
+ return {"probability": prob, "risk_level": level, "word_count": len(processed_text.split())}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
+ # --- 4. AGENT REASONING (UPDATED MODELS) ---
48
+ def agent_reasoning(text, risk_data, provider="SambaNova"):
49
+ """
50
+ Uses Sponsor APIs to interpret the clinical data.
51
+ """
52
  client = None
53
+ model = None
54
 
55
+ # SYSTEM PROMPT (Thesis-Informed)
 
 
 
 
 
 
56
  system_prompt = f"""
57
+ You are 'Dr. Longformer', an empathetic Clinical AI Research Agent.
58
+
59
+ CLINICAL DATA (Mental-Longformer):
60
+ - Risk Level: {risk_data['risk_level']}
61
+ - Probability: {risk_data['probability']:.1%}
62
+ - Input Length: {risk_data['word_count']} words
63
+
64
+ USER TEXT SNIPPET: "{text[:800]}..."
65
+
66
+ INSTRUCTIONS:
67
+ 1. Acknowledge the user's situation based on the text.
68
+ 2. Explain the risk level using thesis terms: 'Nocturnal Posting' (High), 'Supportive Responder' (Moderate), or 'Healthy External Focus' (Low).
69
+ 3. Be compassionate. Max 100 words.
70
  """
71
 
 
 
 
72
  try:
73
+ # --- SPONSOR 1: NEBIUS (Kimi K2) ---
74
+ if provider == "Nebius (Kimi K2)":
75
+ api_key = os.getenv("NEBIUS_API_KEY")
76
+ if not api_key: return "⚠️ Nebius API Key missing."
77
+
78
+ client = OpenAI(base_url="https://api.tokenfactory.nebius.com/v1/", api_key=api_key)
79
+ # UPDATED MODEL ID
80
+ model = "moonshotai/Kimi-K2-Thinking"
81
+
82
+ # --- SPONSOR 2: SAMBANOVA (Llama 3.3) ---
83
+ elif provider == "SambaNova":
84
+ api_key = os.getenv("SAMBANOVA_API_KEY")
85
+ if not api_key: return "⚠️ SambaNova API Key missing."
86
+
87
+ client = OpenAI(base_url="https://api.sambanova.ai/v1", api_key=api_key)
88
+ # UPDATED MODEL ID
89
+ model = "Meta-Llama-3.3-70B-Instruct"
90
+
91
+ # EXECUTE
92
  response = client.chat.completions.create(
93
+ model=model,
94
+ messages=[
95
+ {"role": "system", "content": system_prompt},
96
+ {"role": "user", "content": "Analyze this."}
97
+ ],
98
+ temperature=0.6,
99
  max_tokens=300
100
  )
101
  return response.choices[0].message.content
 
 
102
 
103
+ except Exception as e:
104
+ return f"Reasoning Error ({provider}): {str(e)}"
105
 
106
+ # --- 5. PIPELINE ---
107
+ def full_analysis_pipeline(user_text, location, provider):
108
+ if not user_text.strip(): return "Please enter text.", "No data", "No data"
109
 
110
+ risk_data = detect_depression_risk(user_text)
 
111
  resources = get_crisis_resources(location)
112
+ explanation = agent_reasoning(user_text, risk_data, provider)
113
 
114
+ # Color logic
115
+ color = "green" if risk_data['probability'] < 0.4 else "orange" if risk_data['probability'] < 0.6 else "red"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
+ return f"""
118
+ <div style="border-left: 5px solid {color}; padding-left: 10px;">
119
+ <h2 style="color:{color}; margin:0;">{risk_data['risk_level']}</h2>
120
+ <h3 style="margin-top:0;">Probability: {risk_data['probability']*100:.1f}%</h3>
121
+ </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
+ ### 🧠 Agent Analysis ({provider})
124
+ {explanation}
125
 
126
+ ### πŸ†˜ Recommended Resources
127
+ {resources}
128
+ """
129
 
130
+ # --- 6. EXAMPLES ---
131
  example_low = """The new update for the Linux kernel (6.8) finally addressed the driver issues I was seeing with Realtek cards. I read the changelog on kernel.org and noticed they patched the module specific to the rtl8821ce chipset.
 
132
  On a different note, the defensive stats for the basketball team have improved significantly. Allowing 15% fewer points per possession is a game changer.
 
133
  I also spent some time woodworking. I'm building a walnut coffee table and the grain is tricky to plane. I might switch to a polyurethane finish for durability."""
134
 
135
  example_mod = """Things have been pretty busy at work. We're in the middle of a big project and deadlines are tight. Staying a bit later than usual but that's just how it goes sometimes. Team is handling it well overall.
 
136
  Sleep could be better. Been staying up too late scrolling my phone. Need to work on that. Usually feel okay once I get moving in the morning though.
137
+ Went hiking with some friends last weekend which was nice. Good to get outside and move around."""
 
138
 
139
  example_high = """I don't know why I even bother getting out of bed anymore. I slept for 12 hours yesterday and I'm still exhausted. It feels like my limbs weigh a thousand pounds.
 
140
  I've been avoiding my friends for weeks. They keep texting me, but I can't bring myself to reply. The thought of socializing is terrifying.
 
141
  Everything feels like a shade of grey. I can't concentrate on my work. I feel like I'm drowning while everyone else is breathing fine. I was up until 4 AM again last night just staring at the ceiling."""
142
 
143
+ # --- 7. UI ---
144
  with gr.Blocks(title="Depression Risk Agent") as demo:
145
  gr.Markdown("# 🧠 Early Depression Detection Agent (MCP)")
146
+ gr.Markdown("Agentic system using **Mental-Longformer** (Tool) + **Multi-Provider Reasoning** (Nebius/SambaNova).")
 
 
 
 
 
147
 
148
  with gr.Row():
 
149
  with gr.Column(scale=1):
150
+ gr.Markdown("### 1. User History")
151
  input_text = gr.Textbox(
152
+ label="User Timeline",
153
+ lines=10,
154
+ placeholder="[Post 1] ...\n\n[Post 2] ...\n\n(Context >100 words recommended)"
 
155
  )
156
 
157
+ gr.Markdown("### 2. Thesis Patterns (Click to Load)")
 
158
  with gr.Row():
159
+ btn_low = gr.Button("🟒 Low Risk", size="sm", variant="secondary")
160
+ btn_mod = gr.Button("🟑 Moderate", size="sm", variant="secondary")
161
+ btn_high = gr.Button("πŸ”΄ High Risk", size="sm", variant="secondary")
162
 
163
+ gr.Markdown("### 3. Agent Settings")
 
164
  with gr.Row():
165
+ # RENAMED to be clearer
166
+ loc_dropdown = gr.Dropdown(["Global", "US", "Malaysia"], value="Global", label="Crisis Resource Region")
167
+ provider_dropdown = gr.Dropdown(["SambaNova", "Nebius (Kimi K2)"], value="SambaNova", label="Reasoning Brain")
168
 
169
+ submit = gr.Button("πŸš€ Run Analysis Agent", variant="primary", size="lg")
170
 
 
 
 
 
171
  with gr.Column(scale=1):
172
+ gr.Markdown("### 4. Agent Response")
173
+ output = gr.Markdown(label="Response")
 
 
174
 
175
+ with gr.Accordion("πŸ”§ MCP Tools Exposed", open=True):
176
+ gr.Markdown("""
177
+ The following functions are exposed to the MCP Client:
178
+ - `detect_depression_risk(text)`: Returns probability & thesis classification.
179
+ - `get_crisis_resources(location)`: Returns localized help.
180
+ """)
181
 
182
+ submit.click(full_analysis_pipeline, inputs=[input_text, loc_dropdown, provider_dropdown], outputs=output)
183
 
 
 
 
 
 
 
 
 
184
  btn_low.click(lambda: example_low, None, input_text)
185
  btn_mod.click(lambda: example_mod, None, input_text)
186
  btn_high.click(lambda: example_high, None, input_text)
 
 
 
 
 
 
 
 
 
 
 
 
 
187
 
188
  if __name__ == "__main__":
189
+ demo.launch(mcp_server=True, theme=gr.themes.Soft())