avtak commited on
Commit
9bde0b8
·
verified ·
1 Parent(s): 7761a92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -26
app.py CHANGED
@@ -3,6 +3,7 @@ from transformers import pipeline
3
  from huggingface_hub import login
4
  from openai import OpenAI
5
  import os
 
6
 
7
  # --- 1. SETUP ---
8
  hf_token = os.getenv("HF_TOKEN")
@@ -33,7 +34,7 @@ def detect_depression_risk(text: str) -> dict:
33
 
34
  # --- THESIS LOGIC: AGGREGATION ---
35
  # Strictly preserving your requirement for double newlines
36
- processed_text = text.replace("\n", "\n\n")
37
 
38
  results = classifier(processed_text)[0]
39
  prob = next((r['score'] for r in results if r['label'] == 'LABEL_1'), 0)
@@ -63,7 +64,14 @@ def detect_depression_risk(text: str) -> dict:
63
  "word_count": len(processed_text.split())
64
  }
65
 
66
- # --- 4. AGENT REASONING (Distinct Models) ---
 
 
 
 
 
 
 
67
  def agent_reasoning(text, risk_data, provider="SambaNova"):
68
  """
69
  Uses Sponsor APIs to generate the analysis report.
@@ -85,26 +93,25 @@ def agent_reasoning(text, risk_data, provider="SambaNova"):
85
  INSTRUCTIONS:
86
  1. Acknowledge the user's situation based on the text.
87
  2. Explain the risk level using thesis terms: 'Nocturnal Posting' (High), 'Supportive Responder' (Moderate), or 'Healthy External Focus' (Low).
88
- 3. Be compassionate. Max 100 words.
 
89
  """
90
 
91
  try:
92
- # --- SPONSOR 1: NEBIUS (DeepSeek R1 - REASONING MODEL) ---
93
  if provider == "Nebius (DeepSeek R1)":
94
  api_key = os.getenv("NEBIUS_API_KEY")
95
  if not api_key: return "⚠️ Nebius API Key missing."
96
 
97
  client = OpenAI(base_url="https://api.tokenfactory.nebius.com/v1/", api_key=api_key)
98
- # Using DeepSeek R1 Distill for reasoning capabilities
99
- model_id = "deepseek-ai/DeepSeek-R1-0528"
100
 
101
- # --- SPONSOR 2: SAMBANOVA (Llama 3.3 - SPEED MODEL) ---
102
  elif provider == "SambaNova":
103
  api_key = os.getenv("SAMBANOVA_API_KEY")
104
  if not api_key: return "⚠️ SambaNova API Key missing."
105
 
106
  client = OpenAI(base_url="https://api.sambanova.ai/v1", api_key=api_key)
107
- # Using latest Llama 3.3
108
  model_id = "Meta-Llama-3.3-70B-Instruct"
109
 
110
  # EXECUTE
@@ -115,16 +122,23 @@ def agent_reasoning(text, risk_data, provider="SambaNova"):
115
  {"role": "user", "content": "Analyze this."}
116
  ],
117
  temperature=0.6,
118
- max_tokens=300
119
  )
120
- return response.choices[0].message.content
 
 
 
 
 
 
121
 
122
  except Exception as e:
123
  return f"Reasoning Error ({provider}): {str(e)}"
124
 
125
- # --- 5. PIPELINE (Stable Report UI) ---
 
126
  def full_analysis_pipeline(user_text, location, provider):
127
- if not user_text.strip(): return "Please enter text."
128
 
129
  # 1. Run Tool
130
  risk_data = detect_depression_risk(user_text)
@@ -138,20 +152,34 @@ def full_analysis_pipeline(user_text, location, provider):
138
  # 4. Color Logic
139
  color = "green" if risk_data['probability'] < 0.4 else "orange" if risk_data['probability'] < 0.6 else "red"
140
 
141
- # 5. Build HTML/Markdown Report
142
- return f"""
143
- <div style="border-left: 6px solid {color}; padding-left: 15px; background-color: {color}10; border-radius: 5px; padding: 20px;">
144
- <h2 style="color:{color}; margin:0;">{risk_data['risk_level']}</h2>
145
- <h3 style="margin-top:5px;">Probability: {risk_data['probability']*100:.1f}%</h3>
 
 
 
 
 
 
 
 
 
146
  </div>
 
147
 
148
- ### 🧠 Agent Analysis ({provider})
 
 
149
  {explanation}
150
 
151
  ---
152
- ### 🆘 Recommended Resources
153
  {resources}
154
  """
 
 
155
 
156
  # --- 6. EXAMPLES ---
157
  example_low = """The new update for the Linux kernel (6.8) finally addressed the driver issues I was seeing with Realtek cards. I read the changelog on kernel.org and noticed they patched the module specific to the rtl8821ce chipset.
@@ -160,7 +188,7 @@ I also spent some time woodworking. I'm building a walnut coffee table and the g
160
 
161
  example_mod = """Things have been pretty busy at work. We're in the middle of a big project and deadlines are tight. Staying a bit later than usual but that's just how it goes sometimes. Team is handling it well overall.
162
  Sleep could be better. Been staying up too late scrolling my phone. Need to work on that. Usually feel okay once I get moving in the morning though.
163
- Went hiking with some friends last weekend which was nice. Good to get outside and move around."""
164
 
165
  example_high = """I don't know why I even bother getting out of bed anymore. I slept for 12 hours yesterday and I'm still exhausted. It feels like my limbs weigh a thousand pounds.
166
  I've been avoiding my friends for weeks. They keep texting me, but I can't bring myself to reply. The thought of socializing is terrifying.
@@ -177,7 +205,7 @@ with gr.Blocks(title="Depression Risk Agent") as demo:
177
  gr.Markdown("### 1. User History")
178
  input_text = gr.Textbox(
179
  label="User Timeline",
180
- lines=10,
181
  placeholder="[Post 1] ...\n\n[Post 2] ...\n\n(Context >100 words recommended)"
182
  )
183
 
@@ -196,10 +224,6 @@ with gr.Blocks(title="Depression Risk Agent") as demo:
196
 
197
  submit = gr.Button("🚀 Run Analysis Agent", variant="primary", size="lg")
198
 
199
- with gr.Column(scale=1):
200
- gr.Markdown("### 4. Agent Response")
201
- output = gr.Markdown(label="Response")
202
-
203
  with gr.Accordion("🔧 MCP Tools Exposed", open=True):
204
  gr.Markdown("""
205
  The following functions are exposed to the MCP Client:
@@ -207,7 +231,14 @@ with gr.Blocks(title="Depression Risk Agent") as demo:
207
  - `get_crisis_resources(location)`: Returns localized help.
208
  """)
209
 
210
- submit.click(full_analysis_pipeline, inputs=[input_text, loc_dropdown, provider_dropdown], outputs=output)
 
 
 
 
 
 
 
211
 
212
  btn_low.click(lambda: example_low, None, input_text)
213
  btn_mod.click(lambda: example_mod, None, input_text)
 
3
  from huggingface_hub import login
4
  from openai import OpenAI
5
  import os
6
+ import re
7
 
8
  # --- 1. SETUP ---
9
  hf_token = os.getenv("HF_TOKEN")
 
34
 
35
  # --- THESIS LOGIC: AGGREGATION ---
36
  # Strictly preserving your requirement for double newlines
37
+ processed_text = text.replace("\n", "\n\n")
38
 
39
  results = classifier(processed_text)[0]
40
  prob = next((r['score'] for r in results if r['label'] == 'LABEL_1'), 0)
 
64
  "word_count": len(processed_text.split())
65
  }
66
 
67
+ # --- 4. AGENT REASONING (With DeepSeek Cleaning) ---
68
+
69
def clean_deepseek_output(text):
    """Strip DeepSeek R1 reasoning traces from a model response.

    DeepSeek R1 models emit their chain-of-thought wrapped in
    ``<think>...</think>`` tags before the final answer; this removes
    every such span and trims surrounding whitespace. Safe to call on
    output from non-reasoning models (no tags -> text returned as-is,
    minus leading/trailing whitespace).

    Args:
        text: Raw completion text from the model.

    Returns:
        The completion with all ``<think>`` blocks removed and
        leading/trailing whitespace stripped.
    """
    # DOTALL so the non-greedy match spans multi-line reasoning blocks.
    cleaned = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
    return cleaned.strip()
74
+
75
  def agent_reasoning(text, risk_data, provider="SambaNova"):
76
  """
77
  Uses Sponsor APIs to generate the analysis report.
 
93
  INSTRUCTIONS:
94
  1. Acknowledge the user's situation based on the text.
95
  2. Explain the risk level using thesis terms: 'Nocturnal Posting' (High), 'Supportive Responder' (Moderate), or 'Healthy External Focus' (Low).
96
+ 3. Be compassionate but professional.
97
+ 4. Keep it under 100 words.
98
  """
99
 
100
  try:
101
+ # --- SPONSOR 1: NEBIUS (DeepSeek R1) ---
102
  if provider == "Nebius (DeepSeek R1)":
103
  api_key = os.getenv("NEBIUS_API_KEY")
104
  if not api_key: return "⚠️ Nebius API Key missing."
105
 
106
  client = OpenAI(base_url="https://api.tokenfactory.nebius.com/v1/", api_key=api_key)
107
+ model_id = "deepseek-ai/DeepSeek-R1" # Standard R1 ID
 
108
 
109
+ # --- SPONSOR 2: SAMBANOVA (Llama 3.3) ---
110
  elif provider == "SambaNova":
111
  api_key = os.getenv("SAMBANOVA_API_KEY")
112
  if not api_key: return "⚠️ SambaNova API Key missing."
113
 
114
  client = OpenAI(base_url="https://api.sambanova.ai/v1", api_key=api_key)
 
115
  model_id = "Meta-Llama-3.3-70B-Instruct"
116
 
117
  # EXECUTE
 
122
  {"role": "user", "content": "Analyze this."}
123
  ],
124
  temperature=0.6,
125
+ max_tokens=500 # Needs more tokens for thinking
126
  )
127
+
128
+ raw_output = response.choices[0].message.content
129
+
130
+ # CLEANUP: Remove the "Thinking" part if it's DeepSeek
131
+ final_output = clean_deepseek_output(raw_output)
132
+
133
+ return final_output
134
 
135
  except Exception as e:
136
  return f"Reasoning Error ({provider}): {str(e)}"
137
 
138
+ # --- 5. PIPELINE (Visual Report) ---
139
+
140
  def full_analysis_pipeline(user_text, location, provider):
141
+ if not user_text.strip(): return "Please enter text.", "No data"
142
 
143
  # 1. Run Tool
144
  risk_data = detect_depression_risk(user_text)
 
152
  # 4. Color Logic
153
  color = "green" if risk_data['probability'] < 0.4 else "orange" if risk_data['probability'] < 0.6 else "red"
154
 
155
+ # 5. Build Dashboard (HTML) - Fixed Spacing
156
+ html_dashboard = f"""
157
+ <div style="padding: 20px; border-radius: 12px; background-color: {color}15; border: 1px solid {color};">
158
+ <h2 style="color: {color}; margin: 0;">{risk_data['risk_level']}</h2>
159
+ <h3 style="margin: 0;">{risk_data['probability']*100:.1f}% Match</h3>
160
+
161
+ <div style="background-color: #e5e7eb; border-radius: 9999px; height: 10px; margin-top: 10px; width: 100%;">
162
+ <div style="background-color: {color}; height: 10px; border-radius: 9999px; width: {risk_data['probability']*100}%;"></div>
163
+ </div>
164
+
165
+ <div style="margin-top: 15px; white-space: pre-wrap;">
166
+ <p><strong>🧠 Thesis Biomarker:</strong> {risk_data['biomarker']}</p>
167
+ <p style="font-size: 0.9em; opacity: 0.8;">{risk_data['description']}</p>
168
+ </div>
169
  </div>
170
+ """
171
 
172
+ # 6. Build Text Report (Markdown)
173
+ report_markdown = f"""
174
+ ### 🤖 Agent Analysis ({provider})
175
  {explanation}
176
 
177
  ---
178
+ ### 🆘 Recommended Resources ({location})
179
  {resources}
180
  """
181
+
182
+ return html_dashboard, report_markdown
183
 
184
  # --- 6. EXAMPLES ---
185
  example_low = """The new update for the Linux kernel (6.8) finally addressed the driver issues I was seeing with Realtek cards. I read the changelog on kernel.org and noticed they patched the module specific to the rtl8821ce chipset.
 
188
 
189
  example_mod = """Things have been pretty busy at work. We're in the middle of a big project and deadlines are tight. Staying a bit later than usual but that's just how it goes sometimes. Team is handling it well overall.
190
  Sleep could be better. Been staying up too late scrolling my phone. Need to work on that. Usually feel okay once I get moving in the morning though.
191
+ Went hiking with some friends last weekend which was nice. Good to get outside and move around. We're talking about doing another trip next month. Weather should be better by then."""
192
 
193
  example_high = """I don't know why I even bother getting out of bed anymore. I slept for 12 hours yesterday and I'm still exhausted. It feels like my limbs weigh a thousand pounds.
194
  I've been avoiding my friends for weeks. They keep texting me, but I can't bring myself to reply. The thought of socializing is terrifying.
 
205
  gr.Markdown("### 1. User History")
206
  input_text = gr.Textbox(
207
  label="User Timeline",
208
+ lines=8,
209
  placeholder="[Post 1] ...\n\n[Post 2] ...\n\n(Context >100 words recommended)"
210
  )
211
 
 
224
 
225
  submit = gr.Button("🚀 Run Analysis Agent", variant="primary", size="lg")
226
 
 
 
 
 
227
  with gr.Accordion("🔧 MCP Tools Exposed", open=True):
228
  gr.Markdown("""
229
  The following functions are exposed to the MCP Client:
 
231
  - `get_crisis_resources(location)`: Returns localized help.
232
  """)
233
 
234
+ with gr.Column(scale=1):
235
+ # OUTPUTS: ONE FOR HTML, ONE FOR MARKDOWN
236
+ out_dashboard = gr.HTML(label="Clinical Dashboard")
237
+ out_report = gr.Markdown(label="Agent Report")
238
+
239
+ # WIRING
240
+ # Ensure correct mapping: input_text -> full_analysis -> [dashboard, report]
241
+ submit.click(full_analysis_pipeline, inputs=[input_text, loc_dropdown, provider_dropdown], outputs=[out_dashboard, out_report])
242
 
243
  btn_low.click(lambda: example_low, None, input_text)
244
  btn_mod.click(lambda: example_mod, None, input_text)