Spaces:
Sleeping
Sleeping
stanlee47 commited on
Commit ·
21f026d
1
Parent(s): 3d99ce6
beck chnages
Browse files- __pycache__/app.cpython-312.pyc +0 -0
- __pycache__/database.cpython-312.pyc +0 -0
- __pycache__/groq_client.cpython-312.pyc +0 -0
- __pycache__/prompts.cpython-312.pyc +0 -0
- app.py +176 -160
- database.py +147 -0
- groq_client.py +261 -127
- prompts.py +169 -102
- test_beck_protocol.py +253 -0
__pycache__/app.cpython-312.pyc
ADDED
|
Binary file (17 kB). View file
|
|
|
__pycache__/database.cpython-312.pyc
ADDED
|
Binary file (45.5 kB). View file
|
|
|
__pycache__/groq_client.cpython-312.pyc
ADDED
|
Binary file (15.2 kB). View file
|
|
|
__pycache__/prompts.cpython-312.pyc
ADDED
|
Binary file (6.58 kB). View file
|
|
|
app.py
CHANGED
|
@@ -11,7 +11,7 @@ from groq_client import GroqClient
|
|
| 11 |
from database import get_db
|
| 12 |
from auth import register_user, login_user, token_required
|
| 13 |
from crisis_detector import check_for_crisis, get_crisis_response, get_crisis_resources
|
| 14 |
-
from prompts import
|
| 15 |
from exercises import get_exercise_for_group
|
| 16 |
from wearable import wearable_bp
|
| 17 |
from admin import admin_bp
|
|
@@ -159,182 +159,172 @@ def get_sessions():
|
|
| 159 |
@token_required
|
| 160 |
def chat():
|
| 161 |
"""
|
| 162 |
-
Main chat endpoint.
|
| 163 |
-
Handles classification, stage tracking, crisis detection, and response generation.
|
| 164 |
"""
|
| 165 |
try:
|
| 166 |
user = request.current_user
|
| 167 |
db = get_db()
|
| 168 |
-
|
| 169 |
data = request.json
|
| 170 |
user_message = data.get("message", "").strip()
|
| 171 |
session_id = data.get("session_id")
|
| 172 |
-
conversation_history = data.get("conversation_history", [])
|
| 173 |
-
|
| 174 |
-
if not user_message:
|
| 175 |
-
return jsonify({"error": "Message
|
| 176 |
-
|
| 177 |
-
if not session_id:
|
| 178 |
-
return jsonify({"error": "session_id is required"}), 400
|
| 179 |
-
|
| 180 |
# Get session
|
| 181 |
session = db.get_session(session_id)
|
| 182 |
-
if not session:
|
| 183 |
return jsonify({"error": "Session not found"}), 404
|
| 184 |
-
|
| 185 |
-
if session["user_id"] != user["id"]:
|
| 186 |
-
return jsonify({"error": "Unauthorized"}), 403
|
| 187 |
-
|
| 188 |
# ========== CRISIS DETECTION ==========
|
| 189 |
is_crisis, trigger_word = check_for_crisis(user_message)
|
| 190 |
-
|
| 191 |
if is_crisis:
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
user_id=user["id"],
|
| 195 |
-
user_name=user["name"],
|
| 196 |
-
user_email=user["email"],
|
| 197 |
-
session_id=session_id,
|
| 198 |
-
message_content=user_message,
|
| 199 |
-
trigger_word=trigger_word
|
| 200 |
-
)
|
| 201 |
-
|
| 202 |
-
# Save user message
|
| 203 |
-
db.add_message(session_id, user["id"], "user", user_message)
|
| 204 |
-
|
| 205 |
-
# Get crisis response
|
| 206 |
crisis_response = get_crisis_response(user["name"])
|
| 207 |
-
db.add_message(session_id, user["id"], "assistant", crisis_response)
|
| 208 |
-
|
| 209 |
return jsonify({
|
| 210 |
"response": crisis_response,
|
| 211 |
"is_crisis": True,
|
| 212 |
-
"crisis_resources": get_crisis_resources()
|
| 213 |
-
"session_state": session["state"],
|
| 214 |
-
"current_stage": session["current_stage"]
|
| 215 |
})
|
| 216 |
-
|
| 217 |
# ========== NATURAL EXIT CHECK ==========
|
| 218 |
if is_natural_exit(user_message):
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
if session["state"] == "WAITING_FOR_PROBLEM" or locked_group == "G0":
|
| 228 |
-
# Run classifier
|
| 229 |
classification = classifier.classify(user_message)
|
| 230 |
-
detected_group = classification["group"]
|
| 231 |
-
confidence = classification["confidence"]
|
| 232 |
-
|
| 233 |
-
if detected_group == "G0":
|
| 234 |
-
# No distortion - listen supportively
|
| 235 |
-
db.update_session(session_id, locked_group="G0")
|
| 236 |
|
|
|
|
|
|
|
| 237 |
response_text = groq_client.generate_supportive_response(
|
| 238 |
user_message=user_message,
|
| 239 |
-
conversation_history=conversation_history[-6:],
|
| 240 |
user_name=user["name"]
|
| 241 |
)
|
| 242 |
-
|
| 243 |
-
# Don't save to DB - only save crisis messages
|
| 244 |
-
|
| 245 |
return jsonify({
|
| 246 |
"response": response_text,
|
| 247 |
-
"
|
| 248 |
-
"
|
| 249 |
-
"confidence": confidence,
|
| 250 |
-
"current_stage": None,
|
| 251 |
-
"total_stages": None,
|
| 252 |
-
"session_state": "LISTENING"
|
| 253 |
})
|
| 254 |
-
|
| 255 |
else:
|
| 256 |
-
# Distortion detected -
|
| 257 |
-
db.
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
db.
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 322 |
return jsonify({
|
| 323 |
"response": response_text,
|
| 324 |
-
"
|
| 325 |
-
"
|
| 326 |
-
"
|
| 327 |
-
"
|
| 328 |
-
"stage_name": STAGE_GOALS[locked_group][current_stage]["name"],
|
| 329 |
-
"session_state": "COMPLETED" if session_complete else "IN_PROGRESS",
|
| 330 |
-
"session_complete": session_complete
|
| 331 |
})
|
| 332 |
-
|
| 333 |
except Exception as e:
|
| 334 |
print(f"Error in /api/chat: {str(e)}")
|
| 335 |
import traceback
|
| 336 |
traceback.print_exc()
|
| 337 |
-
return jsonify({"error": "Something went wrong
|
| 338 |
|
| 339 |
|
| 340 |
# ==================== EXERCISE ROUTES ====================
|
|
@@ -454,33 +444,59 @@ def is_natural_exit(message: str) -> bool:
|
|
| 454 |
def handle_natural_exit(session: dict, user: dict, db, session_id: str) -> dict:
|
| 455 |
"""Handle natural conversation exit."""
|
| 456 |
import random
|
| 457 |
-
|
| 458 |
endings = NATURAL_ENDINGS
|
| 459 |
response = random.choice(endings).replace("{name}", user["name"])
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
|
| 473 |
-
|
|
|
|
|
|
|
|
|
|
| 474 |
return {
|
| 475 |
"response": response,
|
| 476 |
-
"session_state": "COMPLETED"
|
| 477 |
-
"detected_group": locked_group,
|
| 478 |
-
"current_stage": current_stage,
|
| 479 |
"natural_exit": True,
|
| 480 |
-
"session_complete":
|
| 481 |
-
"suggest_exercise": current_stage == 3 and locked_group != "G0"
|
| 482 |
}
|
| 483 |
|
| 484 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 485 |
if __name__ == "__main__":
|
| 486 |
app.run(host="0.0.0.0", port=7860, debug=True)
|
|
|
|
| 11 |
from database import get_db
|
| 12 |
from auth import register_user, login_user, token_required
|
| 13 |
from crisis_detector import check_for_crisis, get_crisis_response, get_crisis_resources
|
| 14 |
+
from prompts import NATURAL_ENDINGS
|
| 15 |
from exercises import get_exercise_for_group
|
| 16 |
from wearable import wearable_bp
|
| 17 |
from admin import admin_bp
|
|
|
|
| 159 |
@token_required
|
| 160 |
def chat():
|
| 161 |
"""
|
| 162 |
+
Main chat endpoint using Beck's 3-Agent Protocol.
|
|
|
|
| 163 |
"""
|
| 164 |
try:
|
| 165 |
user = request.current_user
|
| 166 |
db = get_db()
|
| 167 |
+
|
| 168 |
data = request.json
|
| 169 |
user_message = data.get("message", "").strip()
|
| 170 |
session_id = data.get("session_id")
|
| 171 |
+
conversation_history = data.get("conversation_history", [])
|
| 172 |
+
|
| 173 |
+
if not user_message or not session_id:
|
| 174 |
+
return jsonify({"error": "Message and session_id required"}), 400
|
| 175 |
+
|
|
|
|
|
|
|
|
|
|
| 176 |
# Get session
|
| 177 |
session = db.get_session(session_id)
|
| 178 |
+
if not session or session["user_id"] != user["id"]:
|
| 179 |
return jsonify({"error": "Session not found"}), 404
|
| 180 |
+
|
|
|
|
|
|
|
|
|
|
| 181 |
# ========== CRISIS DETECTION ==========
|
| 182 |
is_crisis, trigger_word = check_for_crisis(user_message)
|
|
|
|
| 183 |
if is_crisis:
|
| 184 |
+
db.flag_crisis(user["id"], user["name"], user["email"],
|
| 185 |
+
session_id, user_message, trigger_word)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
crisis_response = get_crisis_response(user["name"])
|
|
|
|
|
|
|
| 187 |
return jsonify({
|
| 188 |
"response": crisis_response,
|
| 189 |
"is_crisis": True,
|
| 190 |
+
"crisis_resources": get_crisis_resources()
|
|
|
|
|
|
|
| 191 |
})
|
| 192 |
+
|
| 193 |
# ========== NATURAL EXIT CHECK ==========
|
| 194 |
if is_natural_exit(user_message):
|
| 195 |
+
return jsonify(handle_natural_exit(session, user, db, session_id))
|
| 196 |
+
|
| 197 |
+
# ========== GET OR CREATE BECK SESSION ==========
|
| 198 |
+
beck_data = db.get_beck_session(session_id)
|
| 199 |
+
|
| 200 |
+
if not beck_data:
|
| 201 |
+
# First message - check if distorted
|
|
|
|
|
|
|
|
|
|
| 202 |
classification = classifier.classify(user_message)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 203 |
|
| 204 |
+
if classification["group"] == "G0":
|
| 205 |
+
# No distortion - supportive listening
|
| 206 |
response_text = groq_client.generate_supportive_response(
|
| 207 |
user_message=user_message,
|
| 208 |
+
conversation_history=conversation_history[-6:],
|
| 209 |
user_name=user["name"]
|
| 210 |
)
|
|
|
|
|
|
|
|
|
|
| 211 |
return jsonify({
|
| 212 |
"response": response_text,
|
| 213 |
+
"beck_state": None,
|
| 214 |
+
"is_beck_protocol": False
|
|
|
|
|
|
|
|
|
|
|
|
|
| 215 |
})
|
|
|
|
| 216 |
else:
|
| 217 |
+
# Distortion detected - START BECK PROTOCOL
|
| 218 |
+
db.create_beck_session(session_id)
|
| 219 |
+
db.update_beck_state(session_id, "VALIDATE",
|
| 220 |
+
original_thought=user_message)
|
| 221 |
+
beck_data = db.get_beck_session(session_id)
|
| 222 |
+
|
| 223 |
+
# ========== BECK PROTOCOL STATE MACHINE ==========
|
| 224 |
+
from prompts import BECK_STATES, AGENT1_STATES, AGENT3_STATES, get_next_state, get_field_to_save
|
| 225 |
+
|
| 226 |
+
current_state = beck_data["beck_state"]
|
| 227 |
+
|
| 228 |
+
# Determine which agent handles this state
|
| 229 |
+
if current_state in AGENT1_STATES:
|
| 230 |
+
# Agent 1: Warm Questioner
|
| 231 |
+
response_text = groq_client.agent1_warm_questioner(
|
| 232 |
+
current_state=current_state,
|
| 233 |
+
user_message=user_message,
|
| 234 |
+
beck_data=beck_data,
|
| 235 |
+
user_name=user["name"],
|
| 236 |
+
conversation_history=conversation_history[-6:]
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
# Save user's response to appropriate field
|
| 240 |
+
field_to_save = get_field_to_save(current_state)
|
| 241 |
+
next_state = get_next_state(current_state)
|
| 242 |
+
|
| 243 |
+
# Extract rating if this is a rating state
|
| 244 |
+
if field_to_save and ('rating' in field_to_save or 'intensity' in field_to_save):
|
| 245 |
+
rating = extract_rating(user_message)
|
| 246 |
+
if rating is not None:
|
| 247 |
+
db.update_beck_state(session_id, next_state, **{field_to_save: rating})
|
| 248 |
+
else:
|
| 249 |
+
db.update_beck_state(session_id, next_state)
|
| 250 |
+
elif field_to_save:
|
| 251 |
+
db.update_beck_state(session_id, next_state, **{field_to_save: user_message})
|
| 252 |
+
else:
|
| 253 |
+
db.update_beck_state(session_id, next_state)
|
| 254 |
+
|
| 255 |
+
elif current_state == "SUMMARIZING":
|
| 256 |
+
# Agent 2: Clinical Summarizer (internal)
|
| 257 |
+
clinical_summary = groq_client.agent2_clinical_summarizer(beck_data)
|
| 258 |
+
|
| 259 |
+
# Move to Agent 3
|
| 260 |
+
db.update_beck_state(session_id, "DELIVER_REFRAME")
|
| 261 |
+
|
| 262 |
+
# Get updated beck_data and call Agent 3
|
| 263 |
+
beck_data = db.get_beck_session(session_id)
|
| 264 |
+
response_text = groq_client.agent3_treatment_agent(
|
| 265 |
+
current_state="DELIVER_REFRAME",
|
| 266 |
+
user_message=user_message,
|
| 267 |
+
beck_data=beck_data,
|
| 268 |
+
clinical_summary=clinical_summary,
|
| 269 |
+
user_name=user["name"],
|
| 270 |
+
conversation_history=conversation_history[-6:]
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
elif current_state in AGENT3_STATES:
|
| 274 |
+
# Agent 3: Treatment Agent
|
| 275 |
+
# Need clinical summary - regenerate if needed
|
| 276 |
+
clinical_summary = groq_client.agent2_clinical_summarizer(beck_data)
|
| 277 |
+
|
| 278 |
+
response_text = groq_client.agent3_treatment_agent(
|
| 279 |
+
current_state=current_state,
|
| 280 |
+
user_message=user_message,
|
| 281 |
+
beck_data=beck_data,
|
| 282 |
+
clinical_summary=clinical_summary,
|
| 283 |
+
user_name=user["name"],
|
| 284 |
+
conversation_history=conversation_history[-6:]
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
# Save response and advance state
|
| 288 |
+
field_to_save = get_field_to_save(current_state)
|
| 289 |
+
next_state = get_next_state(current_state)
|
| 290 |
+
|
| 291 |
+
if field_to_save and ('rating' in field_to_save or 'belief' in field_to_save or 'intensity' in field_to_save):
|
| 292 |
+
rating = extract_rating(user_message)
|
| 293 |
+
if rating is not None:
|
| 294 |
+
db.update_beck_state(session_id, next_state, **{field_to_save: rating})
|
| 295 |
+
else:
|
| 296 |
+
db.update_beck_state(session_id, next_state)
|
| 297 |
+
elif field_to_save:
|
| 298 |
+
db.update_beck_state(session_id, next_state, **{field_to_save: user_message})
|
| 299 |
+
else:
|
| 300 |
+
if next_state:
|
| 301 |
+
db.update_beck_state(session_id, next_state)
|
| 302 |
+
else:
|
| 303 |
+
# Protocol complete
|
| 304 |
+
db.complete_beck_session(session_id)
|
| 305 |
+
|
| 306 |
+
else:
|
| 307 |
+
# Fallback
|
| 308 |
+
response_text = groq_client.generate_supportive_response(
|
| 309 |
+
user_message, conversation_history[-6:], user["name"]
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
# Get updated state for response
|
| 313 |
+
beck_data = db.get_beck_session(session_id)
|
| 314 |
+
|
| 315 |
return jsonify({
|
| 316 |
"response": response_text,
|
| 317 |
+
"beck_state": beck_data["beck_state"] if beck_data else None,
|
| 318 |
+
"is_beck_protocol": beck_data is not None,
|
| 319 |
+
"protocol_complete": beck_data["beck_state"] == "COMPLETE" if beck_data else False,
|
| 320 |
+
"belief_improvement": calculate_improvement(beck_data) if beck_data else None
|
|
|
|
|
|
|
|
|
|
| 321 |
})
|
| 322 |
+
|
| 323 |
except Exception as e:
|
| 324 |
print(f"Error in /api/chat: {str(e)}")
|
| 325 |
import traceback
|
| 326 |
traceback.print_exc()
|
| 327 |
+
return jsonify({"error": "Something went wrong"}), 500
|
| 328 |
|
| 329 |
|
| 330 |
# ==================== EXERCISE ROUTES ====================
|
|
|
|
| 444 |
def handle_natural_exit(session: dict, user: dict, db, session_id: str) -> dict:
|
| 445 |
"""Handle natural conversation exit."""
|
| 446 |
import random
|
| 447 |
+
|
| 448 |
endings = NATURAL_ENDINGS
|
| 449 |
response = random.choice(endings).replace("{name}", user["name"])
|
| 450 |
+
|
| 451 |
+
# Check if Beck protocol was completed
|
| 452 |
+
beck_data = db.get_beck_session(session_id)
|
| 453 |
+
if beck_data and beck_data.get("beck_state") == "COMPLETE":
|
| 454 |
+
# Get improvement stats
|
| 455 |
+
improvement = calculate_improvement(beck_data)
|
| 456 |
+
if improvement:
|
| 457 |
+
improvement_msg = "You made real progress today! "
|
| 458 |
+
if improvement.get('belief_change', 0) >= 10:
|
| 459 |
+
improvement_msg += f"Your belief shifted by {improvement['belief_change']}%. "
|
| 460 |
+
if improvement.get('emotion_change', 0) >= 10:
|
| 461 |
+
improvement_msg += f"Your emotion intensity dropped by {improvement['emotion_change']}%. "
|
| 462 |
+
response = improvement_msg + "\n\n" + response
|
| 463 |
+
|
| 464 |
+
# Update session
|
| 465 |
+
db.update_session(session_id, completed=1)
|
| 466 |
+
|
| 467 |
return {
|
| 468 |
"response": response,
|
| 469 |
+
"session_state": "COMPLETED",
|
|
|
|
|
|
|
| 470 |
"natural_exit": True,
|
| 471 |
+
"session_complete": True
|
|
|
|
| 472 |
}
|
| 473 |
|
| 474 |
|
| 475 |
+
def extract_rating(message: str) -> int:
|
| 476 |
+
"""Extract a 0-100 rating from user message."""
|
| 477 |
+
import re
|
| 478 |
+
# Look for numbers
|
| 479 |
+
numbers = re.findall(r'\d+', message)
|
| 480 |
+
for num in numbers:
|
| 481 |
+
n = int(num)
|
| 482 |
+
if 0 <= n <= 100:
|
| 483 |
+
return n
|
| 484 |
+
return None
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
def calculate_improvement(beck_data: dict) -> dict:
|
| 488 |
+
"""Calculate belief and emotion improvement."""
|
| 489 |
+
if not beck_data:
|
| 490 |
+
return None
|
| 491 |
+
|
| 492 |
+
result = {}
|
| 493 |
+
if beck_data.get('initial_belief_rating') and beck_data.get('final_belief_rating'):
|
| 494 |
+
result['belief_change'] = beck_data['initial_belief_rating'] - beck_data['final_belief_rating']
|
| 495 |
+
if beck_data.get('initial_emotion_intensity') and beck_data.get('final_emotion_intensity'):
|
| 496 |
+
result['emotion_change'] = beck_data['initial_emotion_intensity'] - beck_data['final_emotion_intensity']
|
| 497 |
+
|
| 498 |
+
return result if result else None
|
| 499 |
+
|
| 500 |
+
|
| 501 |
if __name__ == "__main__":
|
| 502 |
app.run(host="0.0.0.0", port=7860, debug=True)
|
database.py
CHANGED
|
@@ -176,6 +176,52 @@ class Database:
|
|
| 176 |
ON device_keys(api_key)
|
| 177 |
""")
|
| 178 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
self.conn.commit()
|
| 180 |
|
| 181 |
# ==================== USER OPERATIONS ====================
|
|
@@ -986,6 +1032,107 @@ class Database:
|
|
| 986 |
"G4": counts.get("G4", 0)
|
| 987 |
}
|
| 988 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 989 |
|
| 990 |
# Singleton instance
|
| 991 |
_db_instance = None
|
|
|
|
| 176 |
ON device_keys(api_key)
|
| 177 |
""")
|
| 178 |
|
| 179 |
+
# Beck sessions table for cognitive restructuring protocol
|
| 180 |
+
self.conn.execute("""
|
| 181 |
+
CREATE TABLE IF NOT EXISTS beck_sessions (
|
| 182 |
+
session_id TEXT PRIMARY KEY,
|
| 183 |
+
|
| 184 |
+
-- Current state in the protocol
|
| 185 |
+
beck_state TEXT DEFAULT 'VALIDATE',
|
| 186 |
+
|
| 187 |
+
-- Phase 1: Capture
|
| 188 |
+
original_thought TEXT,
|
| 189 |
+
initial_belief_rating INTEGER,
|
| 190 |
+
emotion TEXT,
|
| 191 |
+
initial_emotion_intensity INTEGER,
|
| 192 |
+
|
| 193 |
+
-- Phase 2: Discovery (6 Questions)
|
| 194 |
+
q1_evidence_for TEXT,
|
| 195 |
+
q1_evidence_against TEXT,
|
| 196 |
+
q2_alternative TEXT,
|
| 197 |
+
q3_worst TEXT,
|
| 198 |
+
q3_best TEXT,
|
| 199 |
+
q3_realistic TEXT,
|
| 200 |
+
q4_effect TEXT,
|
| 201 |
+
q5_friend TEXT,
|
| 202 |
+
q6_action TEXT,
|
| 203 |
+
|
| 204 |
+
-- Phase 3: Reframe
|
| 205 |
+
adaptive_thought TEXT,
|
| 206 |
+
new_thought_belief INTEGER,
|
| 207 |
+
|
| 208 |
+
-- Phase 4: Measure
|
| 209 |
+
final_belief_rating INTEGER,
|
| 210 |
+
final_emotion_intensity INTEGER,
|
| 211 |
+
|
| 212 |
+
-- Phase 5: Action
|
| 213 |
+
action_plan TEXT,
|
| 214 |
+
|
| 215 |
+
-- Metadata
|
| 216 |
+
started_at TEXT DEFAULT CURRENT_TIMESTAMP,
|
| 217 |
+
completed_at TEXT,
|
| 218 |
+
belief_improvement INTEGER,
|
| 219 |
+
emotion_improvement INTEGER,
|
| 220 |
+
|
| 221 |
+
FOREIGN KEY (session_id) REFERENCES sessions(id)
|
| 222 |
+
)
|
| 223 |
+
""")
|
| 224 |
+
|
| 225 |
self.conn.commit()
|
| 226 |
|
| 227 |
# ==================== USER OPERATIONS ====================
|
|
|
|
| 1032 |
"G4": counts.get("G4", 0)
|
| 1033 |
}
|
| 1034 |
|
| 1035 |
+
# ==================== BECK SESSION OPERATIONS ====================
|
| 1036 |
+
|
| 1037 |
+
def create_beck_session(self, session_id: str) -> dict:
|
| 1038 |
+
"""Initialize a Beck session when distortion is detected."""
|
| 1039 |
+
try:
|
| 1040 |
+
self.conn.execute(
|
| 1041 |
+
"""INSERT INTO beck_sessions (session_id, beck_state)
|
| 1042 |
+
VALUES (?, 'VALIDATE')""",
|
| 1043 |
+
(session_id,)
|
| 1044 |
+
)
|
| 1045 |
+
self.conn.commit()
|
| 1046 |
+
return {"session_id": session_id, "beck_state": "VALIDATE"}
|
| 1047 |
+
except Exception as e:
|
| 1048 |
+
print(f"Error creating Beck session: {e}")
|
| 1049 |
+
return None
|
| 1050 |
+
|
| 1051 |
+
def get_beck_session(self, session_id: str) -> dict:
|
| 1052 |
+
"""Get current Beck session state and all data."""
|
| 1053 |
+
result = self.conn.execute(
|
| 1054 |
+
"""SELECT session_id, beck_state, original_thought, initial_belief_rating,
|
| 1055 |
+
emotion, initial_emotion_intensity, q1_evidence_for, q1_evidence_against,
|
| 1056 |
+
q2_alternative, q3_worst, q3_best, q3_realistic, q4_effect, q5_friend,
|
| 1057 |
+
q6_action, adaptive_thought, new_thought_belief, final_belief_rating,
|
| 1058 |
+
final_emotion_intensity, action_plan, started_at, completed_at,
|
| 1059 |
+
belief_improvement, emotion_improvement
|
| 1060 |
+
FROM beck_sessions WHERE session_id = ?""",
|
| 1061 |
+
(session_id,)
|
| 1062 |
+
).fetchone()
|
| 1063 |
+
|
| 1064 |
+
if result:
|
| 1065 |
+
return {
|
| 1066 |
+
"session_id": result[0],
|
| 1067 |
+
"beck_state": result[1],
|
| 1068 |
+
"original_thought": result[2],
|
| 1069 |
+
"initial_belief_rating": result[3],
|
| 1070 |
+
"emotion": result[4],
|
| 1071 |
+
"initial_emotion_intensity": result[5],
|
| 1072 |
+
"q1_evidence_for": result[6],
|
| 1073 |
+
"q1_evidence_against": result[7],
|
| 1074 |
+
"q2_alternative": result[8],
|
| 1075 |
+
"q3_worst": result[9],
|
| 1076 |
+
"q3_best": result[10],
|
| 1077 |
+
"q3_realistic": result[11],
|
| 1078 |
+
"q4_effect": result[12],
|
| 1079 |
+
"q5_friend": result[13],
|
| 1080 |
+
"q6_action": result[14],
|
| 1081 |
+
"adaptive_thought": result[15],
|
| 1082 |
+
"new_thought_belief": result[16],
|
| 1083 |
+
"final_belief_rating": result[17],
|
| 1084 |
+
"final_emotion_intensity": result[18],
|
| 1085 |
+
"action_plan": result[19],
|
| 1086 |
+
"started_at": result[20],
|
| 1087 |
+
"completed_at": result[21],
|
| 1088 |
+
"belief_improvement": result[22],
|
| 1089 |
+
"emotion_improvement": result[23]
|
| 1090 |
+
}
|
| 1091 |
+
return None
|
| 1092 |
+
|
| 1093 |
+
def update_beck_state(self, session_id: str, new_state: str, **fields):
|
| 1094 |
+
"""Update state and save any new field values."""
|
| 1095 |
+
# Start with state update
|
| 1096 |
+
updates = {"beck_state": new_state}
|
| 1097 |
+
updates.update(fields)
|
| 1098 |
+
|
| 1099 |
+
set_clause = ", ".join(f"{k} = ?" for k in updates.keys())
|
| 1100 |
+
values = tuple(list(updates.values()) + [session_id])
|
| 1101 |
+
|
| 1102 |
+
self.conn.execute(
|
| 1103 |
+
f"UPDATE beck_sessions SET {set_clause} WHERE session_id = ?",
|
| 1104 |
+
values
|
| 1105 |
+
)
|
| 1106 |
+
self.conn.commit()
|
| 1107 |
+
|
| 1108 |
+
def complete_beck_session(self, session_id: str):
|
| 1109 |
+
"""Mark session complete, calculate improvements."""
|
| 1110 |
+
# Get current data
|
| 1111 |
+
beck_data = self.get_beck_session(session_id)
|
| 1112 |
+
if not beck_data:
|
| 1113 |
+
return
|
| 1114 |
+
|
| 1115 |
+
# Calculate improvements
|
| 1116 |
+
belief_improvement = None
|
| 1117 |
+
emotion_improvement = None
|
| 1118 |
+
|
| 1119 |
+
if beck_data.get('initial_belief_rating') and beck_data.get('final_belief_rating'):
|
| 1120 |
+
belief_improvement = beck_data['initial_belief_rating'] - beck_data['final_belief_rating']
|
| 1121 |
+
|
| 1122 |
+
if beck_data.get('initial_emotion_intensity') and beck_data.get('final_emotion_intensity'):
|
| 1123 |
+
emotion_improvement = beck_data['initial_emotion_intensity'] - beck_data['final_emotion_intensity']
|
| 1124 |
+
|
| 1125 |
+
# Update completion status
|
| 1126 |
+
self.conn.execute(
|
| 1127 |
+
"""UPDATE beck_sessions SET
|
| 1128 |
+
completed_at = CURRENT_TIMESTAMP,
|
| 1129 |
+
belief_improvement = ?,
|
| 1130 |
+
emotion_improvement = ?
|
| 1131 |
+
WHERE session_id = ?""",
|
| 1132 |
+
(belief_improvement, emotion_improvement, session_id)
|
| 1133 |
+
)
|
| 1134 |
+
self.conn.commit()
|
| 1135 |
+
|
| 1136 |
|
| 1137 |
# Singleton instance
|
| 1138 |
_db_instance = None
|
groq_client.py
CHANGED
|
@@ -1,6 +1,5 @@
|
|
| 1 |
"""
|
| 2 |
-
Groq API Client
|
| 3 |
-
Generates therapeutic responses using LLaMA 3
|
| 4 |
"""
|
| 5 |
|
| 6 |
import json
|
|
@@ -9,82 +8,96 @@ from groq import Groq
|
|
| 9 |
|
| 10 |
class GroqClient:
|
| 11 |
"""
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
| 14 |
"""
|
| 15 |
-
|
| 16 |
MODEL = "llama-3.3-70b-versatile"
|
| 17 |
-
|
| 18 |
def __init__(self, api_key: str):
|
| 19 |
if not api_key:
|
| 20 |
raise ValueError("GROQ_API_KEY is required")
|
| 21 |
self.client = Groq(api_key=api_key)
|
| 22 |
print(f"✅ Groq client initialized with model: {self.MODEL}")
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
| 25 |
self,
|
|
|
|
| 26 |
user_message: str,
|
| 27 |
-
|
| 28 |
user_name: str,
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
current_stage: int,
|
| 32 |
-
stage_goal: str,
|
| 33 |
-
stage_instruction: str
|
| 34 |
-
) -> dict:
|
| 35 |
"""
|
| 36 |
-
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
Returns:
|
| 39 |
-
|
| 40 |
"""
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
{
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
{history_text}
|
| 84 |
|
| 85 |
{user_name}'s latest message: "{user_message}"
|
| 86 |
|
| 87 |
-
Respond as the CBT companion. Remember:
|
| 88 |
|
| 89 |
try:
|
| 90 |
response = self.client.chat.completions.create(
|
|
@@ -94,45 +107,191 @@ Respond as the CBT companion. Remember: warm, friendly, like a supportive friend
|
|
| 94 |
{"role": "user", "content": user_prompt}
|
| 95 |
],
|
| 96 |
temperature=0.7,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
max_tokens=500,
|
| 98 |
response_format={"type": "json_object"}
|
| 99 |
)
|
| 100 |
-
|
| 101 |
-
result_text = response.choices[0].message.content
|
| 102 |
-
result = json.loads(result_text)
|
| 103 |
-
|
| 104 |
-
return {
|
| 105 |
-
"response": result.get("response", "I hear you. Tell me more."),
|
| 106 |
-
"advance_to_next_stage": result.get("advance_to_next_stage", False)
|
| 107 |
-
}
|
| 108 |
-
|
| 109 |
-
except json.JSONDecodeError:
|
| 110 |
-
# If JSON parsing fails, extract response text
|
| 111 |
-
return {
|
| 112 |
-
"response": response.choices[0].message.content,
|
| 113 |
-
"advance_to_next_stage": False
|
| 114 |
-
}
|
| 115 |
except Exception as e:
|
| 116 |
-
print(f"
|
| 117 |
return {
|
| 118 |
-
"
|
| 119 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
}
|
| 121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
def generate_supportive_response(
|
| 123 |
self,
|
| 124 |
user_message: str,
|
| 125 |
conversation_history: list,
|
| 126 |
user_name: str
|
| 127 |
) -> str:
|
| 128 |
-
"""
|
| 129 |
-
|
| 130 |
-
Just listen and be supportive, gently ask if anything else is bothering them.
|
| 131 |
-
"""
|
| 132 |
-
|
| 133 |
history_text = self._format_history(conversation_history)
|
| 134 |
-
|
| 135 |
-
system_prompt = f"""You are a warm
|
| 136 |
They're sharing something with you, and right now they don't seem caught in negative thinking.
|
| 137 |
|
| 138 |
YOUR TASK:
|
|
@@ -140,13 +299,13 @@ YOUR TASK:
|
|
| 140 |
2. Gently ask if there's anything else on their mind
|
| 141 |
3. Keep it friendly and brief
|
| 142 |
|
| 143 |
-
|
| 144 |
- Use {user_name}'s name occasionally
|
| 145 |
- Keep responses SHORT (1-2 sentences max)
|
| 146 |
-
- Use 1-2 emojis
|
| 147 |
- Don't lecture - just be a supportive friend"""
|
| 148 |
|
| 149 |
-
user_prompt = f"""CONVERSATION
|
| 150 |
{history_text}
|
| 151 |
|
| 152 |
{user_name}'s latest message: "{user_message}"
|
|
@@ -161,48 +320,23 @@ Respond warmly as their supportive companion."""
|
|
| 161 |
{"role": "user", "content": user_prompt}
|
| 162 |
],
|
| 163 |
temperature=0.7,
|
| 164 |
-
max_tokens=
|
| 165 |
)
|
| 166 |
-
|
| 167 |
return response.choices[0].message.content
|
| 168 |
-
|
| 169 |
except Exception as e:
|
| 170 |
-
print(f"
|
| 171 |
-
return f"I hear you, {user_name}. Thanks for sharing that with me.
|
| 172 |
-
|
|
|
|
|
|
|
| 173 |
def _format_history(self, history: list) -> str:
|
| 174 |
-
"""Format conversation history for
|
| 175 |
if not history:
|
| 176 |
return "(This is the start of the conversation)"
|
| 177 |
-
|
| 178 |
-
formatted = []
|
| 179 |
-
for msg in history:
|
| 180 |
-
role = "User" if msg["role"] == "user" else "Companion"
|
| 181 |
-
formatted.append(f"{role}: {msg['content']}")
|
| 182 |
-
|
| 183 |
-
return "\n".join(formatted)
|
| 184 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
|
| 186 |
-
|
| 187 |
-
if __name__ == "__main__":
|
| 188 |
-
import os
|
| 189 |
-
|
| 190 |
-
api_key = os.environ.get("GROQ_API_KEY")
|
| 191 |
-
if api_key:
|
| 192 |
-
client = GroqClient(api_key)
|
| 193 |
-
|
| 194 |
-
result = client.generate_therapeutic_response(
|
| 195 |
-
user_message="I failed my exam and I feel like I'll never succeed at anything",
|
| 196 |
-
conversation_history=[],
|
| 197 |
-
user_name="Max",
|
| 198 |
-
user_context="college student",
|
| 199 |
-
detected_group="G2",
|
| 200 |
-
current_stage=1,
|
| 201 |
-
stage_goal="Make underlying assumption explicit",
|
| 202 |
-
stage_instruction="Help Max identify the prediction or assumption he's making. Gently point out the overgeneralization."
|
| 203 |
-
)
|
| 204 |
-
|
| 205 |
-
print("Response:", result["response"])
|
| 206 |
-
print("Advance:", result["advance_to_next_stage"])
|
| 207 |
-
else:
|
| 208 |
-
print("Set GROQ_API_KEY to test")
|
|
|
|
| 1 |
"""
|
| 2 |
+
Groq API Client - 3 Agent System for Beck CBT Protocol
|
|
|
|
| 3 |
"""
|
| 4 |
|
| 5 |
import json
|
|
|
|
| 8 |
|
| 9 |
class GroqClient:
|
| 10 |
"""
|
| 11 |
+
3-Agent system for Beck's Cognitive Restructuring Protocol.
|
| 12 |
+
- Agent 1: Warm Questioner (validation + 6 questions)
|
| 13 |
+
- Agent 2: Clinical Summarizer (internal analysis)
|
| 14 |
+
- Agent 3: Treatment Agent (reframe + measurement + action)
|
| 15 |
"""
|
| 16 |
+
|
| 17 |
MODEL = "llama-3.3-70b-versatile"
|
| 18 |
+
|
| 19 |
def __init__(self, api_key: str):
|
| 20 |
if not api_key:
|
| 21 |
raise ValueError("GROQ_API_KEY is required")
|
| 22 |
self.client = Groq(api_key=api_key)
|
| 23 |
print(f"✅ Groq client initialized with model: {self.MODEL}")
|
| 24 |
+
|
| 25 |
+
# ==================== AGENT 1: WARM QUESTIONER ====================
|
| 26 |
+
|
| 27 |
+
def agent1_warm_questioner(
|
| 28 |
self,
|
| 29 |
+
current_state: str,
|
| 30 |
user_message: str,
|
| 31 |
+
beck_data: dict,
|
| 32 |
user_name: str,
|
| 33 |
+
conversation_history: list = None
|
| 34 |
+
) -> str:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
"""
|
| 36 |
+
Agent 1: Asks Beck's questions one at a time in a warm, supportive way.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
current_state: Current Beck protocol state (e.g., "Q1_EVIDENCE_FOR")
|
| 40 |
+
user_message: User's latest response
|
| 41 |
+
beck_data: All collected data so far
|
| 42 |
+
user_name: User's name for personalization
|
| 43 |
+
conversation_history: Recent messages for context
|
| 44 |
+
|
| 45 |
Returns:
|
| 46 |
+
Response string to send to user
|
| 47 |
"""
|
| 48 |
+
from prompts import BECK_STATES
|
| 49 |
+
|
| 50 |
+
state_info = BECK_STATES.get(current_state, {})
|
| 51 |
+
instruction = state_info.get("instruction", "")
|
| 52 |
+
example = state_info.get("example", "")
|
| 53 |
+
|
| 54 |
+
# Build context of what we know so far
|
| 55 |
+
context_parts = []
|
| 56 |
+
if beck_data.get("original_thought"):
|
| 57 |
+
context_parts.append(f"Their thought: \"{beck_data['original_thought']}\"")
|
| 58 |
+
if beck_data.get("initial_belief_rating"):
|
| 59 |
+
context_parts.append(f"Belief rating: {beck_data['initial_belief_rating']}%")
|
| 60 |
+
if beck_data.get("emotion"):
|
| 61 |
+
context_parts.append(f"Emotion: {beck_data['emotion']}")
|
| 62 |
+
if beck_data.get("initial_emotion_intensity"):
|
| 63 |
+
context_parts.append(f"Emotion intensity: {beck_data['initial_emotion_intensity']}%")
|
| 64 |
+
|
| 65 |
+
context_text = "\n".join(context_parts) if context_parts else "Just starting the conversation."
|
| 66 |
+
|
| 67 |
+
history_text = self._format_history(conversation_history) if conversation_history else ""
|
| 68 |
+
|
| 69 |
+
system_prompt = f"""You are a warm, caring CBT companion named Aria talking to {user_name}.
|
| 70 |
+
You are NOT a clinical therapist - you're like a supportive friend who knows CBT techniques.
|
| 71 |
+
|
| 72 |
+
CURRENT STATE: {current_state}
|
| 73 |
+
YOUR TASK: {instruction}
|
| 74 |
+
EXAMPLE RESPONSE: "{example}"
|
| 75 |
+
|
| 76 |
+
WHAT WE KNOW SO FAR:
|
| 77 |
+
{context_text}
|
| 78 |
+
|
| 79 |
+
CRITICAL RULES:
|
| 80 |
+
1. Be WARM and GENTLE - like a caring friend, not a clinician
|
| 81 |
+
2. Keep responses SHORT (1-3 sentences max) - be concise and warm
|
| 82 |
+
3. Ask only ONE thing at a time
|
| 83 |
+
4. Use {user_name}'s name occasionally (not every message)
|
| 84 |
+
5. Use 1-2 emojis to feel warm (💙 🌟 ✨)
|
| 85 |
+
6. VALIDATE before asking - acknowledge what they shared
|
| 86 |
+
7. Don't lecture or be preachy - keep it light
|
| 87 |
+
8. If they give a number (like "80"), acknowledge it warmly before moving on
|
| 88 |
+
9. Don't explain what you're doing - just do it naturally
|
| 89 |
+
|
| 90 |
+
IMPORTANT:
|
| 91 |
+
- If they share something painful, validate it first
|
| 92 |
+
- Match their emotional tone
|
| 93 |
+
- Be conversational, not scripted"""
|
| 94 |
+
|
| 95 |
+
user_prompt = f"""RECENT CONVERSATION:
|
| 96 |
{history_text}
|
| 97 |
|
| 98 |
{user_name}'s latest message: "{user_message}"
|
| 99 |
|
| 100 |
+
Respond as Aria, the warm CBT companion. Remember: short, warm, one question at a time."""
|
| 101 |
|
| 102 |
try:
|
| 103 |
response = self.client.chat.completions.create(
|
|
|
|
| 107 |
{"role": "user", "content": user_prompt}
|
| 108 |
],
|
| 109 |
temperature=0.7,
|
| 110 |
+
max_tokens=300
|
| 111 |
+
)
|
| 112 |
+
return response.choices[0].message.content
|
| 113 |
+
except Exception as e:
|
| 114 |
+
print(f"Agent 1 error: {e}")
|
| 115 |
+
return f"I hear you, {user_name}. 💙 Tell me more about that."
|
| 116 |
+
|
| 117 |
+
# ==================== AGENT 2: CLINICAL SUMMARIZER ====================
|
| 118 |
+
|
| 119 |
+
def agent2_clinical_summarizer(self, beck_data: dict) -> dict:
|
| 120 |
+
"""
|
| 121 |
+
Agent 2: Analyzes all responses and prepares summary for Agent 3.
|
| 122 |
+
This is INTERNAL - user never sees this output.
|
| 123 |
+
|
| 124 |
+
Args:
|
| 125 |
+
beck_data: All collected data from Agent 1
|
| 126 |
+
|
| 127 |
+
Returns:
|
| 128 |
+
dict with analysis for Agent 3
|
| 129 |
+
"""
|
| 130 |
+
system_prompt = """You are a clinical analyzer for a CBT system.
|
| 131 |
+
Analyze the patient's responses and extract key patterns for the treatment agent.
|
| 132 |
+
|
| 133 |
+
Your task:
|
| 134 |
+
1. Identify CONTRADICTIONS (where their evidence contradicts their belief)
|
| 135 |
+
2. Extract their OWN WISDOM (what they'd tell a friend)
|
| 136 |
+
3. Note the COST of their belief (how it hurts them)
|
| 137 |
+
4. Pull their REALISTIC prediction (usually more balanced than their fear)
|
| 138 |
+
5. Suggest elements for a reframe using THEIR OWN WORDS
|
| 139 |
+
|
| 140 |
+
Respond ONLY in JSON format."""
|
| 141 |
+
|
| 142 |
+
user_prompt = f"""Analyze these responses:
|
| 143 |
+
|
| 144 |
+
ORIGINAL THOUGHT: {beck_data.get('original_thought', 'Not captured')}
|
| 145 |
+
INITIAL BELIEF: {beck_data.get('initial_belief_rating', '?')}%
|
| 146 |
+
EMOTION: {beck_data.get('emotion', '?')} at {beck_data.get('initial_emotion_intensity', '?')}%
|
| 147 |
+
|
| 148 |
+
EVIDENCE FOR THE THOUGHT: {beck_data.get('q1_evidence_for', 'Not answered')}
|
| 149 |
+
EVIDENCE AGAINST: {beck_data.get('q1_evidence_against', 'Not answered')}
|
| 150 |
+
ALTERNATIVE EXPLANATION: {beck_data.get('q2_alternative', 'Not answered')}
|
| 151 |
+
WORST CASE: {beck_data.get('q3_worst', 'Not answered')}
|
| 152 |
+
BEST CASE: {beck_data.get('q3_best', 'Not answered')}
|
| 153 |
+
REALISTIC CASE: {beck_data.get('q3_realistic', 'Not answered')}
|
| 154 |
+
EFFECT OF BELIEVING: {beck_data.get('q4_effect', 'Not answered')}
|
| 155 |
+
WHAT THEY'D TELL A FRIEND: {beck_data.get('q5_friend', 'Not answered')}
|
| 156 |
+
WHAT THEY THINK THEY SHOULD DO: {beck_data.get('q6_action', 'Not answered')}
|
| 157 |
+
|
| 158 |
+
Return JSON with:
|
| 159 |
+
{{
|
| 160 |
+
"contradictions": ["list of contradictions between thought and evidence"],
|
| 161 |
+
"patient_wisdom": "what they said they'd tell a friend",
|
| 162 |
+
"cost_of_belief": "how believing this thought hurts them",
|
| 163 |
+
"realistic_prediction": "their realistic outcome prediction",
|
| 164 |
+
"reframe_elements": ["key phrases from their answers to use in reframe"],
|
| 165 |
+
"suggested_balanced_thought": "a balanced thought using their own words"
|
| 166 |
+
}}"""
|
| 167 |
+
|
| 168 |
+
try:
|
| 169 |
+
response = self.client.chat.completions.create(
|
| 170 |
+
model=self.MODEL,
|
| 171 |
+
messages=[
|
| 172 |
+
{"role": "system", "content": system_prompt},
|
| 173 |
+
{"role": "user", "content": user_prompt}
|
| 174 |
+
],
|
| 175 |
+
temperature=0.3,
|
| 176 |
max_tokens=500,
|
| 177 |
response_format={"type": "json_object"}
|
| 178 |
)
|
| 179 |
+
return json.loads(response.choices[0].message.content)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
except Exception as e:
|
| 181 |
+
print(f"Agent 2 error: {e}")
|
| 182 |
return {
|
| 183 |
+
"contradictions": [],
|
| 184 |
+
"patient_wisdom": beck_data.get('q5_friend', ''),
|
| 185 |
+
"cost_of_belief": beck_data.get('q4_effect', ''),
|
| 186 |
+
"realistic_prediction": beck_data.get('q3_realistic', ''),
|
| 187 |
+
"reframe_elements": [],
|
| 188 |
+
"suggested_balanced_thought": "Things are hard right now, but one moment doesn't define everything."
|
| 189 |
}
|
| 190 |
+
|
| 191 |
+
# ==================== AGENT 3: TREATMENT AGENT ====================
|
| 192 |
+
|
| 193 |
+
def agent3_treatment_agent(
|
| 194 |
+
self,
|
| 195 |
+
current_state: str,
|
| 196 |
+
user_message: str,
|
| 197 |
+
beck_data: dict,
|
| 198 |
+
clinical_summary: dict,
|
| 199 |
+
user_name: str,
|
| 200 |
+
conversation_history: list = None
|
| 201 |
+
) -> str:
|
| 202 |
+
"""
|
| 203 |
+
Agent 3: Delivers the reframe, measures change, creates action plan.
|
| 204 |
+
|
| 205 |
+
Args:
|
| 206 |
+
current_state: Current state (DELIVER_REFRAME, RATE_NEW_THOUGHT, etc.)
|
| 207 |
+
user_message: User's latest response
|
| 208 |
+
beck_data: All collected data
|
| 209 |
+
clinical_summary: Analysis from Agent 2
|
| 210 |
+
user_name: User's name
|
| 211 |
+
conversation_history: Recent messages
|
| 212 |
+
|
| 213 |
+
Returns:
|
| 214 |
+
Response string to send to user
|
| 215 |
+
"""
|
| 216 |
+
from prompts import BECK_STATES
|
| 217 |
+
|
| 218 |
+
state_info = BECK_STATES.get(current_state, {})
|
| 219 |
+
instruction = state_info.get("instruction", "")
|
| 220 |
+
|
| 221 |
+
# Build improvement stats if available
|
| 222 |
+
improvement_text = ""
|
| 223 |
+
if beck_data.get('initial_belief_rating') and beck_data.get('final_belief_rating'):
|
| 224 |
+
belief_change = beck_data['initial_belief_rating'] - beck_data['final_belief_rating']
|
| 225 |
+
improvement_text += f"Belief dropped from {beck_data['initial_belief_rating']}% to {beck_data['final_belief_rating']}% ({belief_change}% improvement). "
|
| 226 |
+
if beck_data.get('initial_emotion_intensity') and beck_data.get('final_emotion_intensity'):
|
| 227 |
+
emotion_change = beck_data['initial_emotion_intensity'] - beck_data['final_emotion_intensity']
|
| 228 |
+
improvement_text += f"Emotion dropped from {beck_data['initial_emotion_intensity']}% to {beck_data['final_emotion_intensity']}% ({emotion_change}% improvement)."
|
| 229 |
+
|
| 230 |
+
history_text = self._format_history(conversation_history) if conversation_history else ""
|
| 231 |
+
|
| 232 |
+
system_prompt = f"""You are Aria, a warm CBT companion helping {user_name} complete cognitive restructuring.
|
| 233 |
+
|
| 234 |
+
CURRENT STATE: {current_state}
|
| 235 |
+
YOUR TASK: {instruction}
|
| 236 |
+
|
| 237 |
+
ORIGINAL THOUGHT: "{beck_data.get('original_thought', '')}"
|
| 238 |
+
INITIAL BELIEF: {beck_data.get('initial_belief_rating', '?')}%
|
| 239 |
+
EMOTION: {beck_data.get('emotion', '?')} at {beck_data.get('initial_emotion_intensity', '?')}%
|
| 240 |
+
|
| 241 |
+
CLINICAL ANALYSIS:
|
| 242 |
+
- Contradictions found: {clinical_summary.get('contradictions', [])}
|
| 243 |
+
- Their own wisdom: "{clinical_summary.get('patient_wisdom', '')}"
|
| 244 |
+
- Cost of belief: "{clinical_summary.get('cost_of_belief', '')}"
|
| 245 |
+
- Realistic prediction: "{clinical_summary.get('realistic_prediction', '')}"
|
| 246 |
+
- Suggested reframe: "{clinical_summary.get('suggested_balanced_thought', '')}"
|
| 247 |
+
|
| 248 |
+
{f"IMPROVEMENT SO FAR: {improvement_text}" if improvement_text else ""}
|
| 249 |
+
|
| 250 |
+
CRITICAL RULES:
|
| 251 |
+
1. Use THEIR OWN WORDS when possible - this makes the reframe feel like theirs
|
| 252 |
+
2. Be warm and celebratory about any progress
|
| 253 |
+
3. Keep responses SHORT but meaningful
|
| 254 |
+
4. If delivering reframe, list what THEY discovered first
|
| 255 |
+
5. For re-ratings, acknowledge the number warmly
|
| 256 |
+
6. 10%+ improvement = SUCCESS - celebrate it!
|
| 257 |
+
7. Use 1-2 emojis (💙 🌟 ✨)
|
| 258 |
+
8. Don't be clinical or lecture-y"""
|
| 259 |
+
|
| 260 |
+
user_prompt = f"""RECENT CONVERSATION:
|
| 261 |
+
{history_text}
|
| 262 |
+
|
| 263 |
+
{user_name}'s latest message: "{user_message}"
|
| 264 |
+
|
| 265 |
+
Respond as Aria for the {current_state} state."""
|
| 266 |
+
|
| 267 |
+
try:
|
| 268 |
+
response = self.client.chat.completions.create(
|
| 269 |
+
model=self.MODEL,
|
| 270 |
+
messages=[
|
| 271 |
+
{"role": "system", "content": system_prompt},
|
| 272 |
+
{"role": "user", "content": user_prompt}
|
| 273 |
+
],
|
| 274 |
+
temperature=0.7,
|
| 275 |
+
max_tokens=400
|
| 276 |
+
)
|
| 277 |
+
return response.choices[0].message.content
|
| 278 |
+
except Exception as e:
|
| 279 |
+
print(f"Agent 3 error: {e}")
|
| 280 |
+
return f"You've done really good work here, {user_name}. 💙"
|
| 281 |
+
|
| 282 |
+
# ==================== SUPPORTIVE RESPONSE (G0 - No Distortion) ====================
|
| 283 |
+
|
| 284 |
def generate_supportive_response(
|
| 285 |
self,
|
| 286 |
user_message: str,
|
| 287 |
conversation_history: list,
|
| 288 |
user_name: str
|
| 289 |
) -> str:
|
| 290 |
+
"""For G0 cases - just listen supportively, no intervention."""
|
| 291 |
+
|
|
|
|
|
|
|
|
|
|
| 292 |
history_text = self._format_history(conversation_history)
|
| 293 |
+
|
| 294 |
+
system_prompt = f"""You are Aria, a warm companion talking to {user_name}.
|
| 295 |
They're sharing something with you, and right now they don't seem caught in negative thinking.
|
| 296 |
|
| 297 |
YOUR TASK:
|
|
|
|
| 299 |
2. Gently ask if there's anything else on their mind
|
| 300 |
3. Keep it friendly and brief
|
| 301 |
|
| 302 |
+
RULES:
|
| 303 |
- Use {user_name}'s name occasionally
|
| 304 |
- Keep responses SHORT (1-2 sentences max)
|
| 305 |
+
- Use 1-2 emojis (💙 🌟 ✨)
|
| 306 |
- Don't lecture - just be a supportive friend"""
|
| 307 |
|
| 308 |
+
user_prompt = f"""CONVERSATION:
|
| 309 |
{history_text}
|
| 310 |
|
| 311 |
{user_name}'s latest message: "{user_message}"
|
|
|
|
| 320 |
{"role": "user", "content": user_prompt}
|
| 321 |
],
|
| 322 |
temperature=0.7,
|
| 323 |
+
max_tokens=200
|
| 324 |
)
|
|
|
|
| 325 |
return response.choices[0].message.content
|
|
|
|
| 326 |
except Exception as e:
|
| 327 |
+
print(f"Supportive response error: {e}")
|
| 328 |
+
return f"I hear you, {user_name}. Thanks for sharing that with me. 💙"
|
| 329 |
+
|
| 330 |
+
# ==================== HELPER METHODS ====================
|
| 331 |
+
|
| 332 |
def _format_history(self, history: list) -> str:
|
| 333 |
+
"""Format conversation history for prompts."""
|
| 334 |
if not history:
|
| 335 |
return "(This is the start of the conversation)"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 336 |
|
| 337 |
+
formatted = []
|
| 338 |
+
for msg in history[-6:]: # Last 6 messages
|
| 339 |
+
role = "User" if msg.get("role") == "user" else "Aria"
|
| 340 |
+
formatted.append(f"{role}: {msg.get('content', '')}")
|
| 341 |
|
| 342 |
+
return "\n".join(formatted)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
prompts.py
CHANGED
|
@@ -1,108 +1,183 @@
|
|
| 1 |
"""
|
| 2 |
-
|
| 3 |
-
|
| 4 |
"""
|
| 5 |
|
| 6 |
-
#
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
"
|
| 87 |
-
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
|
| 90 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
|
| 96 |
-
|
|
|
|
|
|
|
|
|
|
| 97 |
|
| 98 |
-
|
|
|
|
|
|
|
| 99 |
|
| 100 |
-
|
|
|
|
|
|
|
|
|
|
| 101 |
|
| 102 |
-
|
| 103 |
-
|
|
|
|
|
|
|
| 104 |
|
| 105 |
-
# Natural conversation endings
|
| 106 |
NATURAL_ENDINGS = [
|
| 107 |
"Take care of yourself, {name}! I'm always here when you need to talk. 💙",
|
| 108 |
"Glad I could help, {name}! Remember, you're doing better than you think. See you next time! 💙",
|
|
@@ -110,11 +185,3 @@ NATURAL_ENDINGS = [
|
|
| 110 |
"Take it easy, {name}! You've got this. See you soon! 💙",
|
| 111 |
"Catch you later, {name}! Remember, one step at a time. 💙"
|
| 112 |
]
|
| 113 |
-
|
| 114 |
-
# Greeting messages
|
| 115 |
-
GREETINGS = [
|
| 116 |
-
"Hey {name}! 👋 What's on your mind today?",
|
| 117 |
-
"Hi {name}! How are you doing? I'm here to listen.",
|
| 118 |
-
"Hey there, {name}! 💙 What would you like to talk about?",
|
| 119 |
-
"Hi {name}! Good to see you. What's going on?"
|
| 120 |
-
]
|
|
|
|
| 1 |
"""
|
| 2 |
+
Beck CBT Protocol - States and Prompts
|
| 3 |
+
Based on Beck Institute Thought Record Worksheet (J. Beck, 2020)
|
| 4 |
"""
|
| 5 |
|
| 6 |
+
# All possible states in the Beck protocol
|
| 7 |
+
BECK_STATES = {
|
| 8 |
+
# Agent 1: Warm Questioner States
|
| 9 |
+
"VALIDATE": {
|
| 10 |
+
"agent": 1,
|
| 11 |
+
"instruction": "Acknowledge their pain warmly. Don't ask questions yet. Just validate.",
|
| 12 |
+
"example": "That sounds really heavy. Thank you for sharing that with me. 💙",
|
| 13 |
+
"next_state": "RATE_BELIEF",
|
| 14 |
+
"saves_field": None
|
| 15 |
+
},
|
| 16 |
+
"RATE_BELIEF": {
|
| 17 |
+
"agent": 1,
|
| 18 |
+
"instruction": "Ask how much they believe the thought, 0-100%. Be warm about it.",
|
| 19 |
+
"example": "When that thought hits you — how much does it feel true? Like 0 being 'not at all' and 100 being 'completely certain'?",
|
| 20 |
+
"next_state": "CAPTURE_EMOTION",
|
| 21 |
+
"saves_field": "initial_belief_rating"
|
| 22 |
+
},
|
| 23 |
+
"CAPTURE_EMOTION": {
|
| 24 |
+
"agent": 1,
|
| 25 |
+
"instruction": "Ask what emotion comes up with this thought.",
|
| 26 |
+
"example": "And when you think this thought, what emotion shows up? Sadness? Anxiety? Anger? Something else?",
|
| 27 |
+
"next_state": "RATE_EMOTION",
|
| 28 |
+
"saves_field": "emotion"
|
| 29 |
+
},
|
| 30 |
+
"RATE_EMOTION": {
|
| 31 |
+
"agent": 1,
|
| 32 |
+
"instruction": "Ask how intense the emotion is, 0-100%.",
|
| 33 |
+
"example": "How intense is that feeling right now, 0 to 100?",
|
| 34 |
+
"next_state": "Q1_EVIDENCE_FOR",
|
| 35 |
+
"saves_field": "initial_emotion_intensity"
|
| 36 |
+
},
|
| 37 |
+
"Q1_EVIDENCE_FOR": {
|
| 38 |
+
"agent": 1,
|
| 39 |
+
"instruction": "Ask what happened that made them feel this way. Get the evidence FOR the thought.",
|
| 40 |
+
"example": "I'd like to understand better. What happened that made this thought feel so true?",
|
| 41 |
+
"next_state": "Q1_EVIDENCE_AGAINST",
|
| 42 |
+
"saves_field": "q1_evidence_for"
|
| 43 |
+
},
|
| 44 |
+
"Q1_EVIDENCE_AGAINST": {
|
| 45 |
+
"agent": 1,
|
| 46 |
+
"instruction": "Gently ask if there's any evidence against the thought. Any exceptions.",
|
| 47 |
+
"example": "That makes sense. Can I ask — has there been any time, even small, when things went differently? When you didn't fail, or it worked out okay?",
|
| 48 |
+
"next_state": "Q2_ALTERNATIVE",
|
| 49 |
+
"saves_field": "q1_evidence_against"
|
| 50 |
+
},
|
| 51 |
+
"Q2_ALTERNATIVE": {
|
| 52 |
+
"agent": 1,
|
| 53 |
+
"instruction": "Ask if there could be another explanation for what happened.",
|
| 54 |
+
"example": "If your best friend was in this exact situation, what other explanation might you suggest to them?",
|
| 55 |
+
"next_state": "Q3_WORST",
|
| 56 |
+
"saves_field": "q2_alternative"
|
| 57 |
+
},
|
| 58 |
+
"Q3_WORST": {
|
| 59 |
+
"agent": 1,
|
| 60 |
+
"instruction": "Ask what the worst case scenario would be.",
|
| 61 |
+
"example": "Let's imagine together for a moment. What's the worst that could happen here?",
|
| 62 |
+
"next_state": "Q3_BEST",
|
| 63 |
+
"saves_field": "q3_worst"
|
| 64 |
+
},
|
| 65 |
+
"Q3_BEST": {
|
| 66 |
+
"agent": 1,
|
| 67 |
+
"instruction": "Ask what the best case scenario would be.",
|
| 68 |
+
"example": "And what's the best that could happen?",
|
| 69 |
+
"next_state": "Q3_REALISTIC",
|
| 70 |
+
"saves_field": "q3_best"
|
| 71 |
+
},
|
| 72 |
+
"Q3_REALISTIC": {
|
| 73 |
+
"agent": 1,
|
| 74 |
+
"instruction": "Ask what will realistically/probably happen.",
|
| 75 |
+
"example": "And honestly — what do you think will probably happen? The realistic middle ground?",
|
| 76 |
+
"next_state": "Q4_EFFECT",
|
| 77 |
+
"saves_field": "q3_realistic"
|
| 78 |
+
},
|
| 79 |
+
"Q4_EFFECT": {
|
| 80 |
+
"agent": 1,
|
| 81 |
+
"instruction": "Ask how believing this thought affects them. Does it help or hurt?",
|
| 82 |
+
"example": "When you carry this thought around with you, how does it affect you? Does believing it help you in any way, or does it mostly hurt?",
|
| 83 |
+
"next_state": "Q5_FRIEND",
|
| 84 |
+
"saves_field": "q4_effect"
|
| 85 |
+
},
|
| 86 |
+
"Q5_FRIEND": {
|
| 87 |
+
"agent": 1,
|
| 88 |
+
"instruction": "Ask what they would tell a friend who had this exact thought.",
|
| 89 |
+
"example": "Last question in this part. If someone you really loved — a close friend, a sibling — came to you feeling exactly this way, with this exact thought... what would you say to them?",
|
| 90 |
+
"next_state": "Q6_ACTION",
|
| 91 |
+
"saves_field": "q5_friend"
|
| 92 |
+
},
|
| 93 |
+
"Q6_ACTION": {
|
| 94 |
+
"agent": 1,
|
| 95 |
+
"instruction": "Ask what would be good to do about the situation.",
|
| 96 |
+
"example": "And what do you think would be good to do about this situation?",
|
| 97 |
+
"next_state": "SUMMARIZING",
|
| 98 |
+
"saves_field": "q6_action"
|
| 99 |
+
},
|
| 100 |
|
| 101 |
+
# Agent 2: Internal summarization (no user interaction)
|
| 102 |
+
"SUMMARIZING": {
|
| 103 |
+
"agent": 2,
|
| 104 |
+
"instruction": "INTERNAL: Analyze all responses, extract contradictions, prepare for Agent 3.",
|
| 105 |
+
"next_state": "DELIVER_REFRAME",
|
| 106 |
+
"saves_field": None
|
| 107 |
+
},
|
| 108 |
|
| 109 |
+
# Agent 3: Treatment Agent States
|
| 110 |
+
"DELIVER_REFRAME": {
|
| 111 |
+
"agent": 3,
|
| 112 |
+
"instruction": "Summarize what they discovered and present a balanced thought using THEIR words.",
|
| 113 |
+
"example": "I really appreciate you walking through this with me. Here's what I noticed from YOUR words: [list their insights]. So maybe a thought that fits better might be: '[balanced thought]'. How does that land?",
|
| 114 |
+
"next_state": "RATE_NEW_THOUGHT",
|
| 115 |
+
"saves_field": "adaptive_thought"
|
| 116 |
+
},
|
| 117 |
+
"RATE_NEW_THOUGHT": {
|
| 118 |
+
"agent": 3,
|
| 119 |
+
"instruction": "Ask how much they believe the new balanced thought, 0-100%.",
|
| 120 |
+
"example": "How much do you believe that new thought? 0 to 100?",
|
| 121 |
+
"next_state": "RERATE_ORIGINAL",
|
| 122 |
+
"saves_field": "new_thought_belief"
|
| 123 |
+
},
|
| 124 |
+
"RERATE_ORIGINAL": {
|
| 125 |
+
"agent": 3,
|
| 126 |
+
"instruction": "Ask how much they NOW believe the original thought. Should be lower.",
|
| 127 |
+
"example": "Now thinking back to the original thought — '[original]' — how much do you believe that one now?",
|
| 128 |
+
"next_state": "RERATE_EMOTION",
|
| 129 |
+
"saves_field": "final_belief_rating"
|
| 130 |
+
},
|
| 131 |
+
"RERATE_EMOTION": {
|
| 132 |
+
"agent": 3,
|
| 133 |
+
"instruction": "Ask how intense the emotion is now. Should be lower.",
|
| 134 |
+
"example": "And that [emotion] you mentioned — how intense is it now, 0 to 100?",
|
| 135 |
+
"next_state": "ACTION_PLAN",
|
| 136 |
+
"saves_field": "final_emotion_intensity"
|
| 137 |
+
},
|
| 138 |
+
"ACTION_PLAN": {
|
| 139 |
+
"agent": 3,
|
| 140 |
+
"instruction": "Help them create a small behavioral experiment or action plan.",
|
| 141 |
+
"example": "You've made some real shifts today. Is there one small thing you could do this week to test this new perspective? Even something tiny counts.",
|
| 142 |
+
"next_state": "COMPLETE",
|
| 143 |
+
"saves_field": "action_plan"
|
| 144 |
+
},
|
| 145 |
+
"COMPLETE": {
|
| 146 |
+
"agent": 3,
|
| 147 |
+
"instruction": "Celebrate their progress. Summarize the improvement. End warmly.",
|
| 148 |
+
"example": "You started at [X]% and you're at [Y]% now — that's real movement. 💙 You did good work today. Remember, you can always come back when you need to talk.",
|
| 149 |
+
"next_state": None,
|
| 150 |
+
"saves_field": None
|
| 151 |
+
}
|
| 152 |
+
}
|
| 153 |
|
| 154 |
+
# States handled by each agent
|
| 155 |
+
AGENT1_STATES = [
|
| 156 |
+
"VALIDATE", "RATE_BELIEF", "CAPTURE_EMOTION", "RATE_EMOTION",
|
| 157 |
+
"Q1_EVIDENCE_FOR", "Q1_EVIDENCE_AGAINST", "Q2_ALTERNATIVE",
|
| 158 |
+
"Q3_WORST", "Q3_BEST", "Q3_REALISTIC", "Q4_EFFECT", "Q5_FRIEND", "Q6_ACTION"
|
| 159 |
+
]
|
| 160 |
|
| 161 |
+
AGENT3_STATES = [
|
| 162 |
+
"DELIVER_REFRAME", "RATE_NEW_THOUGHT", "RERATE_ORIGINAL",
|
| 163 |
+
"RERATE_EMOTION", "ACTION_PLAN", "COMPLETE"
|
| 164 |
+
]
|
| 165 |
|
| 166 |
+
def get_state_info(state: str) -> dict:
|
| 167 |
+
"""Get info about a Beck state."""
|
| 168 |
+
return BECK_STATES.get(state, {})
|
| 169 |
|
| 170 |
+
def get_next_state(current_state: str) -> str:
|
| 171 |
+
"""Get the next state in the protocol."""
|
| 172 |
+
state_info = BECK_STATES.get(current_state, {})
|
| 173 |
+
return state_info.get("next_state")
|
| 174 |
|
| 175 |
+
def get_field_to_save(state: str) -> str:
|
| 176 |
+
"""Get which database field this state saves to."""
|
| 177 |
+
state_info = BECK_STATES.get(state, {})
|
| 178 |
+
return state_info.get("saves_field")
|
| 179 |
|
| 180 |
+
# Natural conversation endings (kept for compatibility)
|
| 181 |
NATURAL_ENDINGS = [
|
| 182 |
"Take care of yourself, {name}! I'm always here when you need to talk. 💙",
|
| 183 |
"Glad I could help, {name}! Remember, you're doing better than you think. See you next time! 💙",
|
|
|
|
| 185 |
"Take it easy, {name}! You've got this. See you soon! 💙",
|
| 186 |
"Catch you later, {name}! Remember, one step at a time. 💙"
|
| 187 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
test_beck_protocol.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Quick test script for Beck Protocol implementation
|
| 3 |
+
Run this to verify the 3-agent system works
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
from groq_client import GroqClient
|
| 8 |
+
from classifier import DistortionClassifier
|
| 9 |
+
|
| 10 |
+
# Test the components
|
| 11 |
+
def test_classifier():
|
| 12 |
+
"""Test that classifier still works for binary detection"""
|
| 13 |
+
print("=" * 60)
|
| 14 |
+
print("TEST 1: CLASSIFIER (Binary Detection)")
|
| 15 |
+
print("=" * 60)
|
| 16 |
+
|
| 17 |
+
classifier = DistortionClassifier()
|
| 18 |
+
|
| 19 |
+
test_cases = [
|
| 20 |
+
("I had a nice day today", "G0"),
|
| 21 |
+
("I'm such a failure, I always mess up", "G1 or G2"),
|
| 22 |
+
("Nobody likes me, everyone thinks I'm weird", "G2 or G3"),
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
for text, expected in test_cases:
|
| 26 |
+
result = classifier.classify(text)
|
| 27 |
+
print(f"\nText: {text[:50]}...")
|
| 28 |
+
print(f" Predicted: {result['group']} ({result['confidence']:.2%})")
|
| 29 |
+
print(f" Expected: {expected}")
|
| 30 |
+
|
| 31 |
+
print("\n✅ Classifier test complete\n")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_agent1():
|
| 35 |
+
"""Test Agent 1 - Warm Questioner"""
|
| 36 |
+
print("=" * 60)
|
| 37 |
+
print("TEST 2: AGENT 1 - Warm Questioner")
|
| 38 |
+
print("=" * 60)
|
| 39 |
+
|
| 40 |
+
api_key = os.environ.get("GROQ_API_KEY")
|
| 41 |
+
if not api_key:
|
| 42 |
+
print("❌ GROQ_API_KEY not set - skipping LLM tests")
|
| 43 |
+
return
|
| 44 |
+
|
| 45 |
+
client = GroqClient(api_key)
|
| 46 |
+
|
| 47 |
+
# Simulate VALIDATE state
|
| 48 |
+
beck_data = {
|
| 49 |
+
"original_thought": "I'm a complete failure",
|
| 50 |
+
"beck_state": "VALIDATE"
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
response = client.agent1_warm_questioner(
|
| 54 |
+
current_state="VALIDATE",
|
| 55 |
+
user_message="I'm a complete failure",
|
| 56 |
+
beck_data=beck_data,
|
| 57 |
+
user_name="TestUser",
|
| 58 |
+
conversation_history=[]
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
print(f"\nAgent 1 response (VALIDATE state):")
|
| 62 |
+
print(f" {response}")
|
| 63 |
+
|
| 64 |
+
# Test Q1_EVIDENCE_FOR
|
| 65 |
+
beck_data["beck_state"] = "Q1_EVIDENCE_FOR"
|
| 66 |
+
beck_data["initial_belief_rating"] = 85
|
| 67 |
+
beck_data["emotion"] = "sadness"
|
| 68 |
+
beck_data["initial_emotion_intensity"] = 75
|
| 69 |
+
|
| 70 |
+
response = client.agent1_warm_questioner(
|
| 71 |
+
current_state="Q1_EVIDENCE_FOR",
|
| 72 |
+
user_message="85",
|
| 73 |
+
beck_data=beck_data,
|
| 74 |
+
user_name="TestUser",
|
| 75 |
+
conversation_history=[
|
| 76 |
+
{"role": "user", "content": "I'm a complete failure"},
|
| 77 |
+
{"role": "assistant", "content": "That sounds really heavy..."}
|
| 78 |
+
]
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
print(f"\nAgent 1 response (Q1_EVIDENCE_FOR state):")
|
| 82 |
+
print(f" {response}")
|
| 83 |
+
|
| 84 |
+
print("\n✅ Agent 1 test complete\n")
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def test_agent2():
    """Test Agent 2 - Clinical Summarizer"""
    print("=" * 60)
    print("TEST 3: AGENT 2 - Clinical Summarizer")
    print("=" * 60)

    # LLM tests need a live Groq key; skip gracefully when it is absent.
    groq_key = os.environ.get("GROQ_API_KEY")
    if not groq_key:
        print("❌ GROQ_API_KEY not set - skipping LLM tests")
        return

    groq = GroqClient(groq_key)

    # A fully-populated set of Beck answers, as collected at SUMMARIZING time.
    collected = {
        "original_thought": "I always fail at everything",
        "initial_belief_rating": 85,
        "emotion": "sadness",
        "initial_emotion_intensity": 70,
        "q1_evidence_for": "I failed my exam yesterday",
        "q1_evidence_against": "Well, I did get a good grade on my report last week",
        "q2_alternative": "Maybe I was just tired and didn't prepare enough",
        "q3_worst": "I'll fail out of school",
        "q3_best": "I'll do better next time",
        "q3_realistic": "I'll probably pass the class even if I don't ace everything",
        "q4_effect": "It makes me feel awful and want to give up",
        "q5_friend": "I'd tell them one test doesn't define them",
        "q6_action": "Maybe study more next time",
    }

    summary = groq.agent2_clinical_summarizer(collected)

    # Render each summary field; contradictions default to a list, the rest
    # to the "N/A" placeholder.
    print("\nAgent 2 Clinical Summary:")
    fields = [
        ("Contradictions", "contradictions", []),
        ("Patient Wisdom", "patient_wisdom", "N/A"),
        ("Cost of Belief", "cost_of_belief", "N/A"),
        ("Realistic Prediction", "realistic_prediction", "N/A"),
        ("Suggested Reframe", "suggested_balanced_thought", "N/A"),
    ]
    for label, key, default in fields:
        print(f"  {label}: {summary.get(key, default)}")

    print("\n✅ Agent 2 test complete\n")
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def test_agent3():
    """Test Agent 3 - Treatment Agent"""
    print("=" * 60)
    print("TEST 4: AGENT 3 - Treatment Agent")
    print("=" * 60)

    # LLM tests need a live Groq key; skip gracefully when it is absent.
    groq_key = os.environ.get("GROQ_API_KEY")
    if not groq_key:
        print("❌ GROQ_API_KEY not set - skipping LLM tests")
        return

    groq = GroqClient(groq_key)

    # Minimal session context plus the clinical summary produced by Agent 2,
    # as they would look when the protocol reaches DELIVER_REFRAME.
    session_data = {
        "original_thought": "I always fail at everything",
        "initial_belief_rating": 85,
        "emotion": "sadness",
        "initial_emotion_intensity": 70,
    }

    summary = {
        "contradictions": ["Said 'always fails' but got good grade last week"],
        "patient_wisdom": "One test doesn't define them",
        "cost_of_belief": "Makes them feel awful and want to give up",
        "realistic_prediction": "Will probably pass the class",
        "suggested_balanced_thought": "I failed this test, but I've succeeded before and can improve",
    }

    reply = groq.agent3_treatment_agent(
        current_state="DELIVER_REFRAME",
        user_message="I'm ready to hear it",
        beck_data=session_data,
        clinical_summary=summary,
        user_name="TestUser",
        conversation_history=[],
    )

    print(f"\nAgent 3 response (DELIVER_REFRAME):")
    print(f"  {reply}")

    print("\n✅ Agent 3 test complete\n")
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def test_state_flow():
    """Test state machine flow"""
    print("=" * 60)
    print("TEST 5: STATE MACHINE FLOW")
    print("=" * 60)

    # Imported here so the module can be loaded without the prompts package.
    from prompts import BECK_STATES, get_next_state, get_field_to_save

    # Walk a representative sample of protocol states and show, for each,
    # where the machine transitions and which beck_data field it persists.
    sample_states = (
        "VALIDATE",
        "RATE_BELIEF",
        "CAPTURE_EMOTION",
        "Q1_EVIDENCE_FOR",
        "SUMMARIZING",
        "DELIVER_REFRAME",
        "COMPLETE",
    )

    for current in sample_states:
        print(f"\n{current}")
        print(f"  → Next: {get_next_state(current)}")
        print(f"  → Saves to: {get_field_to_save(current)}")

    print("\n✅ State flow test complete\n")
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def test_rating_extraction():
|
| 201 |
+
"""Test rating extraction from messages"""
|
| 202 |
+
print("=" * 60)
|
| 203 |
+
print("TEST 6: RATING EXTRACTION")
|
| 204 |
+
print("=" * 60)
|
| 205 |
+
|
| 206 |
+
import re
|
| 207 |
+
|
| 208 |
+
def extract_rating(message: str) -> int:
|
| 209 |
+
numbers = re.findall(r'\d+', message)
|
| 210 |
+
for num in numbers:
|
| 211 |
+
n = int(num)
|
| 212 |
+
if 0 <= n <= 100:
|
| 213 |
+
return n
|
| 214 |
+
return None
|
| 215 |
+
|
| 216 |
+
test_cases = [
|
| 217 |
+
("85", 85),
|
| 218 |
+
("I'd say about 70", 70),
|
| 219 |
+
("100%", 100),
|
| 220 |
+
("Maybe like... 50?", 50),
|
| 221 |
+
("Not sure, somewhere around 0", 0),
|
| 222 |
+
("I don't know", None),
|
| 223 |
+
]
|
| 224 |
+
|
| 225 |
+
for text, expected in test_cases:
|
| 226 |
+
result = extract_rating(text)
|
| 227 |
+
status = "✅" if result == expected else "❌"
|
| 228 |
+
print(f"{status} '{text}' → {result} (expected {expected})")
|
| 229 |
+
|
| 230 |
+
print("\n✅ Rating extraction test complete\n")
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
if __name__ == "__main__":
    print("\n" + "=" * 60)
    print("BECK PROTOCOL IMPLEMENTATION - TEST SUITE")
    print("=" * 60 + "\n")

    # Execute each test section in protocol order.
    for run_test in (
        test_classifier,
        test_agent1,
        test_agent2,
        test_agent3,
        test_state_flow,
        test_rating_extraction,
    ):
        run_test()

    print("\n" + "=" * 60)
    print("ALL TESTS COMPLETE")
    print("=" * 60)
    print("\nNext steps:")
    print("1. Start the Flask server: python app.py")
    print("2. Test via API with a real session")
    print("3. Monitor database for beck_sessions table")
    print("\n")
|