Spaces:
Runtime error
Runtime error
final
#1
by
othdu
- opened
- .dockerignore +0 -63
- .env +0 -26
- .gitattributes +35 -35
- .gitignore +0 -1
- Dockerfile +0 -49
- README.md +10 -10
- app.py +0 -249
- chatbot.py +0 -930
- conversation_flow.py +0 -467
- guidelines.txt +0 -107
- hf_spaces.py +0 -38
- requirements.txt +0 -28
- start.sh +0 -7
.dockerignore
DELETED
|
@@ -1,63 +0,0 @@
|
|
| 1 |
-
# Git
|
| 2 |
-
.git
|
| 3 |
-
.gitignore
|
| 4 |
-
|
| 5 |
-
# Python
|
| 6 |
-
__pycache__/
|
| 7 |
-
*.py[cod]
|
| 8 |
-
*$py.class
|
| 9 |
-
*.so
|
| 10 |
-
.Python
|
| 11 |
-
env/
|
| 12 |
-
build/
|
| 13 |
-
develop-eggs/
|
| 14 |
-
dist/
|
| 15 |
-
downloads/
|
| 16 |
-
eggs/
|
| 17 |
-
.eggs/
|
| 18 |
-
lib/
|
| 19 |
-
lib64/
|
| 20 |
-
parts/
|
| 21 |
-
sdist/
|
| 22 |
-
var/
|
| 23 |
-
*.egg-info/
|
| 24 |
-
.installed.cfg
|
| 25 |
-
*.egg
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
# IDE
|
| 29 |
-
.idea/
|
| 30 |
-
.vscode/
|
| 31 |
-
*.swp
|
| 32 |
-
*.swo
|
| 33 |
-
|
| 34 |
-
# Logs
|
| 35 |
-
*.log
|
| 36 |
-
logs/
|
| 37 |
-
|
| 38 |
-
# Local development
|
| 39 |
-
.env
|
| 40 |
-
.env.local
|
| 41 |
-
.env.development
|
| 42 |
-
.env.test
|
| 43 |
-
.env.production
|
| 44 |
-
|
| 45 |
-
# Test files
|
| 46 |
-
tests/
|
| 47 |
-
test_*.py
|
| 48 |
-
|
| 49 |
-
# Documentation
|
| 50 |
-
docs/
|
| 51 |
-
*.md
|
| 52 |
-
!README.md
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
.cache/
|
| 56 |
-
.pytest_cache/
|
| 57 |
-
.mypy_cache/
|
| 58 |
-
|
| 59 |
-
# Session data
|
| 60 |
-
session_data/
|
| 61 |
-
session_summaries/
|
| 62 |
-
vector_db/
|
| 63 |
-
models/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.env
DELETED
|
@@ -1,26 +0,0 @@
|
|
| 1 |
-
# Model Configuration
|
| 2 |
-
MODEL_NAME=meta-llama/Llama-3.2-3B-Instruct
|
| 3 |
-
PEFT_MODEL_PATH=llama_fine_tuned
|
| 4 |
-
GUIDELINES_PATH=guidelines.txt
|
| 5 |
-
|
| 6 |
-
# API Configuration
|
| 7 |
-
API_HOST=0.0.0.0
|
| 8 |
-
API_PORT=8080
|
| 9 |
-
DEBUG=False
|
| 10 |
-
|
| 11 |
-
ALLOWED_ORIGINS=http://localhost:8000
|
| 12 |
-
|
| 13 |
-
# Logging
|
| 14 |
-
LOG_LEVEL=INFO
|
| 15 |
-
LOG_FILE=mental_health_chatbot.log
|
| 16 |
-
|
| 17 |
-
# Additional Configuration
|
| 18 |
-
MAX_SESSION_DURATION=45 # in minutes
|
| 19 |
-
MAX_MESSAGES_PER_SESSION=100000
|
| 20 |
-
SESSION_TIMEOUT=44 # in minutes
|
| 21 |
-
EMOTION_THRESHOLD=0.3 # minimum confidence for emotion detection
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
PORT= 8000
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitattributes
CHANGED
|
@@ -1,35 +1,35 @@
|
|
| 1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.gitignore
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
offload/
|
|
|
|
|
|
Dockerfile
DELETED
|
@@ -1,49 +0,0 @@
|
|
| 1 |
-
# Use Python 3.9 slim image
|
| 2 |
-
FROM python:3.9-slim
|
| 3 |
-
|
| 4 |
-
# Install system dependencies
|
| 5 |
-
RUN apt-get update && apt-get install -y \
|
| 6 |
-
build-essential \
|
| 7 |
-
&& rm -rf /var/lib/apt/lists/*
|
| 8 |
-
|
| 9 |
-
# Create necessary directories and set permissions
|
| 10 |
-
RUN mkdir -p /tmp/huggingface && \
|
| 11 |
-
chmod -R 777 /tmp/huggingface
|
| 12 |
-
|
| 13 |
-
# Create a non-root user
|
| 14 |
-
RUN useradd -m -s /bin/bash user && \
|
| 15 |
-
chown -R user:user /tmp/huggingface
|
| 16 |
-
|
| 17 |
-
USER user
|
| 18 |
-
ENV HOME=/home/user \
|
| 19 |
-
PATH=/home/user/.local/bin:$PATH
|
| 20 |
-
|
| 21 |
-
# Set working directory
|
| 22 |
-
WORKDIR $HOME/app
|
| 23 |
-
|
| 24 |
-
# Create app directories
|
| 25 |
-
RUN mkdir -p $HOME/app/session_data $HOME/app/session_summaries $HOME/app/vector_db $HOME/app/models
|
| 26 |
-
|
| 27 |
-
# Copy requirements first for better caching
|
| 28 |
-
COPY --chown=user:user requirements.txt .
|
| 29 |
-
RUN pip install --user --no-cache-dir -r requirements.txt
|
| 30 |
-
|
| 31 |
-
# Copy the rest of the application
|
| 32 |
-
COPY --chown=user:user . .
|
| 33 |
-
|
| 34 |
-
# Make start.sh executable
|
| 35 |
-
RUN chmod +x start.sh
|
| 36 |
-
|
| 37 |
-
# Set environment variables
|
| 38 |
-
ENV PORT=7860
|
| 39 |
-
ENV TRANSFORMERS_CACHE=/tmp/huggingface
|
| 40 |
-
ENV HF_HOME=/tmp/huggingface
|
| 41 |
-
ENV TOKENIZERS_PARALLELISM=false
|
| 42 |
-
ENV TRANSFORMERS_VERBOSITY=error
|
| 43 |
-
ENV BITSANDBYTES_NOWELCOME=1
|
| 44 |
-
|
| 45 |
-
# Expose the port
|
| 46 |
-
EXPOSE 7860
|
| 47 |
-
|
| 48 |
-
# Run the application using start.sh
|
| 49 |
-
CMD ["./start.sh"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
CHANGED
|
@@ -1,10 +1,10 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Chat
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
-
sdk: docker
|
| 7 |
-
pinned: false
|
| 8 |
-
---
|
| 9 |
-
|
| 10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Chat
|
| 3 |
+
emoji: 🦀
|
| 4 |
+
colorFrom: pink
|
| 5 |
+
colorTo: red
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
DELETED
|
@@ -1,249 +0,0 @@
|
|
| 1 |
-
from fastapi import FastAPI, WebSocket, HTTPException
|
| 2 |
-
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
-
from fastapi.responses import FileResponse
|
| 4 |
-
from pydantic import BaseModel
|
| 5 |
-
from typing import Optional, List, Dict, Any
|
| 6 |
-
import os
|
| 7 |
-
import logging
|
| 8 |
-
from dotenv import load_dotenv
|
| 9 |
-
from chatbot import MentalHealthChatbot
|
| 10 |
-
from datetime import datetime
|
| 11 |
-
import json
|
| 12 |
-
import uvicorn
|
| 13 |
-
import torch
|
| 14 |
-
|
| 15 |
-
# Configure logging
|
| 16 |
-
logging.basicConfig(
|
| 17 |
-
level=logging.INFO,
|
| 18 |
-
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 19 |
-
)
|
| 20 |
-
logger = logging.getLogger(__name__)
|
| 21 |
-
|
| 22 |
-
# Load environment variables
|
| 23 |
-
load_dotenv()
|
| 24 |
-
|
| 25 |
-
# Initialize FastAPI app
|
| 26 |
-
app = FastAPI(
|
| 27 |
-
title="Mental Health Chatbot",
|
| 28 |
-
description="mental health support chatbot",
|
| 29 |
-
version="1.0.0"
|
| 30 |
-
)
|
| 31 |
-
|
| 32 |
-
# Add CORS middleware - allow all origins for Hugging Face Spaces
|
| 33 |
-
app.add_middleware(
|
| 34 |
-
CORSMiddleware,
|
| 35 |
-
allow_origins=["*"], # Allows all origins
|
| 36 |
-
allow_credentials=True,
|
| 37 |
-
allow_methods=["*"], # Allows all methods
|
| 38 |
-
allow_headers=["*"], # Allows all headers
|
| 39 |
-
)
|
| 40 |
-
|
| 41 |
-
# Initialize chatbot with Hugging Face Spaces specific settings
|
| 42 |
-
chatbot = MentalHealthChatbot(
|
| 43 |
-
model_name="meta-llama/Llama-3.2-3B-Instruct",
|
| 44 |
-
peft_model_path="nada013/mental-health-chatbot",
|
| 45 |
-
use_4bit=True, # Enable 4-bit quantization for GPU
|
| 46 |
-
device="cuda" if torch.cuda.is_available() else "cpu", # Use GPU if available
|
| 47 |
-
therapy_guidelines_path="guidelines.txt"
|
| 48 |
-
)
|
| 49 |
-
|
| 50 |
-
# Add GPU memory logging
|
| 51 |
-
if torch.cuda.is_available():
|
| 52 |
-
logger.info(f"GPU Device: {torch.cuda.get_device_name(0)}")
|
| 53 |
-
logger.info(f"Available GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")
|
| 54 |
-
|
| 55 |
-
# pydantic models
|
| 56 |
-
class MessageRequest(BaseModel):
|
| 57 |
-
user_id: str
|
| 58 |
-
message: str
|
| 59 |
-
|
| 60 |
-
class MessageResponse(BaseModel):
|
| 61 |
-
response: str
|
| 62 |
-
session_id: str
|
| 63 |
-
|
| 64 |
-
class SessionSummary(BaseModel):
|
| 65 |
-
session_id: str
|
| 66 |
-
user_id: str
|
| 67 |
-
start_time: str
|
| 68 |
-
end_time: str
|
| 69 |
-
duration_minutes: float
|
| 70 |
-
current_phase: str
|
| 71 |
-
primary_emotions: List[str]
|
| 72 |
-
emotion_progression: List[str]
|
| 73 |
-
summary: str
|
| 74 |
-
recommendations: List[str]
|
| 75 |
-
session_characteristics: Dict[str, Any]
|
| 76 |
-
|
| 77 |
-
class UserReply(BaseModel):
|
| 78 |
-
text: str
|
| 79 |
-
timestamp: str
|
| 80 |
-
session_id: str
|
| 81 |
-
|
| 82 |
-
class Message(BaseModel):
|
| 83 |
-
text: str
|
| 84 |
-
role: str = "user"
|
| 85 |
-
|
| 86 |
-
# API endpoints
|
| 87 |
-
@app.get("/")
|
| 88 |
-
async def root():
|
| 89 |
-
"""Root endpoint with API information."""
|
| 90 |
-
return {
|
| 91 |
-
"name": "Mental Health Chatbot API",
|
| 92 |
-
"version": "1.0.0",
|
| 93 |
-
"description": "API for mental health support chatbot",
|
| 94 |
-
"endpoints": {
|
| 95 |
-
"POST /start_session": "Start a new chat session",
|
| 96 |
-
"POST /send_message": "Send a message to the chatbot",
|
| 97 |
-
"POST /end_session": "End the current session",
|
| 98 |
-
"GET /health": "Health check endpoint",
|
| 99 |
-
"GET /docs": "API documentation (Swagger UI)",
|
| 100 |
-
"GET /redoc": "API documentation (ReDoc)",
|
| 101 |
-
"GET /ws": "WebSocket endpoint"
|
| 102 |
-
}
|
| 103 |
-
}
|
| 104 |
-
|
| 105 |
-
@app.post("/start_session", response_model=MessageResponse)
|
| 106 |
-
async def start_session(user_id: str):
|
| 107 |
-
try:
|
| 108 |
-
session_id, initial_message = chatbot.start_session(user_id)
|
| 109 |
-
return MessageResponse(response=initial_message, session_id=session_id)
|
| 110 |
-
except Exception as e:
|
| 111 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 112 |
-
|
| 113 |
-
@app.post("/send_message", response_model=MessageResponse)
|
| 114 |
-
async def send_message(request: MessageRequest):
|
| 115 |
-
try:
|
| 116 |
-
# Check if user has an active session
|
| 117 |
-
if request.user_id not in chatbot.conversations or not chatbot.conversations[request.user_id].is_active:
|
| 118 |
-
# Start a new session if none exists
|
| 119 |
-
session_id, _ = chatbot.start_session(request.user_id)
|
| 120 |
-
logger.info(f"Started new session {session_id} for user {request.user_id} during message send")
|
| 121 |
-
|
| 122 |
-
# Process the message
|
| 123 |
-
response = chatbot.process_message(request.user_id, request.message)
|
| 124 |
-
session = chatbot.conversations[request.user_id]
|
| 125 |
-
return MessageResponse(response=response, session_id=session.session_id)
|
| 126 |
-
except Exception as e:
|
| 127 |
-
logger.error(f"Error processing message for user {request.user_id}: {str(e)}")
|
| 128 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 129 |
-
|
| 130 |
-
@app.post("/end_session", response_model=SessionSummary)
|
| 131 |
-
async def end_session(user_id: str):
|
| 132 |
-
try:
|
| 133 |
-
summary = chatbot.end_session(user_id)
|
| 134 |
-
if not summary:
|
| 135 |
-
raise HTTPException(status_code=404, detail="No active session found")
|
| 136 |
-
return summary
|
| 137 |
-
except Exception as e:
|
| 138 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 139 |
-
|
| 140 |
-
@app.get("/health")
|
| 141 |
-
async def health_check():
|
| 142 |
-
return {"status": "healthy"}
|
| 143 |
-
|
| 144 |
-
@app.get("/session_summary/{session_id}", response_model=SessionSummary)
|
| 145 |
-
async def get_session_summary(
|
| 146 |
-
session_id: str,
|
| 147 |
-
include_summary: bool = True,
|
| 148 |
-
include_recommendations: bool = True,
|
| 149 |
-
include_emotions: bool = True,
|
| 150 |
-
include_characteristics: bool = True,
|
| 151 |
-
include_duration: bool = True,
|
| 152 |
-
include_phase: bool = True
|
| 153 |
-
):
|
| 154 |
-
try:
|
| 155 |
-
summary = chatbot.get_session_summary(session_id)
|
| 156 |
-
if not summary:
|
| 157 |
-
raise HTTPException(status_code=404, detail="Session summary not found")
|
| 158 |
-
|
| 159 |
-
filtered_summary = {
|
| 160 |
-
"session_id": summary["session_id"],
|
| 161 |
-
"user_id": summary["user_id"],
|
| 162 |
-
"start_time": summary["start_time"],
|
| 163 |
-
"end_time": summary["end_time"],
|
| 164 |
-
"duration_minutes": summary.get("duration_minutes", 0.0),
|
| 165 |
-
"current_phase": summary.get("current_phase", "unknown"),
|
| 166 |
-
"primary_emotions": summary.get("primary_emotions", []),
|
| 167 |
-
"emotion_progression": summary.get("emotion_progression", []),
|
| 168 |
-
"summary": summary.get("summary", ""),
|
| 169 |
-
"recommendations": summary.get("recommendations", []),
|
| 170 |
-
"session_characteristics": summary.get("session_characteristics", {})
|
| 171 |
-
}
|
| 172 |
-
|
| 173 |
-
# Filter out fields based on include parameters
|
| 174 |
-
if not include_summary:
|
| 175 |
-
filtered_summary["summary"] = ""
|
| 176 |
-
if not include_recommendations:
|
| 177 |
-
filtered_summary["recommendations"] = []
|
| 178 |
-
if not include_emotions:
|
| 179 |
-
filtered_summary["primary_emotions"] = []
|
| 180 |
-
filtered_summary["emotion_progression"] = []
|
| 181 |
-
if not include_characteristics:
|
| 182 |
-
filtered_summary["session_characteristics"] = {}
|
| 183 |
-
if not include_duration:
|
| 184 |
-
filtered_summary["duration_minutes"] = 0.0
|
| 185 |
-
if not include_phase:
|
| 186 |
-
filtered_summary["current_phase"] = "unknown"
|
| 187 |
-
|
| 188 |
-
return filtered_summary
|
| 189 |
-
except Exception as e:
|
| 190 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 191 |
-
|
| 192 |
-
@app.get("/user_replies/{user_id}")
|
| 193 |
-
async def get_user_replies(user_id: str):
|
| 194 |
-
try:
|
| 195 |
-
replies = chatbot.get_user_replies(user_id)
|
| 196 |
-
|
| 197 |
-
# Create a filename with user_id and timestamp
|
| 198 |
-
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 199 |
-
filename = f"user_replies_{user_id}_{timestamp}.json"
|
| 200 |
-
filepath = os.path.join("user_replies", filename)
|
| 201 |
-
|
| 202 |
-
# Ensure directory exists
|
| 203 |
-
os.makedirs("user_replies", exist_ok=True)
|
| 204 |
-
|
| 205 |
-
# Write replies to JSON file
|
| 206 |
-
with open(filepath, 'w') as f:
|
| 207 |
-
json.dump({
|
| 208 |
-
"user_id": user_id,
|
| 209 |
-
"timestamp": datetime.now().isoformat(),
|
| 210 |
-
"replies": replies
|
| 211 |
-
}, f, indent=2)
|
| 212 |
-
|
| 213 |
-
# Return the file
|
| 214 |
-
return FileResponse(
|
| 215 |
-
path=filepath,
|
| 216 |
-
filename=filename,
|
| 217 |
-
media_type="application/json"
|
| 218 |
-
)
|
| 219 |
-
except Exception as e:
|
| 220 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 221 |
-
|
| 222 |
-
@app.websocket("/ws")
|
| 223 |
-
async def websocket_endpoint(websocket: WebSocket):
|
| 224 |
-
await websocket.accept()
|
| 225 |
-
try:
|
| 226 |
-
while True:
|
| 227 |
-
data = await websocket.receive_json()
|
| 228 |
-
user_id = data.get("user_id")
|
| 229 |
-
message = data.get("message")
|
| 230 |
-
|
| 231 |
-
if not user_id or not message:
|
| 232 |
-
await websocket.send_json({"error": "Missing user_id or message"})
|
| 233 |
-
continue
|
| 234 |
-
|
| 235 |
-
response = chatbot.process_message(user_id, message)
|
| 236 |
-
session_id = chatbot.conversations[user_id].session_id
|
| 237 |
-
|
| 238 |
-
await websocket.send_json({
|
| 239 |
-
"response": response,
|
| 240 |
-
"session_id": session_id
|
| 241 |
-
})
|
| 242 |
-
except Exception as e:
|
| 243 |
-
await websocket.send_json({"error": str(e)})
|
| 244 |
-
finally:
|
| 245 |
-
await websocket.close()
|
| 246 |
-
|
| 247 |
-
if __name__ == "__main__":
|
| 248 |
-
port = int(os.getenv("PORT", 7860))
|
| 249 |
-
uvicorn.run(app, host="0.0.0.0", port=port)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
chatbot.py
DELETED
|
@@ -1,930 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import logging
|
| 3 |
-
import json
|
| 4 |
-
import torch
|
| 5 |
-
import re
|
| 6 |
-
from typing import List, Dict, Any, Optional, Union
|
| 7 |
-
from datetime import datetime
|
| 8 |
-
from pydantic import BaseModel, Field
|
| 9 |
-
import tempfile
|
| 10 |
-
|
| 11 |
-
# Model imports
|
| 12 |
-
from transformers import (
|
| 13 |
-
pipeline,
|
| 14 |
-
AutoTokenizer,
|
| 15 |
-
AutoModelForCausalLM,
|
| 16 |
-
BitsAndBytesConfig
|
| 17 |
-
)
|
| 18 |
-
from peft import PeftModel, PeftConfig
|
| 19 |
-
from sentence_transformers import SentenceTransformer
|
| 20 |
-
|
| 21 |
-
# LangChain imports
|
| 22 |
-
# Core LangChain components for building conversational AI
|
| 23 |
-
from langchain.llms import HuggingFacePipeline # Wrapper for HuggingFace models
|
| 24 |
-
from langchain.chains import LLMChain # Chain for LLM interactions
|
| 25 |
-
from langchain.memory import ConversationBufferMemory # Memory for conversation history
|
| 26 |
-
from langchain.prompts import PromptTemplate # Template for structured prompts
|
| 27 |
-
from langchain.embeddings import HuggingFaceEmbeddings # Text embeddings for similarity search
|
| 28 |
-
from langchain.text_splitter import RecursiveCharacterTextSplitter # Document chunking
|
| 29 |
-
from langchain.document_loaders import TextLoader # Load text documents
|
| 30 |
-
from langchain.vectorstores import FAISS # Vector database for similarity search
|
| 31 |
-
|
| 32 |
-
# Import FlowManager
|
| 33 |
-
from conversation_flow import FlowManager
|
| 34 |
-
|
| 35 |
-
# Configure logging
|
| 36 |
-
logging.basicConfig(
|
| 37 |
-
level=logging.INFO,
|
| 38 |
-
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
| 39 |
-
handlers=[logging.StreamHandler()]
|
| 40 |
-
)
|
| 41 |
-
logger = logging.getLogger(__name__)
|
| 42 |
-
|
| 43 |
-
# Suppress warnings
|
| 44 |
-
import warnings
|
| 45 |
-
warnings.filterwarnings('ignore', category=UserWarning)
|
| 46 |
-
|
| 47 |
-
# Set up cache directories
|
| 48 |
-
def setup_cache_dirs():
|
| 49 |
-
# Check if running in Hugging Face Spaces
|
| 50 |
-
is_spaces = os.environ.get('SPACE_ID') is not None
|
| 51 |
-
|
| 52 |
-
if is_spaces:
|
| 53 |
-
# Use /tmp for Hugging Face Spaces with proper permissions
|
| 54 |
-
cache_dir = '/tmp/huggingface'
|
| 55 |
-
os.environ.update({
|
| 56 |
-
'TRANSFORMERS_CACHE': cache_dir,
|
| 57 |
-
'HF_HOME': cache_dir,
|
| 58 |
-
'TOKENIZERS_PARALLELISM': 'false',
|
| 59 |
-
'TRANSFORMERS_VERBOSITY': 'error',
|
| 60 |
-
'BITSANDBYTES_NOWELCOME': '1',
|
| 61 |
-
'HF_DATASETS_CACHE': cache_dir,
|
| 62 |
-
'HF_METRICS_CACHE': cache_dir,
|
| 63 |
-
'HF_MODULES_CACHE': cache_dir,
|
| 64 |
-
'HUGGING_FACE_HUB_TOKEN': os.environ.get('HF_TOKEN', ''),
|
| 65 |
-
'HF_TOKEN': os.environ.get('HF_TOKEN', '')
|
| 66 |
-
})
|
| 67 |
-
else:
|
| 68 |
-
# Use default cache for local development
|
| 69 |
-
cache_dir = os.path.expanduser('~/.cache/huggingface')
|
| 70 |
-
os.environ.update({
|
| 71 |
-
'TOKENIZERS_PARALLELISM': 'false',
|
| 72 |
-
'TRANSFORMERS_VERBOSITY': 'error',
|
| 73 |
-
'BITSANDBYTES_NOWELCOME': '1'
|
| 74 |
-
})
|
| 75 |
-
|
| 76 |
-
# Create cache directory if it doesn't exist
|
| 77 |
-
os.makedirs(cache_dir, exist_ok=True)
|
| 78 |
-
|
| 79 |
-
return cache_dir
|
| 80 |
-
|
| 81 |
-
# Set up cache directories
|
| 82 |
-
CACHE_DIR = setup_cache_dirs()
|
| 83 |
-
|
| 84 |
-
# Define base directory and paths
|
| 85 |
-
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
|
| 86 |
-
MODELS_DIR = os.path.join(BASE_DIR, "models")
|
| 87 |
-
VECTOR_DB_PATH = os.path.join(BASE_DIR, "vector_db")
|
| 88 |
-
SESSION_DATA_PATH = os.path.join(BASE_DIR, "session_data")
|
| 89 |
-
SUMMARIES_DIR = os.path.join(BASE_DIR, "session_summaries")
|
| 90 |
-
|
| 91 |
-
# Create necessary directories
|
| 92 |
-
for directory in [MODELS_DIR, VECTOR_DB_PATH, SESSION_DATA_PATH, SUMMARIES_DIR]:
|
| 93 |
-
os.makedirs(directory, exist_ok=True)
|
| 94 |
-
|
| 95 |
-
# Pydantic models
|
| 96 |
-
class Message(BaseModel):
|
| 97 |
-
text: str = Field(..., description="The content of the message")
|
| 98 |
-
timestamp: str = Field(None, description="ISO format timestamp of the message")
|
| 99 |
-
role: str = Field("user", description="The role of the message sender (user or assistant)")
|
| 100 |
-
|
| 101 |
-
class SessionSummary(BaseModel):
|
| 102 |
-
session_id: str = Field(
|
| 103 |
-
...,
|
| 104 |
-
description="Unique identifier for the session",
|
| 105 |
-
examples=["user_789_session_20240314"]
|
| 106 |
-
)
|
| 107 |
-
user_id: str = Field(
|
| 108 |
-
...,
|
| 109 |
-
description="Identifier of the user",
|
| 110 |
-
examples=["user_123"])
|
| 111 |
-
start_time: str = Field(..., description="ISO format start time of the session"
|
| 112 |
-
)
|
| 113 |
-
end_time: str = Field(
|
| 114 |
-
...,
|
| 115 |
-
description="ISO format end time of the session"
|
| 116 |
-
)
|
| 117 |
-
message_count: int = Field(
|
| 118 |
-
...,
|
| 119 |
-
description="Total number of messages in the session"
|
| 120 |
-
)
|
| 121 |
-
duration_minutes: float = Field(
|
| 122 |
-
...,
|
| 123 |
-
description="Duration of the session in minutes"
|
| 124 |
-
)
|
| 125 |
-
primary_emotions: List[str] = Field(
|
| 126 |
-
...,
|
| 127 |
-
min_items=1,
|
| 128 |
-
description="List of primary emotions detected",
|
| 129 |
-
examples=[
|
| 130 |
-
["anxiety", "stress"],
|
| 131 |
-
["joy", "excitement"],
|
| 132 |
-
["sadness", "loneliness"]
|
| 133 |
-
]
|
| 134 |
-
)
|
| 135 |
-
emotion_progression: List[Dict[str, float]] = Field(
|
| 136 |
-
...,
|
| 137 |
-
description="Progression of emotions throughout the session",
|
| 138 |
-
examples=[
|
| 139 |
-
[
|
| 140 |
-
{"anxiety": 0.8, "stress": 0.6},
|
| 141 |
-
{"calm": 0.7, "anxiety": 0.3},
|
| 142 |
-
{"joy": 0.9, "calm": 0.8}
|
| 143 |
-
]
|
| 144 |
-
]
|
| 145 |
-
)
|
| 146 |
-
summary_text: str = Field(
|
| 147 |
-
...,
|
| 148 |
-
description="Text summary of the session",
|
| 149 |
-
examples=[
|
| 150 |
-
"The session focused on managing work-related stress and developing coping strategies. The client showed improvement in recognizing stress triggers and implementing relaxation techniques.",
|
| 151 |
-
"Discussion centered around relationship challenges and self-esteem issues. The client expressed willingness to try new communication strategies."
|
| 152 |
-
]
|
| 153 |
-
)
|
| 154 |
-
recommendations: Optional[List[str]] = Field(
|
| 155 |
-
None,
|
| 156 |
-
description="Optional recommendations based on the session"
|
| 157 |
-
)
|
| 158 |
-
|
| 159 |
-
class Conversation(BaseModel):
|
| 160 |
-
user_id: str = Field(
|
| 161 |
-
...,
|
| 162 |
-
description="Identifier of the user",
|
| 163 |
-
examples=["user_123"]
|
| 164 |
-
)
|
| 165 |
-
session_id: str = Field(
|
| 166 |
-
"",
|
| 167 |
-
description="Identifier of the current session"
|
| 168 |
-
)
|
| 169 |
-
start_time: str = Field(
|
| 170 |
-
"",
|
| 171 |
-
description="ISO format start time of the conversation"
|
| 172 |
-
)
|
| 173 |
-
messages: List[Message] = Field(
|
| 174 |
-
[],
|
| 175 |
-
description="List of messages in the conversation",
|
| 176 |
-
examples=[
|
| 177 |
-
[
|
| 178 |
-
Message(text="I'm feeling anxious", role="user"),
|
| 179 |
-
Message(text="I understand you're feeling anxious. Can you tell me more about what's causing this?", role="assistant")
|
| 180 |
-
]
|
| 181 |
-
]
|
| 182 |
-
)
|
| 183 |
-
emotion_history: List[Dict[str, float]] = Field(
|
| 184 |
-
[],
|
| 185 |
-
description="History of emotions detected",
|
| 186 |
-
examples=[
|
| 187 |
-
[
|
| 188 |
-
{"anxiety": 0.8, "stress": 0.6},
|
| 189 |
-
{"calm": 0.7, "anxiety": 0.3}
|
| 190 |
-
]
|
| 191 |
-
]
|
| 192 |
-
)
|
| 193 |
-
context: Dict[str, Any] = Field(
|
| 194 |
-
{},
|
| 195 |
-
description="Additional context for the conversation",
|
| 196 |
-
examples=[
|
| 197 |
-
{
|
| 198 |
-
"last_emotion": "anxiety",
|
| 199 |
-
"conversation_topic": "work stress",
|
| 200 |
-
"previous_sessions": 3
|
| 201 |
-
}
|
| 202 |
-
]
|
| 203 |
-
)
|
| 204 |
-
is_active: bool = Field(
|
| 205 |
-
True,
|
| 206 |
-
description="Whether the conversation is currently active",
|
| 207 |
-
examples=[True, False]
|
| 208 |
-
)
|
| 209 |
-
|
| 210 |
-
class MentalHealthChatbot:
|
| 211 |
-
def __init__(
|
| 212 |
-
self,
|
| 213 |
-
model_name: str = "meta-llama/Llama-3.2-3B-Instruct",
|
| 214 |
-
peft_model_path: str = "nada013/mental-health-chatbot",
|
| 215 |
-
therapy_guidelines_path: str = None,
|
| 216 |
-
use_4bit: bool = True,
|
| 217 |
-
device: str = None
|
| 218 |
-
):
|
| 219 |
-
# Set device (cuda if available, otherwise cpu)
|
| 220 |
-
if device is None:
|
| 221 |
-
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 222 |
-
else:
|
| 223 |
-
self.device = device
|
| 224 |
-
|
| 225 |
-
# Set memory optimization for T4
|
| 226 |
-
if self.device == "cuda":
|
| 227 |
-
torch.cuda.empty_cache() # Clear GPU cache
|
| 228 |
-
# Set smaller batch size for T4
|
| 229 |
-
self.batch_size = 4
|
| 230 |
-
# Enable memory efficient attention
|
| 231 |
-
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
|
| 232 |
-
else:
|
| 233 |
-
self.batch_size = 8
|
| 234 |
-
|
| 235 |
-
logger.info(f"Using device: {self.device}")
|
| 236 |
-
|
| 237 |
-
# Initialize models
|
| 238 |
-
self.peft_model_path = peft_model_path
|
| 239 |
-
|
| 240 |
-
# Initialize emotion detection model
|
| 241 |
-
logger.info("Loading emotion detection model")
|
| 242 |
-
self.emotion_classifier = self._load_emotion_model()
|
| 243 |
-
|
| 244 |
-
# Initialize LLAMA model
|
| 245 |
-
logger.info(f"Loading LLAMA model: {model_name}")
|
| 246 |
-
self.llama_model, self.llama_tokenizer, self.llm = self._initialize_llm(model_name, use_4bit)
|
| 247 |
-
|
| 248 |
-
# Initialize summary model
|
| 249 |
-
logger.info("Loading summary model")
|
| 250 |
-
self.summary_model = pipeline(
|
| 251 |
-
"summarization",
|
| 252 |
-
model="philschmid/bart-large-cnn-samsum",
|
| 253 |
-
device=0 if self.device == "cuda" else -1,
|
| 254 |
-
model_kwargs={
|
| 255 |
-
"cache_dir": CACHE_DIR,
|
| 256 |
-
"torch_dtype": torch.float16,
|
| 257 |
-
"max_memory": {0: "2GB"} if self.device == "cuda" else None
|
| 258 |
-
}
|
| 259 |
-
)
|
| 260 |
-
logger.info("Summary model loaded successfully")
|
| 261 |
-
|
| 262 |
-
# Initialize FlowManager
|
| 263 |
-
logger.info("Initializing FlowManager")
|
| 264 |
-
self.flow_manager = FlowManager(self.llm)
|
| 265 |
-
|
| 266 |
-
# Setup conversation memory with LangChain
|
| 267 |
-
# ConversationBufferMemory stores the conversation history in a buffer
|
| 268 |
-
# This allows the chatbot to maintain context across multiple interactions
|
| 269 |
-
# - return_messages=True: Returns messages as a list of message objects
|
| 270 |
-
# - input_key="input": Specifies which key to use for the input in the memory
|
| 271 |
-
self.memory = ConversationBufferMemory(
|
| 272 |
-
return_messages=True,
|
| 273 |
-
input_key="input"
|
| 274 |
-
)
|
| 275 |
-
|
| 276 |
-
# Create conversation prompt template
|
| 277 |
-
# PromptTemplate defines the structure for generating responses
|
| 278 |
-
# It includes placeholders for dynamic content that gets filled during generation
|
| 279 |
-
# Input variables:
|
| 280 |
-
# - history: Previous conversation context from memory
|
| 281 |
-
# - input: Current user message
|
| 282 |
-
# - past_context: Relevant past conversations from vector search
|
| 283 |
-
# - emotion_context: Detected emotions and their context
|
| 284 |
-
# - guidelines: Relevant therapeutic guidelines from vector search
|
| 285 |
-
self.prompt_template = PromptTemplate(
|
| 286 |
-
input_variables=["history", "input", "past_context", "emotion_context", "guidelines"],
|
| 287 |
-
template="""You are a supportive and empathetic mental health conversational AI. Your role is to provide therapeutic support while maintaining professional boundaries.
|
| 288 |
-
|
| 289 |
-
Previous conversation:
|
| 290 |
-
{history}
|
| 291 |
-
|
| 292 |
-
EMOTIONAL CONTEXT:
|
| 293 |
-
{emotion_context}
|
| 294 |
-
|
| 295 |
-
Past context: {past_context}
|
| 296 |
-
|
| 297 |
-
Relevant therapeutic guidelines:
|
| 298 |
-
{guidelines}
|
| 299 |
-
|
| 300 |
-
Current message: {input}
|
| 301 |
-
|
| 302 |
-
Provide a supportive response that:
|
| 303 |
-
1. Validates the user's feelings without using casual greetings
|
| 304 |
-
2. Asks relevant follow-up questions
|
| 305 |
-
3. Maintains a conversational tone , professional and empathetic tone
|
| 306 |
-
4. Focuses on understanding and support
|
| 307 |
-
5. Avoids repeating previous responses
|
| 308 |
-
|
| 309 |
-
Response:"""
|
| 310 |
-
)
|
| 311 |
-
|
| 312 |
-
# Create the conversation chain
|
| 313 |
-
self.conversation = LLMChain(
|
| 314 |
-
llm=self.llm,
|
| 315 |
-
prompt=self.prompt_template,
|
| 316 |
-
memory=self.memory,
|
| 317 |
-
verbose=False
|
| 318 |
-
)
|
| 319 |
-
|
| 320 |
-
# Setup embeddings for vector search
|
| 321 |
-
self.embeddings = HuggingFaceEmbeddings(
|
| 322 |
-
model_name="sentence-transformers/all-MiniLM-L6-v2"
|
| 323 |
-
)
|
| 324 |
-
|
| 325 |
-
# Setup vector database for retrieving relevant past conversations
|
| 326 |
-
|
| 327 |
-
if therapy_guidelines_path and os.path.exists(therapy_guidelines_path):
|
| 328 |
-
self.setup_vector_db(therapy_guidelines_path)
|
| 329 |
-
else:
|
| 330 |
-
self.setup_vector_db(None)
|
| 331 |
-
|
| 332 |
-
# Initialize conversation storage
|
| 333 |
-
self.conversations = {}
|
| 334 |
-
|
| 335 |
-
# Load existing session summaries
|
| 336 |
-
self.session_summaries = {}
|
| 337 |
-
self._load_existing_summaries()
|
| 338 |
-
|
| 339 |
-
logger.info("All models and components initialized successfully")
|
| 340 |
-
|
def _load_emotion_model(self):
    """Build the emotion-classification pipeline, with graceful fallbacks.

    Tries the 28-label GoEmotions RoBERTa model first, then a smaller
    DistilRoBERTa emotion model, and finally returns a stub callable that
    always reports "neutral" so the chatbot keeps working even when no
    model can be loaded.
    """
    shared_kwargs = {
        "cache_dir": CACHE_DIR,
        "torch_dtype": torch.float16,  # halve the memory footprint
        "max_memory": {0: "2GB"} if torch.cuda.is_available() else None,
    }
    device_map = "auto" if torch.cuda.is_available() else None
    try:
        # Primary classifier, loaded directly from Hugging Face.
        return pipeline(
            "text-classification",
            model="SamLowe/roberta-base-go_emotions",
            top_k=None,  # return scores for every label
            device_map=device_map,
            model_kwargs=shared_kwargs,
        )
    except Exception as e:
        logger.error(f"Error loading emotion model: {e}")
        try:
            # Fallback: smaller 7-label emotion model.
            # Fix: use top_k=None instead of the deprecated
            # return_all_scores=True so the output shape matches the
            # primary model ([[{label, score}, ...]] per input).
            return pipeline(
                "text-classification",
                model="j-hartmann/emotion-english-distilroberta-base",
                top_k=None,
                device_map=device_map,
                model_kwargs=shared_kwargs,
            )
        except Exception as e:
            logger.error(f"Error loading fallback emotion model: {e}")
            # Fix: mimic the pipeline's nested [[{...}]] output shape so
            # detect_emotion's `classifier(text)[0]` indexing still yields
            # a list of {label, score} dicts instead of raising.
            return lambda text: [[{"label": "neutral", "score": 1.0}]]
| 374 |
-
|
def _initialize_llm(self, model_name: str, use_4bit: bool):
    """Load the base causal LM, its tokenizer, and the PEFT adapter.

    Returns a ``(model, tokenizer, llm)`` triple where ``llm`` is a
    LangChain ``HuggingFacePipeline`` wrapping a text-generation pipeline.
    Raises on any loading failure after logging the error.
    """
    try:
        hf_token = os.environ.get('HF_TOKEN')
        cuda_ok = torch.cuda.is_available()

        # 4-bit NF4 quantization is only usable on GPU.
        if use_4bit and cuda_ok:
            quantization_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.float16,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_use_double_quant=True,
            )
            max_memory = {0: "14GB"}  # leave ~2GB buffer on a T4 GPU
        else:
            quantization_config = None
            max_memory = None
            logger.info("CUDA not available, running without quantization")

        logger.info(f"Loading base model: {model_name}")
        base_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            quantization_config=quantization_config,
            device_map="auto",
            max_memory=max_memory,
            trust_remote_code=True,
            cache_dir=CACHE_DIR,
            use_auth_token=hf_token,
            torch_dtype=torch.float16,  # fp16 for memory efficiency
        )

        logger.info("Loading tokenizer")
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            cache_dir=CACHE_DIR,
            use_auth_token=hf_token,  # needed for gated models
        )
        tokenizer.pad_token = tokenizer.eos_token

        logger.info(f"Loading PEFT model from {self.peft_model_path}")
        model = PeftModel.from_pretrained(
            base_model,
            self.peft_model_path,
            cache_dir=CACHE_DIR,
            use_auth_token=hf_token,
        )
        logger.info("Successfully loaded PEFT model")

        # Wrap in a generation pipeline, then in LangChain's adapter.
        text_generator = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.95,
            repetition_penalty=1.1,
            do_sample=True,
            device_map="auto" if cuda_ok else None,
        )
        llm = HuggingFacePipeline(pipeline=text_generator)

        return model, tokenizer, llm

    except Exception as e:
        logger.error(f"Error initializing LLM: {str(e)}")
        raise
| 446 |
-
|
def setup_vector_db(self, guidelines_path: str = None):
    """Create or load the FAISS index used for retrieval.

    Loads a persisted index from VECTOR_DB_PATH when one exists;
    otherwise builds a fresh index from the guidelines file (or a
    placeholder document when no file is given) and saves it.
    """
    logger.info("Setting up FAISS vector database")

    index_file = os.path.join(VECTOR_DB_PATH, "index.faiss")
    if os.path.exists(index_file):
        # Reuse the previously persisted index.
        self.vector_db = FAISS.load_local(VECTOR_DB_PATH, self.embeddings, allow_dangerous_deserialization=True)
        logger.info("Loaded existing vector database")
        return

    if guidelines_path and os.path.exists(guidelines_path):
        # Index the therapy guidelines document.
        documents = TextLoader(guidelines_path).load()
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,    # small chunks -> more precise retrieval
            chunk_overlap=100,
            separators=["\n\n", "\n", " ", ""]
        )
        chunks = splitter.split_documents(documents)
        self.vector_db = FAISS.from_documents(chunks, self.embeddings)
        self.vector_db.save_local(VECTOR_DB_PATH)
        logger.info("Successfully loaded and indexed therapy guidelines")
    else:
        # No guidelines available -- start from a placeholder document.
        self.vector_db = FAISS.from_texts(["Initial empty vector store"], self.embeddings)
        self.vector_db.save_local(VECTOR_DB_PATH)
        logger.warning("No guidelines file provided, using empty vector store")
| 481 |
-
|
def _load_existing_summaries(self):
    """Populate ``self.session_summaries`` from JSON files in SUMMARIES_DIR.

    A missing directory means "no summaries yet". Files that cannot be
    parsed, or that lack a ``session_id`` field, are skipped (unparsable
    files are logged with a warning).
    """
    if not os.path.exists(SUMMARIES_DIR):
        return

    for filename in os.listdir(SUMMARIES_DIR):
        if not filename.endswith('.json'):
            continue
        try:
            with open(os.path.join(SUMMARIES_DIR, filename), 'r') as f:
                summary_data = json.load(f)
            session_id = summary_data.get('session_id')
            if session_id:
                self.session_summaries[session_id] = summary_data
        except Exception as e:
            # Bug fix: the original message logged the literal text
            # "(unknown)" instead of naming the file that failed to load.
            logger.warning(f"Failed to load summary from {filename}: {e}")
| 496 |
-
|
def detect_emotion(self, text: str) -> Dict[str, float]:
    """Classify *text* and return a {label: score} map over all emotions.

    Falls back to ``{"neutral": 1.0}`` if the classifier raises.
    """
    try:
        scores = self.emotion_classifier(text)[0]
        return {entry['label']: entry['score'] for entry in scores}
    except Exception as e:
        logger.error(f"Error detecting emotions: {e}")
        return {"neutral": 1.0}
| 504 |
-
|
def retrieve_relevant_context(self, query: str, k: int = 3) -> str:
    """Return up to *k* similar past-conversation snippets, newline-joined.

    Returns "" when no vector store is initialized or the search fails.
    """
    if not hasattr(self, 'vector_db'):
        # Vector store was never set up; nothing to retrieve.
        return ""

    try:
        matches = self.vector_db.similarity_search(query, k=k)
        return "\n".join(doc.page_content for doc in matches)
    except Exception as e:
        logger.error(f"Error retrieving context: {e}")
        return ""
| 520 |
-
|
def retrieve_relevant_guidelines(self, query: str, emotion_context: str) -> str:
    """Look up therapeutic guidelines relevant to the message and emotions.

    Returns "" when no vector store is initialized or the search fails.
    """
    if not hasattr(self, 'vector_db'):
        return ""

    try:
        # Fold the emotional context into the query for better relevance.
        combined_query = f"{query} {emotion_context}"
        hits = self.vector_db.similarity_search(combined_query, k=2)
        return "\n".join(doc.page_content for doc in hits)
    except Exception as e:
        logger.error(f"Error retrieving guidelines: {e}")
        return ""
| 538 |
-
|
def generate_response(self, prompt: str, emotion_data: Dict[str, float], conversation_history: List[Dict]) -> str:
    """Produce an empathetic reply to *prompt* via the LLM chain.

    Builds an emotion-context sentence from the detected scores, retrieves
    guidelines and past context, then post-processes the LLM output:
    strips prompt boilerplate, removes casual greetings, and regenerates
    once if the reply duplicates a recent assistant message.
    """
    # Rank detected emotions strongest-first.
    ranked = sorted(emotion_data.items(), key=lambda item: item[1], reverse=True)
    primary_emotion = ranked[0][0] if ranked else "neutral"

    # Keep the 2nd/3rd emotions only when they are reasonably strong.
    secondary = [name for name, score in ranked[1:3] if score > 0.2]

    emotion_context = f"User is primarily feeling {primary_emotion}"
    if secondary:
        emotion_context += f" with elements of {' and '.join(secondary)}"
    emotion_context += "."

    guidelines = self.retrieve_relevant_guidelines(prompt, emotion_context)
    past_context = self.retrieve_relevant_context(prompt)

    response = self.conversation.predict(
        input=prompt,
        past_context=past_context,
        emotion_context=emotion_context,
        guidelines=guidelines
    )

    # Keep only the actual message: drop template echo and trailing notes.
    response = response.split("Response:")[-1].strip()
    response = response.split("---")[0].strip()
    response = response.split("Note:")[0].strip()
    # Remove casual salutations such as "Hey," / "Hi there,".
    response = re.sub(r'^(Hey|Hi|Hello|Hi there|Hey there),\s*', '', response)

    # If the reply repeats one of the assistant's last few messages,
    # ask the chain for a different perspective once.
    if conversation_history:
        recent = [m["text"] for m in conversation_history[-4:] if m["role"] == "assistant"]
        if response in recent:
            response = self.conversation.predict(
                input=f"{prompt} (Please provide a different perspective)",
                past_context=past_context,
                emotion_context=emotion_context,
                guidelines=guidelines
            )
            response = response.split("Response:")[-1].strip()
            response = re.sub(r'^(Hey|Hi|Hello|Hi there|Hey there),\s*', '', response)

    return response.strip()
| 596 |
-
|
def generate_session_summary(
    self,
    flow_manager_session: Dict = None
) -> Dict:
    """Summarize a finished session from FlowManager state.

    Produces a JSON-serializable dict containing session metadata, the
    dominant emotions, a BART-generated summary, and LLM-generated
    follow-up recommendations. When no session data is supplied, a
    well-formed error payload is returned instead.
    """
    if not flow_manager_session:
        return {
            "session_id": "",
            "user_id": "",
            "start_time": "",
            "end_time": datetime.now().isoformat(),
            "duration_minutes": 0,
            "current_phase": "unknown",
            "primary_emotions": [],
            "emotion_progression": [],
            "summary": "Error: No session data provided",
            "recommendations": ["Unable to generate recommendations"],
            "session_characteristics": {}
        }

    session_id = flow_manager_session.get('session_id', '')
    user_id = flow_manager_session.get('user_id', '')

    # Flatten the ConversationPhase model into a plain dict.
    current_phase = flow_manager_session.get('current_phase')
    if current_phase:
        current_phase = {
            'name': current_phase.name,
            'description': current_phase.description,
            'goals': current_phase.goals,
            'started_at': current_phase.started_at,
            'ended_at': current_phase.ended_at,
            'completion_metrics': current_phase.completion_metrics
        }

    session_start = flow_manager_session.get('started_at')
    if isinstance(session_start, str):
        session_start = datetime.fromisoformat(session_start)
    session_duration = (datetime.now() - session_start).total_seconds() / 60 if session_start else 0

    emotion_progression = flow_manager_session.get('emotion_progression', [])
    emotion_history = flow_manager_session.get('emotion_history', [])

    # Tally the strongest emotion of each turn, then keep the top 3.
    primary_emotions = []
    if emotion_history:
        counts = {}
        for entry in emotion_history:
            turn_emotions = entry.get('emotions', {})
            if isinstance(turn_emotions, dict):
                strongest = max(turn_emotions.items(), key=lambda kv: kv[1])[0]
                counts[strongest] = counts.get(strongest, 0) + 1
        top = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:3]
        primary_emotions = [name for name, _ in top]

    session_characteristics = flow_manager_session.get('llm_context', {}).get('session_characteristics', {})

    phase_name = current_phase.get('name', 'unknown') if current_phase else 'unknown'
    phase_desc = current_phase.get('description', 'general discussion') if current_phase else 'general discussion'

    # Assemble the text that the BART summarizer will condense.
    summary_text = f"""
Session Overview:
- Session ID: {session_id}
- User ID: {user_id}
- Phase: {phase_name}
- Duration: {session_duration:.1f} minutes

Emotional Analysis:
- Primary Emotions: {', '.join(primary_emotions) if primary_emotions else 'No primary emotions detected'}
- Emotion Progression: {', '.join(emotion_progression) if emotion_progression else 'No significant emotion changes noted'}

Session Characteristics:
- Therapeutic Alliance: {session_characteristics.get('alliance_strength', 'N/A')}
- Engagement Level: {session_characteristics.get('engagement_level', 'N/A')}
- Emotional Pattern: {session_characteristics.get('emotional_pattern', 'N/A')}
- Cognitive Pattern: {session_characteristics.get('cognitive_pattern', 'N/A')}

Key Observations:
- The session focused on {phase_desc}
- Main emotional themes: {', '.join(primary_emotions) if primary_emotions else 'not identified'}
- Session progress: {session_characteristics.get('progress_quality', 'N/A')}
"""

    # Condense with the BART summarizer.
    summary = self.summary_model(
        summary_text,
        max_length=150,
        min_length=50,
        do_sample=False
    )[0]['summary_text']

    # Ask the main LLM for actionable follow-up recommendations.
    recommendations_prompt = f"""
Based on the following session summary, provide 2-3 specific recommendations for follow-up:

{summary}

Session Characteristics:
- Therapeutic Alliance: {session_characteristics.get('alliance_strength', 'N/A')}
- Engagement Level: {session_characteristics.get('engagement_level', 'N/A')}
- Emotional Pattern: {session_characteristics.get('emotional_pattern', 'N/A')}
- Cognitive Pattern: {session_characteristics.get('cognitive_pattern', 'N/A')}

Recommendations should be:
1. Actionable and specific
2. Based on the session content
3. Focused on next steps
"""
    raw = self.llm.invoke(recommendations_prompt)
    # Keep non-empty lines that are not prompt-echo headers.
    lines = [ln.strip() for ln in raw.split('\n') if ln.strip()]
    recommendations = [ln for ln in lines if not ln.startswith(('Based on', 'Session', 'Recommendations'))]

    return {
        "session_id": session_id,
        "user_id": user_id,
        "start_time": session_start.isoformat() if isinstance(session_start, datetime) else str(session_start),
        "end_time": datetime.now().isoformat(),
        "duration_minutes": session_duration,
        "current_phase": phase_name,
        "primary_emotions": primary_emotions,
        "emotion_progression": emotion_progression,
        "summary": summary,
        "recommendations": recommendations,
        "session_characteristics": session_characteristics
    }
| 731 |
-
|
def start_session(self, user_id: str) -> tuple[str, str]:
    """Open a fresh session for *user_id*.

    Resets the FlowManager state, the conversation record, and the
    LangChain memory buffer, then seeds the transcript with an opening
    greeting. Returns ``(session_id, initial_message)``.
    """
    # Session id is the user id plus a timestamp, e.g. "u1_20240101120000".
    session_id = f"{user_id}_{datetime.now().strftime('%Y%m%d%H%M%S')}"

    self.flow_manager.initialize_session(user_id)

    self.conversations[user_id] = Conversation(
        user_id=user_id,
        session_id=session_id,
        start_time=datetime.now().isoformat(),
        is_active=True
    )

    # Fresh session -> fresh conversational memory.
    self.memory.clear()

    initial_message = """Hello! I'm here to support you today. How have you been feeling lately?"""

    # Record the greeting as the first assistant turn.
    self.conversations[user_id].messages.append(Message(
        text=initial_message,
        timestamp=datetime.now().isoformat(),
        role="assistant"
    ))

    logger.info(f"Session started for user {user_id}")
    return session_id, initial_message
| 763 |
-
|
def end_session(
    self,
    user_id: str,
    flow_manager: Optional[Any] = None
) -> Optional[Dict]:
    """Close the active session for *user_id* and persist its summary.

    Returns the summary dict, or None when there is no active session or
    summary generation/persistence fails.
    """
    active = self.conversations.get(user_id)
    if active is None or not active.is_active:
        return None
    active.is_active = False

    # Pull the FlowManager's view of this user's session.
    flow_manager_session = self.flow_manager.user_sessions.get(user_id)

    try:
        session_summary = self.generate_session_summary(flow_manager_session)

        # Persist to disk so summaries survive restarts.
        summary_path = os.path.join(SUMMARIES_DIR, f"{session_summary['session_id']}.json")
        with open(summary_path, 'w') as f:
            json.dump(session_summary, f, indent=2)

        # Cache in memory and reset the LangChain buffer.
        self.session_summaries[session_summary['session_id']] = session_summary
        self.memory.clear()

        return session_summary
    except Exception as e:
        logger.error(f"Failed to generate session summary: {e}")
        return None
| 798 |
-
|
def process_message(self, user_id: str, message: str) -> str:
    """Handle one user turn and return the assistant's reply.

    Pipeline: risk-keyword screening (crisis messages short-circuit to a
    fixed safety response) -> emotion detection -> FlowManager update ->
    LLM response generation -> follow-up question for short replies ->
    post-processing -> transcript/context update -> vector-store indexing.
    """
    risk_keywords = ["suicide", "kill myself", "end my life", "self-harm", "hurt myself"]
    lowered = message.lower()
    risk_detected = any(kw in lowered for kw in risk_keywords)

    # Lazily (re)open a session if none is active.
    if user_id not in self.conversations or not self.conversations[user_id].is_active:
        self.start_session(user_id)
    conversation = self.conversations[user_id]

    # Record the incoming user message in the transcript.
    conversation.messages.append(Message(
        text=message,
        timestamp=datetime.now().isoformat(),
        role="user"
    ))

    if risk_detected:
        # Crisis path: respond with the fixed safety message immediately.
        logger.warning(f"Risk flag detected in session {user_id}")

        crisis_response = """ I'm really sorry you're feeling this way — it sounds incredibly heavy, and I want you to know that you're not alone.

You don't have to face this by yourself. Our app has licensed mental health professionals who are ready to support you. I can connect you right now if you'd like.

In the meantime, I'm here to listen and talk with you. You can also do grounding exercises or calming techniques with me if you prefer. Just say "help me calm down" or "I need a break."

Would you like to connect with a professional now, or would you prefer to keep talking with me for a bit? Either way, I'm here for you."""

        conversation.messages.append(Message(
            text=crisis_response,
            timestamp=datetime.now().isoformat(),
            role="assistant"
        ))
        return crisis_response

    # Emotion detection + flow tracking.
    emotions = self.detect_emotion(message)
    conversation.emotion_history.append(emotions)
    flow_context = self.flow_manager.process_message(user_id, message, emotions)

    # Plain-dict view of the transcript for the generator.
    history = [
        {"text": m.text, "timestamp": m.timestamp, "role": m.role}
        for m in conversation.messages
    ]

    response_text = self.generate_response(message, emotions, history)

    # Short replies that don't already ask something get a follow-up question.
    if len(response_text.split()) < 20 and not response_text.endswith('?'):
        follow_up_prompt = f"""
Recent conversation:
{chr(10).join([f"{m['role']}: {m['text']}" for m in history[-3:]])}

Now, write a single empathetic and open-ended question to encourage the user to share more.
Respond with just the question, no explanation.
"""
        follow_up = self.llm.invoke(follow_up_prompt).strip()
        # Keep only the first question-shaped fragment of the LLM output.
        questions = re.findall(r'([^\n.?!]*\?)', follow_up)
        question = questions[0].strip() if questions else follow_up.strip().split('\n')[0]
        # Very short main replies are replaced outright by the question.
        if len(response_text.split()) < 5:
            response_text = question
        else:
            response_text = f"{response_text}\n\n{question}"

    # Scrub any meta-commentary that may have leaked from the LLM.
    response_text = response_text.strip()
    response_text = re.sub(r"(Your response|This response).*", "", response_text, flags=re.IGNORECASE).strip()

    conversation.messages.append(Message(
        text=response_text,
        timestamp=datetime.now().isoformat(),
        role="assistant"
    ))

    conversation.context.update({
        "last_emotion": emotions,
        "last_interaction": datetime.now().isoformat(),
        "flow_context": flow_context
    })

    # Index this exchange so future sessions can retrieve it.
    current_interaction = f"User: {message}\nChatbot: {response_text}"
    self.vector_db.add_texts([current_interaction])
    self.vector_db.save_local(VECTOR_DB_PATH)

    return response_text
| 907 |
-
|
def get_session_summary(self, session_id: str) -> Optional[Dict[str, Any]]:
    """Return the stored summary for *session_id*, or None if unknown."""
    return self.session_summaries.get(session_id)
| 911 |
-
|
def get_user_replies(self, user_id: str) -> List[Dict[str, Any]]:
    """Return every message the user has sent, oldest first.

    Each entry carries the text, its timestamp, and the session id;
    an unknown user yields an empty list.
    """
    if user_id not in self.conversations:
        return []

    conversation = self.conversations[user_id]
    return [
        {
            "text": msg.text,
            "timestamp": msg.timestamp,
            "session_id": conversation.session_id,
        }
        for msg in conversation.messages
        if msg.role == "user"
    ]
| 928 |
-
|
if __name__ == "__main__":
    # Module is intended to be imported by the app; no standalone CLI.
    pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
conversation_flow.py
DELETED
|
@@ -1,467 +0,0 @@
|
|
| 1 |
-
import logging
|
| 2 |
-
import json
|
| 3 |
-
import json5
|
| 4 |
-
import time
|
| 5 |
-
from datetime import datetime
|
| 6 |
-
from typing import List, Dict, Any, Optional
|
| 7 |
-
from pydantic import BaseModel, Field
|
| 8 |
-
|
| 9 |
-
# Configure logging
|
| 10 |
-
logger = logging.getLogger(__name__)
|
| 11 |
-
|
| 12 |
-
class PhaseTransitionResponse(BaseModel):
|
| 13 |
-
goals_progress: Dict[str, float]
|
| 14 |
-
should_transition: bool
|
| 15 |
-
next_phase: str
|
| 16 |
-
reasoning: str
|
| 17 |
-
|
| 18 |
-
class SessionCharacteristics(BaseModel):
|
| 19 |
-
alliance_strength: float = Field(ge=0.0, le=1.0)
|
| 20 |
-
engagement_level: float = Field(ge=0.0, le=1.0)
|
| 21 |
-
emotional_pattern: str
|
| 22 |
-
cognitive_pattern: str
|
| 23 |
-
coping_mechanisms: List[str] = Field(min_items=2)
|
| 24 |
-
progress_quality: float = Field(ge=0.0, le=1.0)
|
| 25 |
-
recommended_focus: str
|
| 26 |
-
|
| 27 |
-
class ConversationPhase(BaseModel):
|
| 28 |
-
name: str
|
| 29 |
-
description: str
|
| 30 |
-
goals: List[str]
|
| 31 |
-
typical_duration: int # in minutes
|
| 32 |
-
started_at: Optional[str] = None # ISO timestamp
|
| 33 |
-
ended_at: Optional[str] = None # ISO timestamp
|
| 34 |
-
completion_metrics: Dict[str, float] = Field(default_factory=dict) # e.g., {'goal_progress': 0.8}
|
| 35 |
-
|
| 36 |
-
class FlowManager:
|
| 37 |
-
|
| 38 |
-
# Define conversation phases
|
| 39 |
-
PHASES = {
|
| 40 |
-
'introduction': {
|
| 41 |
-
'description': 'Establishing rapport and identifying main concerns',
|
| 42 |
-
'goals': [
|
| 43 |
-
'build therapeutic alliance',
|
| 44 |
-
'identify primary concerns',
|
| 45 |
-
'understand client expectations',
|
| 46 |
-
'establish session structure'
|
| 47 |
-
],
|
| 48 |
-
'typical_duration': 5 # In mins
|
| 49 |
-
},
|
| 50 |
-
'exploration': {
|
| 51 |
-
'description': 'In-depth exploration of issues and their context',
|
| 52 |
-
'goals': [
|
| 53 |
-
'examine emotional responses',
|
| 54 |
-
'explore thought patterns',
|
| 55 |
-
'identify behavioral patterns',
|
| 56 |
-
'understand situational context',
|
| 57 |
-
'recognize relationship dynamics'
|
| 58 |
-
],
|
| 59 |
-
'typical_duration': 15 # In mins
|
| 60 |
-
},
|
| 61 |
-
'intervention': {
|
| 62 |
-
'description': 'Providing strategies, insights, and therapeutic interventions',
|
| 63 |
-
'goals': [
|
| 64 |
-
'introduce coping techniques',
|
| 65 |
-
'reframe negative thinking',
|
| 66 |
-
'provide emotional validation',
|
| 67 |
-
'offer perspective shifts',
|
| 68 |
-
'suggest behavioral modifications'
|
| 69 |
-
],
|
| 70 |
-
'typical_duration': 20 # In minutes
|
| 71 |
-
},
|
| 72 |
-
'conclusion': {
|
| 73 |
-
'description': 'Summarizing insights and establishing next steps',
|
| 74 |
-
'goals': [
|
| 75 |
-
'review key insights',
|
| 76 |
-
'consolidate learning',
|
| 77 |
-
'identify action items',
|
| 78 |
-
'set intentions',
|
| 79 |
-
'provide closure'
|
| 80 |
-
],
|
| 81 |
-
'typical_duration': 5 # In minutes
|
| 82 |
-
}
|
| 83 |
-
}
|
| 84 |
-
|
| 85 |
-
def __init__(self, llm, session_duration: int = 45):
|
| 86 |
-
|
| 87 |
-
self.llm = llm
|
| 88 |
-
self.session_duration = session_duration * 60 # Convert to seconds
|
| 89 |
-
|
| 90 |
-
# User session data structures
|
| 91 |
-
self.user_sessions = {} # user_id -> session data
|
| 92 |
-
|
| 93 |
-
logger.info(f"Initialized FlowManager with {session_duration} minute sessions")
|
| 94 |
-
|
| 95 |
-
def _ensure_user_session(self, user_id: str):
|
| 96 |
-
|
| 97 |
-
if user_id not in self.user_sessions:
|
| 98 |
-
self.initialize_session(user_id)
|
| 99 |
-
|
| 100 |
-
def initialize_session(self, user_id: str):
|
| 101 |
-
|
| 102 |
-
now = datetime.now().isoformat()
|
| 103 |
-
|
| 104 |
-
# Create initial phase
|
| 105 |
-
initial_phase = ConversationPhase(
|
| 106 |
-
name='introduction',
|
| 107 |
-
description=self.PHASES['introduction']['description'],
|
| 108 |
-
goals=self.PHASES['introduction']['goals'],
|
| 109 |
-
typical_duration=self.PHASES['introduction']['typical_duration'],
|
| 110 |
-
started_at=now
|
| 111 |
-
)
|
| 112 |
-
|
| 113 |
-
# Generate session ID
|
| 114 |
-
session_id = f"{user_id}_{datetime.now().strftime('%Y%m%d%H%M%S')}"
|
| 115 |
-
|
| 116 |
-
# Initialize session data
|
| 117 |
-
self.user_sessions[user_id] = {
|
| 118 |
-
'session_id': session_id,
|
| 119 |
-
'user_id': user_id,
|
| 120 |
-
'started_at': now,
|
| 121 |
-
'updated_at': now,
|
| 122 |
-
'current_phase': initial_phase,
|
| 123 |
-
'phase_history': [initial_phase],
|
| 124 |
-
'message_count': 0,
|
| 125 |
-
'emotion_history': [],
|
| 126 |
-
'emotion_progression': [],
|
| 127 |
-
'flags': {
|
| 128 |
-
'crisis_detected': False,
|
| 129 |
-
'long_silences': False
|
| 130 |
-
},
|
| 131 |
-
'llm_context': {
|
| 132 |
-
'session_characteristics': {}
|
| 133 |
-
}
|
| 134 |
-
}
|
| 135 |
-
|
| 136 |
-
logger.info(f"Initialized new session for user {user_id}")
|
| 137 |
-
return self.user_sessions[user_id]
|
| 138 |
-
|
| 139 |
-
def process_message(self, user_id: str, message: str, emotions: Dict[str, float]) -> Dict[str, Any]:
|
| 140 |
-
|
| 141 |
-
self._ensure_user_session(user_id)
|
| 142 |
-
session = self.user_sessions[user_id]
|
| 143 |
-
|
| 144 |
-
# Update session
|
| 145 |
-
now = datetime.now().isoformat()
|
| 146 |
-
session['updated_at'] = now
|
| 147 |
-
session['message_count'] += 1
|
| 148 |
-
|
| 149 |
-
# Track emotions
|
| 150 |
-
emotion_entry = {
|
| 151 |
-
'timestamp': now,
|
| 152 |
-
'emotions': emotions,
|
| 153 |
-
'message_idx': session['message_count']
|
| 154 |
-
}
|
| 155 |
-
session['emotion_history'].append(emotion_entry)
|
| 156 |
-
|
| 157 |
-
# Update emotion progression
|
| 158 |
-
if not session.get('emotion_progression'):
|
| 159 |
-
session['emotion_progression'] = []
|
| 160 |
-
|
| 161 |
-
# Get primary emotion (highest confidence)
|
| 162 |
-
primary_emotion = max(emotions.items(), key=lambda x: x[1])[0]
|
| 163 |
-
session['emotion_progression'].append(primary_emotion)
|
| 164 |
-
|
| 165 |
-
# Check for phase transition
|
| 166 |
-
self._check_phase_transition(user_id, message, emotions)
|
| 167 |
-
|
| 168 |
-
# Update session characteristics via LLM analysis (periodically)
|
| 169 |
-
if session['message_count'] % 5 == 0:
|
| 170 |
-
self._update_session_characteristics(user_id)
|
| 171 |
-
|
| 172 |
-
# Create flow context for response generation
|
| 173 |
-
flow_context = self._create_flow_context(user_id)
|
| 174 |
-
|
| 175 |
-
return flow_context
|
| 176 |
-
|
| 177 |
-
def _check_phase_transition(self, user_id: str, message: str, emotions: Dict[str, float]):
|
| 178 |
-
|
| 179 |
-
session = self.user_sessions[user_id]
|
| 180 |
-
current_phase = session['current_phase']
|
| 181 |
-
|
| 182 |
-
# Calculate session progress
|
| 183 |
-
started_at = datetime.fromisoformat(session['started_at'])
|
| 184 |
-
now = datetime.now()
|
| 185 |
-
elapsed_seconds = (now - started_at).total_seconds()
|
| 186 |
-
session_progress = elapsed_seconds / self.session_duration
|
| 187 |
-
|
| 188 |
-
# Create prompt for LLM to evaluate phase transition
|
| 189 |
-
phase_context = {
|
| 190 |
-
'current': current_phase.name,
|
| 191 |
-
'description': current_phase.description,
|
| 192 |
-
'goals': current_phase.goals,
|
| 193 |
-
'time_in_phase': (now - datetime.fromisoformat(current_phase.started_at)).total_seconds() / 60,
|
| 194 |
-
'session_progress': session_progress,
|
| 195 |
-
'message_count': session['message_count']
|
| 196 |
-
}
|
| 197 |
-
|
| 198 |
-
# Only check for transition if we've spent some time in current phase
|
| 199 |
-
min_time_in_phase_minutes = max(2, current_phase.typical_duration * 0.5)
|
| 200 |
-
if phase_context['time_in_phase'] < min_time_in_phase_minutes:
|
| 201 |
-
return
|
| 202 |
-
|
| 203 |
-
prompt = f"""
|
| 204 |
-
Evaluate whether this therapeutic conversation should transition to the next phase.
|
| 205 |
-
|
| 206 |
-
Current conversation state:
|
| 207 |
-
- Current phase: {current_phase.name} ("{current_phase.description}")
|
| 208 |
-
- Goals for this phase: {', '.join(current_phase.goals)}
|
| 209 |
-
- Time spent in this phase: {phase_context['time_in_phase']:.1f} minutes
|
| 210 |
-
- Session progress: {session_progress * 100:.1f}% complete
|
| 211 |
-
- Message count: {session['message_count']}
|
| 212 |
-
|
| 213 |
-
Latest message from user: "{message}"
|
| 214 |
-
|
| 215 |
-
Current emotions: {', '.join([f"{e} ({score:.2f})" for e, score in
|
| 216 |
-
sorted(emotions.items(), key=lambda x: x[1], reverse=True)[:3]])}
|
| 217 |
-
|
| 218 |
-
Phases in a therapeutic conversation:
|
| 219 |
-
1. introduction: {self.PHASES['introduction']['description']}
|
| 220 |
-
2. exploration: {self.PHASES['exploration']['description']}
|
| 221 |
-
3. intervention: {self.PHASES['intervention']['description']}
|
| 222 |
-
4. conclusion: {self.PHASES['conclusion']['description']}
|
| 223 |
-
|
| 224 |
-
Consider:
|
| 225 |
-
1. Have the goals of the current phase been sufficiently addressed?
|
| 226 |
-
2. Is the timing appropriate considering overall session progress?
|
| 227 |
-
3. Is there a natural transition point in the conversation?
|
| 228 |
-
4. Does the emotional content suggest readiness to move forward?
|
| 229 |
-
|
| 230 |
-
First, provide your analysis of whether the key goals of the current phase have been met.
|
| 231 |
-
Then decide if the conversation should transition to the next phase.
|
| 232 |
-
|
| 233 |
-
Respond with a JSON object in this format:
|
| 234 |
-
{{
|
| 235 |
-
"goals_progress": {{
|
| 236 |
-
"goal1": 0.5,
|
| 237 |
-
"goal2": 0.7
|
| 238 |
-
}},
|
| 239 |
-
"should_transition": false,
|
| 240 |
-
"next_phase": "exploration",
|
| 241 |
-
"reasoning": "brief explanation"
|
| 242 |
-
}}
|
| 243 |
-
|
| 244 |
-
Output ONLY valid JSON without additional text.
|
| 245 |
-
"""
|
| 246 |
-
|
| 247 |
-
response = self.llm.invoke(prompt)
|
| 248 |
-
|
| 249 |
-
try:
|
| 250 |
-
# Parse with standard json
|
| 251 |
-
evaluation = json.loads(response)
|
| 252 |
-
# Validate with Pydantic
|
| 253 |
-
phase_transition = PhaseTransitionResponse.parse_obj(evaluation)
|
| 254 |
-
|
| 255 |
-
# Update goal progress metrics
|
| 256 |
-
for goal, score in phase_transition.goals_progress.items():
|
| 257 |
-
if goal in current_phase.goals:
|
| 258 |
-
current_phase.completion_metrics[goal] = score
|
| 259 |
-
|
| 260 |
-
# Check if we should transition
|
| 261 |
-
if phase_transition.should_transition:
|
| 262 |
-
if phase_transition.next_phase in self.PHASES:
|
| 263 |
-
self._transition_to_phase(user_id, phase_transition.next_phase, phase_transition.reasoning)
|
| 264 |
-
except (json.JSONDecodeError, ValueError):
|
| 265 |
-
self._check_time_based_transition(user_id)
|
| 266 |
-
|
| 267 |
-
def _check_time_based_transition(self, user_id: str):
|
| 268 |
-
|
| 269 |
-
session = self.user_sessions[user_id]
|
| 270 |
-
current_phase = session['current_phase']
|
| 271 |
-
|
| 272 |
-
# Get elapsed time
|
| 273 |
-
started_at = datetime.fromisoformat(session['started_at'])
|
| 274 |
-
now = datetime.now()
|
| 275 |
-
elapsed_minutes = (now - started_at).total_seconds() / 60
|
| 276 |
-
|
| 277 |
-
# Calculate phase thresholds
|
| 278 |
-
intro_threshold = self.PHASES['introduction']['typical_duration']
|
| 279 |
-
explore_threshold = intro_threshold + self.PHASES['exploration']['typical_duration']
|
| 280 |
-
intervention_threshold = explore_threshold + self.PHASES['intervention']['typical_duration']
|
| 281 |
-
|
| 282 |
-
# Transition based on time
|
| 283 |
-
next_phase = None
|
| 284 |
-
if current_phase.name == 'introduction' and elapsed_minutes >= intro_threshold:
|
| 285 |
-
next_phase = 'exploration'
|
| 286 |
-
elif current_phase.name == 'exploration' and elapsed_minutes >= explore_threshold:
|
| 287 |
-
next_phase = 'intervention'
|
| 288 |
-
elif current_phase.name == 'intervention' and elapsed_minutes >= intervention_threshold:
|
| 289 |
-
next_phase = 'conclusion'
|
| 290 |
-
|
| 291 |
-
if next_phase:
|
| 292 |
-
self._transition_to_phase(user_id, next_phase, "Time-based transition")
|
| 293 |
-
|
| 294 |
-
def _transition_to_phase(self, user_id: str, next_phase_name: str, reason: str):
|
| 295 |
-
|
| 296 |
-
session = self.user_sessions[user_id]
|
| 297 |
-
current_phase = session['current_phase']
|
| 298 |
-
|
| 299 |
-
# End current phase
|
| 300 |
-
now = datetime.now().isoformat()
|
| 301 |
-
current_phase.ended_at = now
|
| 302 |
-
|
| 303 |
-
# Create new phase
|
| 304 |
-
new_phase = ConversationPhase(
|
| 305 |
-
name=next_phase_name,
|
| 306 |
-
description=self.PHASES[next_phase_name]['description'],
|
| 307 |
-
goals=self.PHASES[next_phase_name]['goals'],
|
| 308 |
-
typical_duration=self.PHASES[next_phase_name]['typical_duration'],
|
| 309 |
-
started_at=now
|
| 310 |
-
)
|
| 311 |
-
|
| 312 |
-
# Update session
|
| 313 |
-
session['current_phase'] = new_phase
|
| 314 |
-
session['phase_history'].append(new_phase)
|
| 315 |
-
|
| 316 |
-
logger.info(f"User {user_id} transitioned from {current_phase.name} to {next_phase_name}: {reason}")
|
| 317 |
-
|
| 318 |
-
def _update_session_characteristics(self, user_id: str):
|
| 319 |
-
session = self.user_sessions[user_id]
|
| 320 |
-
|
| 321 |
-
# Only do this periodically to save LLM calls
|
| 322 |
-
if session['message_count'] < 5:
|
| 323 |
-
return
|
| 324 |
-
|
| 325 |
-
# Create a summary of the conversation so far
|
| 326 |
-
message_sample = []
|
| 327 |
-
emotion_summary = {}
|
| 328 |
-
|
| 329 |
-
# Get recent messages
|
| 330 |
-
for i, emotion_data in enumerate(session['emotion_history'][-10:]):
|
| 331 |
-
msg_idx = emotion_data['message_idx']
|
| 332 |
-
if i % 2 == 0: # Just include a subset of messages
|
| 333 |
-
message_sample.append(f"Message {msg_idx}: User emotions: {', '.join([f'{e}({s:.2f})' for e, s in sorted(emotion_data['emotions'].items(), key=lambda x: x[1], reverse=True)[:2]])}")
|
| 334 |
-
|
| 335 |
-
# Aggregate emotions
|
| 336 |
-
for emotion, score in emotion_data['emotions'].items():
|
| 337 |
-
if score > 0.3:
|
| 338 |
-
emotion_summary[emotion] = emotion_summary.get(emotion, 0) + score
|
| 339 |
-
|
| 340 |
-
# Normalize emotion summary
|
| 341 |
-
if emotion_summary:
|
| 342 |
-
total = sum(emotion_summary.values())
|
| 343 |
-
emotion_summary = {e: s/total for e, s in emotion_summary.items()}
|
| 344 |
-
|
| 345 |
-
# prompt for LLM
|
| 346 |
-
prompt = f"""
|
| 347 |
-
Analyze this therapy session and provide a JSON response with the following characteristics:
|
| 348 |
-
|
| 349 |
-
Current session state:
|
| 350 |
-
- Phase: {session['current_phase'].name} ({session['current_phase'].description})
|
| 351 |
-
- Message count: {session['message_count']}
|
| 352 |
-
- Emotion summary: {', '.join([f'{e}({s:.2f})' for e, s in sorted(emotion_summary.items(), key=lambda x: x[1], reverse=True)])}
|
| 353 |
-
|
| 354 |
-
Recent messages:
|
| 355 |
-
{chr(10).join(message_sample)}
|
| 356 |
-
|
| 357 |
-
Required JSON format:
|
| 358 |
-
{{
|
| 359 |
-
"alliance_strength": 0.8,
|
| 360 |
-
"engagement_level": 0.7,
|
| 361 |
-
"emotional_pattern": "brief description of emotional pattern",
|
| 362 |
-
"cognitive_pattern": "brief description of cognitive pattern",
|
| 363 |
-
"coping_mechanisms": ["mechanism1", "mechanism2"],
|
| 364 |
-
"progress_quality": 0.6,
|
| 365 |
-
"recommended_focus": "brief therapeutic recommendation"
|
| 366 |
-
}}
|
| 367 |
-
|
| 368 |
-
Important:
|
| 369 |
-
1. Respond with ONLY the JSON object
|
| 370 |
-
2. Use numbers between 0.0 and 1.0 for alliance_strength, engagement_level, and progress_quality
|
| 371 |
-
3. Keep descriptions brief and focused
|
| 372 |
-
4. Include at least 2 coping mechanisms
|
| 373 |
-
5. Provide a specific recommended focus
|
| 374 |
-
|
| 375 |
-
JSON Response:
|
| 376 |
-
"""
|
| 377 |
-
|
| 378 |
-
response = self.llm.invoke(prompt)
|
| 379 |
-
|
| 380 |
-
try:
|
| 381 |
-
# Parse with standard json
|
| 382 |
-
characteristics = json.loads(response)
|
| 383 |
-
# Validate with Pydantic
|
| 384 |
-
session_chars = SessionCharacteristics.parse_obj(characteristics)
|
| 385 |
-
session['llm_context']['session_characteristics'] = session_chars.dict()
|
| 386 |
-
logger.info(f"Updated session characteristics for user {user_id}")
|
| 387 |
-
except (json.JSONDecodeError, ValueError) as e:
|
| 388 |
-
logger.warning(f"Failed to parse session characteristics: {e}")
|
| 389 |
-
|
| 390 |
-
def _create_flow_context(self, user_id: str) -> Dict[str, Any]:
|
| 391 |
-
|
| 392 |
-
session = self.user_sessions[user_id]
|
| 393 |
-
current_phase = session['current_phase']
|
| 394 |
-
|
| 395 |
-
# Calculate session times
|
| 396 |
-
started_at = datetime.fromisoformat(session['started_at'])
|
| 397 |
-
now = datetime.now()
|
| 398 |
-
elapsed_seconds = (now - started_at).total_seconds()
|
| 399 |
-
remaining_seconds = max(0, self.session_duration - elapsed_seconds)
|
| 400 |
-
|
| 401 |
-
# Get primary emotions
|
| 402 |
-
emotions_summary = {}
|
| 403 |
-
for emotion_data in session['emotion_history'][-3:]: # Last 3 messages
|
| 404 |
-
for emotion, score in emotion_data['emotions'].items():
|
| 405 |
-
emotions_summary[emotion] = emotions_summary.get(emotion, 0) + score
|
| 406 |
-
|
| 407 |
-
if emotions_summary:
|
| 408 |
-
primary_emotions = sorted(emotions_summary.items(), key=lambda x: x[1], reverse=True)[:3]
|
| 409 |
-
else:
|
| 410 |
-
primary_emotions = []
|
| 411 |
-
|
| 412 |
-
# Create guidance based on phase
|
| 413 |
-
phase_guidance = []
|
| 414 |
-
|
| 415 |
-
# Add phase-specific guidance
|
| 416 |
-
if current_phase.name == 'introduction':
|
| 417 |
-
phase_guidance.append("Build rapport and identify main concerns")
|
| 418 |
-
if session['message_count'] > 3:
|
| 419 |
-
phase_guidance.append("Begin exploring emotional context")
|
| 420 |
-
|
| 421 |
-
elif current_phase.name == 'exploration':
|
| 422 |
-
phase_guidance.append("Deepen understanding of issues and contexts")
|
| 423 |
-
phase_guidance.append("Connect emotional patterns to identify themes")
|
| 424 |
-
|
| 425 |
-
elif current_phase.name == 'intervention':
|
| 426 |
-
phase_guidance.append("Offer support strategies and therapeutic insights")
|
| 427 |
-
if remaining_seconds < 600: # Less than 10 minutes left
|
| 428 |
-
phase_guidance.append("Begin consolidating key insights")
|
| 429 |
-
|
| 430 |
-
elif current_phase.name == 'conclusion':
|
| 431 |
-
phase_guidance.append("Summarize insights and establish next steps")
|
| 432 |
-
phase_guidance.append("Provide closure while maintaining supportive presence")
|
| 433 |
-
|
| 434 |
-
# Add guidance based on session characteristics
|
| 435 |
-
if 'session_characteristics' in session['llm_context']:
|
| 436 |
-
char = session['llm_context']['session_characteristics']
|
| 437 |
-
|
| 438 |
-
# Low alliance strength
|
| 439 |
-
if char.get('alliance_strength', 0.8) < 0.6:
|
| 440 |
-
phase_guidance.append("Focus on strengthening therapeutic alliance")
|
| 441 |
-
|
| 442 |
-
# Low engagement
|
| 443 |
-
if char.get('engagement_level', 0.8) < 0.6:
|
| 444 |
-
phase_guidance.append("Increase engagement with more personalized responses")
|
| 445 |
-
|
| 446 |
-
# Add recommended focus if available
|
| 447 |
-
if 'recommended_focus' in char:
|
| 448 |
-
phase_guidance.append(char['recommended_focus'])
|
| 449 |
-
|
| 450 |
-
# Create flow context
|
| 451 |
-
flow_context = {
|
| 452 |
-
'phase': {
|
| 453 |
-
'name': current_phase.name,
|
| 454 |
-
'description': current_phase.description,
|
| 455 |
-
'goals': current_phase.goals
|
| 456 |
-
},
|
| 457 |
-
'session': {
|
| 458 |
-
'elapsed_minutes': elapsed_seconds / 60,
|
| 459 |
-
'remaining_minutes': remaining_seconds / 60,
|
| 460 |
-
'progress_percentage': (elapsed_seconds / self.session_duration) * 100,
|
| 461 |
-
'message_count': session['message_count']
|
| 462 |
-
},
|
| 463 |
-
'emotions': [{'name': e, 'intensity': s} for e, s in primary_emotions],
|
| 464 |
-
'guidance': phase_guidance
|
| 465 |
-
}
|
| 466 |
-
|
| 467 |
-
return flow_context
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
guidelines.txt
DELETED
|
@@ -1,107 +0,0 @@
|
|
| 1 |
-
Therapeutic Guidelines:
|
| 2 |
-
1. Build Trust and Rapport
|
| 3 |
-
|
| 4 |
-
Begin with warmth and understanding.
|
| 5 |
-
|
| 6 |
-
Use active listening: reflect back emotions and key points.
|
| 7 |
-
|
| 8 |
-
Be supportive and non-threatening in tone.
|
| 9 |
-
|
| 10 |
-
Always keep the tone calm, supportive, and emotionally intelligent.
|
| 11 |
-
|
| 12 |
-
Empower users to explore their own thoughts and solutions.
|
| 13 |
-
|
| 14 |
-
Ask open-ended questions to deepen self-reflection.
|
| 15 |
-
|
| 16 |
-
Avoid giving commands or rigid advice.
|
| 17 |
-
|
| 18 |
-
Avoid assumptions based on culture, gender, or personal history.
|
| 19 |
-
|
| 20 |
-
Create psychological safety — reassure the user that their thoughts and emotions are welcome and valid.
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
2. Be Non-Judgmental
|
| 24 |
-
|
| 25 |
-
Accept all emotions and experiences without criticism.
|
| 26 |
-
|
| 27 |
-
Never blame or shame the user.
|
| 28 |
-
|
| 29 |
-
Normalize their feelings when appropriate
|
| 30 |
-
|
| 31 |
-
3. Use Evidence-Based Techniques
|
| 32 |
-
|
| 33 |
-
Apply suitable techniques such as:
|
| 34 |
-
1. Cognitive Behavioral Therapy (CBT)
|
| 35 |
-
Help users identify negative thought patterns (cognitive distortions) and reframe them:
|
| 36 |
-
|
| 37 |
-
“Let’s try to challenge that thought — is there evidence that supports or contradicts it?”
|
| 38 |
-
|
| 39 |
-
“What might be a more balanced way to look at this?”
|
| 40 |
-
|
| 41 |
-
2. Dialectical Behavior Therapy (DBT)
|
| 42 |
-
Focus on emotional regulation, distress tolerance, and mindfulness:
|
| 43 |
-
|
| 44 |
-
“Let’s take a moment to breathe and notice what you’re feeling without judgment.”
|
| 45 |
-
|
| 46 |
-
“What can you do right now to self-soothe or ground yourself?”
|
| 47 |
-
|
| 48 |
-
3. Acceptance and Commitment Therapy (ACT)
|
| 49 |
-
Promote acceptance of thoughts and values-based living:
|
| 50 |
-
|
| 51 |
-
“Instead of fighting that thought, can we observe it and let it be?”
|
| 52 |
-
|
| 53 |
-
“What matters to you right now? What small step can you take in that direction?”
|
| 54 |
-
|
| 55 |
-
4. Motivational Interviewing
|
| 56 |
-
Help ambivalent users explore change:
|
| 57 |
-
|
| 58 |
-
“On a scale from 1 to 10, how ready do you feel to make a change?”
|
| 59 |
-
|
| 60 |
-
“What would it take to move one step closer?”
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
4. Structured Conversation Flow
|
| 64 |
-
Begin with empathy → explore the problem → validate emotions → apply a therapeutic tool → summarize insight or coping step.
|
| 65 |
-
|
| 66 |
-
End each message with a question or reflection prompt to continue engagement.
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
5. Add Actionable Suggestions
|
| 70 |
-
|
| 71 |
-
Offer gentle, realistic, and practical steps the user can try.
|
| 72 |
-
|
| 73 |
-
Tailor suggestions to their emotional state — prioritize simplicity and emotional safety.
|
| 74 |
-
|
| 75 |
-
Use empowering language that invites, not instructs:
|
| 76 |
-
|
| 77 |
-
“Would you be open to trying…?”
|
| 78 |
-
|
| 79 |
-
“Some people find this helpful — would you like to explore it together?”
|
| 80 |
-
|
| 81 |
-
Examples of actionable suggestions include:
|
| 82 |
-
|
| 83 |
-
Grounding Techniques
|
| 84 |
-
“Can you name five things you see around you right now, four things you can touch, three you can hear, two you can smell, and one you can taste?”
|
| 85 |
-
|
| 86 |
-
Mindful Breathing
|
| 87 |
-
“Let’s try a simple breathing exercise: inhale slowly for 4 counts, hold for 4, exhale for 4. Can we do this together for a few rounds?”
|
| 88 |
-
|
| 89 |
-
Journaling Prompts
|
| 90 |
-
“Would writing down your thoughts help make sense of what you're feeling? You might start with: ‘Right now, I’m feeling… because…’”
|
| 91 |
-
|
| 92 |
-
Self-Compassion Reminders
|
| 93 |
-
“Can you speak to yourself the way you would to a friend going through this?”
|
| 94 |
-
|
| 95 |
-
Behavioral Activation
|
| 96 |
-
“Sometimes doing one small activity, even if it feels meaningless at first, can help shift your energy. What’s one thing you could do today that used to bring you comfort?”
|
| 97 |
-
|
| 98 |
-
Connection Check-In
|
| 99 |
-
“Is there someone you trust that you might feel comfortable talking to or spending time with today, even briefly?”
|
| 100 |
-
|
| 101 |
-
End with an open tone:
|
| 102 |
-
|
| 103 |
-
“How does that sound to you?”
|
| 104 |
-
|
| 105 |
-
“Would you like to try that and let me know how it goes?”
|
| 106 |
-
|
| 107 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
hf_spaces.py
DELETED
|
@@ -1,38 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Hugging Face Spaces GPU configuration
|
| 3 |
-
"""
|
| 4 |
-
import os
|
| 5 |
-
import torch
|
| 6 |
-
|
| 7 |
-
# Set environment variables for Hugging Face Spaces
|
| 8 |
-
os.environ.update({
|
| 9 |
-
'TRANSFORMERS_CACHE': '/tmp/huggingface',
|
| 10 |
-
'HF_HOME': '/tmp/huggingface',
|
| 11 |
-
'TOKENIZERS_PARALLELISM': 'false',
|
| 12 |
-
'TRANSFORMERS_VERBOSITY': 'error',
|
| 13 |
-
'BITSANDBYTES_NOWELCOME': '1',
|
| 14 |
-
'PYTORCH_CUDA_ALLOC_CONF': 'max_split_size_mb:128' # Memory efficient attention
|
| 15 |
-
})
|
| 16 |
-
|
| 17 |
-
# Create necessary directories
|
| 18 |
-
for directory in ['/tmp/huggingface', '/tmp/vector_db', '/tmp/session_data', '/tmp/session_summaries']:
|
| 19 |
-
os.makedirs(directory, exist_ok=True)
|
| 20 |
-
|
| 21 |
-
# Hugging Face Spaces specific settings
|
| 22 |
-
SPACES_CONFIG = {
|
| 23 |
-
'port': 7860, # Default port for Hugging Face Spaces
|
| 24 |
-
'host': '0.0.0.0',
|
| 25 |
-
'workers': 1, # Single worker for Hugging Face Spaces
|
| 26 |
-
'timeout': 180, # Increased timeout for model loading
|
| 27 |
-
'log_level': 'info'
|
| 28 |
-
}
|
| 29 |
-
|
| 30 |
-
# Model settings optimized for T4 GPU
|
| 31 |
-
MODEL_CONFIG = {
|
| 32 |
-
'model_name': 'meta-llama/Llama-3.2-3B-Instruct',
|
| 33 |
-
'peft_model_path': 'nada013/mental-health-chatbot',
|
| 34 |
-
'use_4bit': True,
|
| 35 |
-
'device': 'cuda' if torch.cuda.is_available() else 'cpu', # Use GPU if available
|
| 36 |
-
'batch_size': 4, # Optimized for T4 GPU
|
| 37 |
-
'max_memory': {0: "14GB"} if torch.cuda.is_available() else None # T4 GPU memory limit
|
| 38 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
DELETED
|
@@ -1,28 +0,0 @@
|
|
| 1 |
-
transformers>=4.49.0
|
| 2 |
-
torch>=2.2.0
|
| 3 |
-
sentence-transformers>=3.4.1
|
| 4 |
-
langchain>=0.3.21
|
| 5 |
-
langchain-community>=0.3.20
|
| 6 |
-
langchain-core>=0.3.47
|
| 7 |
-
langchain-huggingface>=0.1.2
|
| 8 |
-
pydantic>=2.10.6
|
| 9 |
-
pydantic-settings>=2.8.1
|
| 10 |
-
fastapi>=0.115.11
|
| 11 |
-
uvicorn>=0.34.0
|
| 12 |
-
python-dotenv>=1.0.1
|
| 13 |
-
pytest>=7.4.0
|
| 14 |
-
gunicorn>=21.2.0
|
| 15 |
-
accelerate>=1.5.2
|
| 16 |
-
bitsandbytes>=0.45.3
|
| 17 |
-
chromadb>=0.6.3
|
| 18 |
-
datasets>=3.4.1
|
| 19 |
-
faiss-cpu>=1.10.0
|
| 20 |
-
huggingface-hub>=0.29.3
|
| 21 |
-
peft>=0.15.1
|
| 22 |
-
safetensors>=0.5.3
|
| 23 |
-
tokenizers>=0.21.1
|
| 24 |
-
tiktoken>=0.9.0
|
| 25 |
-
starlette>=0.46.1
|
| 26 |
-
websockets>=15.0.1
|
| 27 |
-
python-multipart>=0.0.6
|
| 28 |
-
json5>=0.9.14
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
start.sh
DELETED
|
@@ -1,7 +0,0 @@
|
|
| 1 |
-
#!/bin/bash
|
| 2 |
-
echo "Starting Uvicorn..."
|
| 3 |
-
echo "Current directory: $(pwd)"
|
| 4 |
-
echo "Listing files:"
|
| 5 |
-
ls -la
|
| 6 |
-
echo "Starting app..."
|
| 7 |
-
exec uvicorn app:app --host 0.0.0.0 --port 7860
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|