# chat-gpu / chatbot.py
import os
import logging
import json
import torch
import re
from typing import List, Dict, Any, Optional, Union
from datetime import datetime
from pydantic import BaseModel, Field
import tempfile
# Model imports
from transformers import (
pipeline,
AutoTokenizer,
AutoModelForCausalLM,
BitsAndBytesConfig
)
from peft import PeftModel, PeftConfig
from sentence_transformers import SentenceTransformer
# LangChain imports
from langchain.llms import HuggingFacePipeline
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
# Import FlowManager
from conversation_flow import FlowManager
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
# Suppress warnings
import warnings
warnings.filterwarnings('ignore', category=UserWarning)
# Set up cache directories
def setup_cache_dirs():
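    """Configure Hugging Face cache directories.

    Uses /tmp/huggingface when running inside a Hugging Face Space
    (detected via the SPACE_ID environment variable); otherwise falls back
    to the default ~/.cache/huggingface. Returns the chosen cache path.
    """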
# Check if running in Hugging Face Spaces
is_spaces = os.environ.get('SPACE_ID') is not None
if is_spaces:
# Use /tmp for Hugging Face Spaces with proper permissions
cache_dir = '/tmp/huggingface'
os.environ.update({
'TRANSFORMERS_CACHE': cache_dir,
'HF_HOME': cache_dir,
'TOKENIZERS_PARALLELISM': 'false',
'TRANSFORMERS_VERBOSITY': 'error',
'BITSANDBYTES_NOWELCOME': '1',
'HF_DATASETS_CACHE': cache_dir,
'HF_METRICS_CACHE': cache_dir,
'HF_MODULES_CACHE': cache_dir,
'HUGGING_FACE_HUB_TOKEN': os.environ.get('HF_TOKEN', ''),
'HF_TOKEN': os.environ.get('HF_TOKEN', '')
})
else:
# Use default cache for local development
cache_dir = os.path.expanduser('~/.cache/huggingface')
os.environ.update({
'TOKENIZERS_PARALLELISM': 'false',
'TRANSFORMERS_VERBOSITY': 'error',
'BITSANDBYTES_NOWELCOME': '1'
})
# Create cache directory if it doesn't exist
os.makedirs(cache_dir, exist_ok=True)
return cache_dir
# Set up cache directories
CACHE_DIR = setup_cache_dirs()
# Define base directory and paths
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
MODELS_DIR = os.path.join(BASE_DIR, "models")
VECTOR_DB_PATH = os.path.join(BASE_DIR, "vector_db")
SESSION_DATA_PATH = os.path.join(BASE_DIR, "session_data")
SUMMARIES_DIR = os.path.join(BASE_DIR, "session_summaries")
# Create necessary directories
for directory in [MODELS_DIR, VECTOR_DB_PATH, SESSION_DATA_PATH, SUMMARIES_DIR]:
os.makedirs(directory, exist_ok=True)
# Pydantic models
class Message(BaseModel):
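    """A single chat message: text, optional ISO timestamp, and sender role."""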
text: str = Field(..., description="The content of the message")
    timestamp: Optional[str] = Field(None, description="ISO format timestamp of the message")
role: str = Field("user", description="The role of the message sender (user or assistant)")
class SessionSummary(BaseModel):
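    """Structured summary of a completed session, including emotions and recommendations."""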
session_id: str = Field(
...,
description="Unique identifier for the session",
examples=["user_789_session_20240314"]
)
    user_id: str = Field(
        ...,
        description="Identifier of the user",
        examples=["user_123"]
    )
    start_time: str = Field(
        ...,
        description="ISO format start time of the session"
    )
end_time: str = Field(
...,
description="ISO format end time of the session"
)
message_count: int = Field(
...,
description="Total number of messages in the session"
)
duration_minutes: float = Field(
...,
description="Duration of the session in minutes"
)
primary_emotions: List[str] = Field(
...,
min_items=1,
description="List of primary emotions detected",
examples=[
["anxiety", "stress"],
["joy", "excitement"],
["sadness", "loneliness"]
]
)
emotion_progression: List[Dict[str, float]] = Field(
...,
description="Progression of emotions throughout the session",
examples=[
[
{"anxiety": 0.8, "stress": 0.6},
{"calm": 0.7, "anxiety": 0.3},
{"joy": 0.9, "calm": 0.8}
]
]
)
summary_text: str = Field(
...,
description="Text summary of the session",
examples=[
"The session focused on managing work-related stress and developing coping strategies. The client showed improvement in recognizing stress triggers and implementing relaxation techniques.",
"Discussion centered around relationship challenges and self-esteem issues. The client expressed willingness to try new communication strategies."
]
)
recommendations: Optional[List[str]] = Field(
None,
description="Optional recommendations based on the session"
)
class Conversation(BaseModel):
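    """Per-user conversation state: messages, emotion history, context, and activity flag."""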
user_id: str = Field(
...,
description="Identifier of the user",
examples=["user_123"]
)
session_id: str = Field(
"",
description="Identifier of the current session"
)
start_time: str = Field(
"",
description="ISO format start time of the conversation"
)
messages: List[Message] = Field(
[],
description="List of messages in the conversation",
examples=[
[
Message(text="I'm feeling anxious", role="user"),
Message(text="I understand you're feeling anxious. Can you tell me more about what's causing this?", role="assistant")
]
]
)
emotion_history: List[Dict[str, float]] = Field(
[],
description="History of emotions detected",
examples=[
[
{"anxiety": 0.8, "stress": 0.6},
{"calm": 0.7, "anxiety": 0.3}
]
]
)
context: Dict[str, Any] = Field(
{},
description="Additional context for the conversation",
examples=[
{
"last_emotion": "anxiety",
"conversation_topic": "work stress",
"previous_sessions": 3
}
]
)
is_active: bool = Field(
True,
description="Whether the conversation is currently active",
examples=[True, False]
)
class MentalHealthChatbot:
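    """Emotion-aware therapeutic chatbot built on a PEFT-tuned Llama model.

    Combines emotion detection, FAISS retrieval over therapy guidelines and
    past interactions, conversation-flow management, response validation,
    and BART-based session summarization.
    """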
def __init__(
self,
model_name: str = "meta-llama/Llama-3.2-3B-Instruct",
peft_model_path: str = "nada013/mental-health-chatbot",
        therapy_guidelines_path: Optional[str] = None,
        use_4bit: bool = True,
        device: Optional[str] = None,
max_response_length: int = 500, # Maximum characters in response
max_response_words: int = 100, # Maximum words in response
min_response_words: int = 10, # Minimum words in response
max_consecutive_responses: int = 3 # Max consecutive responses without user input
):
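        """Load all models and components.

        Sets up the device and memory limits, the emotion classifier, the
        (optionally 4-bit quantized) Llama model with PEFT adapters, the
        BART summarizer, the FlowManager, LangChain memory/prompt/chain,
        sentence-transformer embeddings, and the FAISS vector store.
        """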
# Set device (cuda if available, otherwise cpu)
if device is None:
self.device = "cuda" if torch.cuda.is_available() else "cpu"
else:
self.device = device
# Set memory optimization for T4
if self.device == "cuda":
torch.cuda.empty_cache() # Clear GPU cache
# Set smaller batch size for T4
self.batch_size = 4
# Enable memory efficient attention
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
else:
self.batch_size = 8
logger.info(f"Using device: {self.device}")
# Set response limits
self.max_response_length = max_response_length
self.max_response_words = max_response_words
self.min_response_words = min_response_words
self.max_consecutive_responses = max_consecutive_responses
self.consecutive_response_count = 0 # Track consecutive responses
# Initialize models
self.peft_model_path = peft_model_path
# Initialize emotion detection model
logger.info("Loading emotion detection model")
self.emotion_classifier = self._load_emotion_model()
# Initialize LLAMA model
logger.info(f"Loading LLAMA model: {model_name}")
self.llama_model, self.llama_tokenizer, self.llm = self._initialize_llm(model_name, use_4bit)
# Initialize summary model
logger.info("Loading summary model")
self.summary_model = pipeline(
"summarization",
model="philschmid/bart-large-cnn-samsum",
device=0 if self.device == "cuda" else -1,
model_kwargs={
"cache_dir": CACHE_DIR,
"torch_dtype": torch.float16,
"max_memory": {0: "2GB"} if self.device == "cuda" else None
}
)
logger.info("Summary model loaded successfully")
# Initialize FlowManager
logger.info("Initializing FlowManager")
self.flow_manager = FlowManager(self.llm)
# Setup conversation memory with LangChain
self.memory = ConversationBufferMemory(
return_messages=True,
input_key="input"
)
# Create conversation prompt template
self.prompt_template = PromptTemplate(
input_variables=["history", "input", "past_context", "emotion_context", "guidelines"],
template="""You are a supportive and empathetic mental health conversational AI. Your role is to provide therapeutic support while maintaining professional boundaries.
Previous conversation:
{history}
EMOTIONAL CONTEXT:
{emotion_context}
Past context: {past_context}
Relevant therapeutic guidelines:
{guidelines}
Current message: {input}
Provide a supportive response that:
1. Validates the user's feelings without using casual greetings
2. Asks relevant follow-up questions
3. Maintains a conversational, professional, and empathetic tone
4. Focuses on understanding and support
5. Avoids repeating previous responses
Response:"""
)
# Create the conversation chain
self.conversation = LLMChain(
llm=self.llm,
prompt=self.prompt_template,
memory=self.memory,
verbose=False
)
# Setup embeddings for vector search
self.embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2"
)
# Setup vector database for retrieving relevant past conversations
if therapy_guidelines_path and os.path.exists(therapy_guidelines_path):
self.setup_vector_db(therapy_guidelines_path)
else:
self.setup_vector_db(None)
# Initialize conversation storage
self.conversations = {}
# Load existing session summaries
self.session_summaries = {}
self._load_existing_summaries()
logger.info("All models and components initialized successfully")
def _load_emotion_model(self):
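        """Load the GoEmotions classifier.

        Falls back to a smaller DistilRoBERTa emotion model if loading fails,
        and finally to a stub that always reports "neutral".
        """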
try:
# Load emotion model directly from Hugging Face
return pipeline(
"text-classification",
model="SamLowe/roberta-base-go_emotions",
top_k=None,
device_map="auto" if torch.cuda.is_available() else None,
model_kwargs={
"cache_dir": CACHE_DIR,
"torch_dtype": torch.float16, # Use float16
"max_memory": {0: "2GB"} if torch.cuda.is_available() else None # Limit memory usage
},
)
except Exception as e:
logger.error(f"Error loading emotion model: {e}")
# Fallback to a simpler model
try:
return pipeline(
"text-classification",
model="j-hartmann/emotion-english-distilroberta-base",
                    top_k=None,  # return scores for all labels (return_all_scores is deprecated)
device_map="auto" if torch.cuda.is_available() else None,
model_kwargs={
"cache_dir": CACHE_DIR,
"torch_dtype": torch.float16,
"max_memory": {0: "2GB"} if torch.cuda.is_available() else None
},
)
except Exception as e:
logger.error(f"Error loading fallback emotion model: {e}")
                # Return a stub that mimics the pipeline's output shape
                # (one list per input, each a list of label/score dicts)
                return lambda text: [[{"label": "neutral", "score": 1.0}]]
def _initialize_llm(self, model_name: str, use_4bit: bool):
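        """Load the base model, tokenizer, and PEFT adapters.

        Applies 4-bit NF4 quantization when CUDA is available, builds a
        text-generation pipeline, and wraps it for LangChain. Returns a
        (model, tokenizer, llm) tuple.
        """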
try:
# Configure quantization only if CUDA is available
if use_4bit and torch.cuda.is_available():
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
)
# Set max memory for T4 GPU
max_memory = {0: "14GB"} # Leave 2GB buffer for other operations
else:
quantization_config = None
max_memory = None
logger.info("CUDA not available, running without quantization")
# Load base model
logger.info(f"Loading base model: {model_name}")
base_model = AutoModelForCausalLM.from_pretrained(
model_name,
quantization_config=quantization_config,
device_map="auto",
max_memory=max_memory,
trust_remote_code=True,
cache_dir=CACHE_DIR,
use_auth_token=os.environ.get('HF_TOKEN'),
torch_dtype=torch.float16 # Use float16 for better memory efficiency
)
# Load tokenizer
logger.info("Loading tokenizer")
tokenizer = AutoTokenizer.from_pretrained(
model_name,
cache_dir=CACHE_DIR,
use_auth_token=os.environ.get('HF_TOKEN') # Add auth token for gated models
)
tokenizer.pad_token = tokenizer.eos_token
# Load PEFT model
logger.info(f"Loading PEFT model from {self.peft_model_path}")
model = PeftModel.from_pretrained(
base_model,
self.peft_model_path,
cache_dir=CACHE_DIR,
use_auth_token=os.environ.get('HF_TOKEN') # Add auth token for gated models
)
logger.info("Successfully loaded PEFT model")
# Create text generation pipeline
text_generator = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=512,
temperature=0.7,
top_p=0.95,
repetition_penalty=1.1,
do_sample=True,
device_map="auto" if torch.cuda.is_available() else None
)
# Create LangChain wrapper
llm = HuggingFacePipeline(pipeline=text_generator)
return model, tokenizer, llm
except Exception as e:
logger.error(f"Error initializing LLM: {str(e)}")
raise
    def setup_vector_db(self, guidelines_path: Optional[str] = None):
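        """Create or load the FAISS vector store.

        If no index exists on disk, builds one from the therapy guidelines
        file (when provided) or from a placeholder text; otherwise loads
        the saved index.
        """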
logger.info("Setting up FAISS vector database")
# Check if vector DB exists
vector_db_exists = os.path.exists(os.path.join(VECTOR_DB_PATH, "index.faiss"))
if not vector_db_exists:
# Load therapy guidelines
if guidelines_path and os.path.exists(guidelines_path):
loader = TextLoader(guidelines_path)
documents = loader.load()
# Split documents into chunks with better overlap for context
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500, # Smaller chunks for more precise retrieval
chunk_overlap=100,
separators=["\n\n", "\n", " ", ""]
)
chunks = text_splitter.split_documents(documents)
# Create and save the vector store
self.vector_db = FAISS.from_documents(chunks, self.embeddings)
self.vector_db.save_local(VECTOR_DB_PATH)
logger.info("Successfully loaded and indexed therapy guidelines")
else:
# Initialize with empty vector DB
self.vector_db = FAISS.from_texts(["Initial empty vector store"], self.embeddings)
self.vector_db.save_local(VECTOR_DB_PATH)
logger.warning("No guidelines file provided, using empty vector store")
else:
# Load existing vector DB
self.vector_db = FAISS.load_local(VECTOR_DB_PATH, self.embeddings, allow_dangerous_deserialization=True)
logger.info("Loaded existing vector database")
def _load_existing_summaries(self):
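        """Load previously saved session summaries from SUMMARIES_DIR into memory."""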
if not os.path.exists(SUMMARIES_DIR):
return
for filename in os.listdir(SUMMARIES_DIR):
if filename.endswith('.json'):
try:
with open(os.path.join(SUMMARIES_DIR, filename), 'r') as f:
summary_data = json.load(f)
session_id = summary_data.get('session_id')
if session_id:
self.session_summaries[session_id] = summary_data
except Exception as e:
logger.warning(f"Failed to load summary from {filename}: {e}")
def detect_emotion(self, text: str) -> Dict[str, float]:
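        """Return a mapping of emotion labels to confidence scores for the text."""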
try:
results = self.emotion_classifier(text)[0]
return {result['label']: result['score'] for result in results}
except Exception as e:
logger.error(f"Error detecting emotions: {e}")
return {"neutral": 1.0}
def _validate_and_limit_response(self, response: str, user_message: str) -> str:
"""
Validate and limit response length and content.
Returns a properly limited response.
"""
if not response or not response.strip():
return "I understand. Could you tell me more about that?"
# Clean the response
response = response.strip()
# Remove any LLM commentary or instructions
response = re.sub(r"(Your response|This response|Response:|Note:).*", "", response, flags=re.IGNORECASE).strip()
response = re.sub(r"---.*", "", response).strip()
# Remove casual greetings
response = re.sub(r'^(Hey|Hi|Hello|Hi there|Hey there),\s*', '', response)
# Count words and characters
words = response.split()
word_count = len(words)
char_count = len(response)
# Check if response is too short
if word_count < self.min_response_words:
logger.info(f"Response too short ({word_count} words), adding follow-up question")
if not response.endswith('?'):
response += " Could you tell me more about that?"
# Check if response is too long
if char_count > self.max_response_length or word_count > self.max_response_words:
logger.info(f"Response too long ({char_count} chars, {word_count} words), truncating")
# Try to find a good breaking point
if word_count > self.max_response_words:
# Truncate to max words
truncated_words = words[:self.max_response_words]
response = ' '.join(truncated_words)
# Try to end at a sentence
last_period = response.rfind('.')
last_question = response.rfind('?')
last_exclamation = response.rfind('!')
end_point = max(last_period, last_question, last_exclamation)
                if end_point > len(response) * 0.7:  # Sentence boundary falls in the last 30% of the text
response = response[:end_point + 1]
else:
# Add ellipsis if we can't end naturally
response = response.rstrip() + "..."
elif char_count > self.max_response_length:
# Truncate to max characters
response = response[:self.max_response_length]
# Try to end at a word boundary
last_space = response.rfind(' ')
                if last_space > len(response) * 0.8:  # Word boundary falls in the last 20% of the text
response = response[:last_space]
else:
# Add ellipsis
response = response.rstrip() + "..."
# Check for repetitive content
if self._is_repetitive(response, user_message):
logger.info("Response detected as repetitive, generating alternative")
return "I hear what you're saying. Could you help me understand this better?"
# Ensure response ends properly
if not response.endswith(('.', '!', '?')):
response = response.rstrip() + '.'
return response.strip()
def _is_repetitive(self, response: str, user_message: str) -> bool:
"""
Check if response is repetitive or too similar to user message.
"""
# Convert to lowercase for comparison
response_lower = response.lower()
user_lower = user_message.lower()
# Check if response contains too much of the user's message
user_words = set(user_lower.split())
response_words = set(response_lower.split())
if len(user_words) > 3: # Only check if user message has enough words
common_words = user_words.intersection(response_words)
if len(common_words) / len(user_words) > 0.6: # If more than 60% of user words are in response
return True
# Check for repetitive phrases
repetitive_phrases = [
"i understand", "i hear you", "that sounds", "i can see",
"thank you for sharing", "i appreciate", "that must be"
]
phrase_count = sum(1 for phrase in repetitive_phrases if phrase in response_lower)
if phrase_count > 2: # If more than 2 repetitive phrases
return True
return False
def retrieve_relevant_context(self, query: str, k: int = 3) -> str:
        """Retrieve relevant past interactions from the vector store via similarity search."""
if not hasattr(self, 'vector_db'):
return ""
try:
# Retrieve similar documents from vector DB
docs = self.vector_db.similarity_search(query, k=k)
# Combine the content of retrieved documents
relevant_context = "\n".join([doc.page_content for doc in docs])
return relevant_context
except Exception as e:
logger.error(f"Error retrieving context: {e}")
return ""
def retrieve_relevant_guidelines(self, query: str, emotion_context: str) -> str:
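        """Retrieve therapy-guideline chunks relevant to the query and current emotional state."""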
if not hasattr(self, 'vector_db'):
return ""
try:
# Combine query and emotion context for better relevance
search_query = f"{query} {emotion_context}"
# Retrieve similar documents from vector DB
docs = self.vector_db.similarity_search(search_query, k=2)
# Combine the content of retrieved documents
relevant_guidelines = "\n".join([doc.page_content for doc in docs])
return relevant_guidelines
except Exception as e:
logger.error(f"Error retrieving guidelines: {e}")
return ""
def generate_response(self, prompt: str, emotion_data: Dict[str, float], conversation_history: List[Dict]) -> str:
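        """Generate an emotion-aware, validated response to the user's message.

        Builds an emotion-context string, retrieves guidelines and past
        context, runs the conversation chain, and regenerates once if the
        result duplicates a recent assistant reply.
        """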
# Get primary and secondary emotions
sorted_emotions = sorted(emotion_data.items(), key=lambda x: x[1], reverse=True)
primary_emotion = sorted_emotions[0][0] if sorted_emotions else "neutral"
# Get secondary emotions (if any)
secondary_emotions = []
for emotion, score in sorted_emotions[1:3]: # Get 2nd and 3rd strongest emotions
if score > 0.2: # Only include if reasonably strong
secondary_emotions.append(emotion)
# Create emotion context string
emotion_context = f"User is primarily feeling {primary_emotion}"
if secondary_emotions:
emotion_context += f" with elements of {' and '.join(secondary_emotions)}"
emotion_context += "."
# Retrieve relevant guidelines
guidelines = self.retrieve_relevant_guidelines(prompt, emotion_context)
# Retrieve past context
past_context = self.retrieve_relevant_context(prompt)
# Generate response using the conversation chain
response = self.conversation.predict(
input=prompt,
past_context=past_context,
emotion_context=emotion_context,
guidelines=guidelines
)
# Validate and limit the response
response = self._validate_and_limit_response(response, prompt)
# Ensure the response is unique and not repeating previous messages
if len(conversation_history) > 0:
last_responses = [msg["text"] for msg in conversation_history[-4:] if msg["role"] == "assistant"]
if response in last_responses:
logger.info("Response detected as duplicate, generating alternative")
# Generate a new response with a different angle
alternative_response = self.conversation.predict(
input=f"{prompt} (Please provide a different perspective)",
past_context=past_context,
emotion_context=emotion_context,
guidelines=guidelines
)
alternative_response = self._validate_and_limit_response(alternative_response, prompt)
response = alternative_response
return response
def generate_session_summary(
self,
        flow_manager_session: Optional[Dict] = None
) -> Dict:
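        """Build a structured summary of a FlowManager session.

        Condenses session metadata and emotional analysis with the BART
        summarizer, then asks the LLM for two to three follow-up
        recommendations.
        """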
if not flow_manager_session:
return {
"session_id": "",
"user_id": "",
"start_time": "",
"end_time": datetime.now().isoformat(),
"duration_minutes": 0,
"current_phase": "unknown",
"primary_emotions": [],
"emotion_progression": [],
"summary": "Error: No session data provided",
"recommendations": ["Unable to generate recommendations"],
"session_characteristics": {}
}
# Get session data from FlowManager
session_id = flow_manager_session.get('session_id', '')
user_id = flow_manager_session.get('user_id', '')
current_phase = flow_manager_session.get('current_phase')
if current_phase:
# Convert ConversationPhase to dict
current_phase = {
'name': current_phase.name,
'description': current_phase.description,
'goals': current_phase.goals,
'started_at': current_phase.started_at,
'ended_at': current_phase.ended_at,
'completion_metrics': current_phase.completion_metrics
}
session_start = flow_manager_session.get('started_at')
if isinstance(session_start, str):
session_start = datetime.fromisoformat(session_start)
session_duration = (datetime.now() - session_start).total_seconds() / 60 if session_start else 0
# Get emotion progression and primary emotions
emotion_progression = flow_manager_session.get('emotion_progression', [])
emotion_history = flow_manager_session.get('emotion_history', [])
# Extract primary emotions from emotion history
primary_emotions = []
if emotion_history:
# Get the most frequent emotions
emotion_counts = {}
for entry in emotion_history:
emotions = entry.get('emotions', {})
if isinstance(emotions, dict):
primary = max(emotions.items(), key=lambda x: x[1])[0]
emotion_counts[primary] = emotion_counts.get(primary, 0) + 1
            # Sort by frequency and keep the top three emotions
primary_emotions = sorted(emotion_counts.items(), key=lambda x: x[1], reverse=True)[:3]
primary_emotions = [emotion for emotion, _ in primary_emotions]
        # Get the session characteristics recorded by the FlowManager
        session_characteristics = flow_manager_session.get('llm_context', {}).get('session_characteristics', {})
        # Prepare the text for summarization
summary_text = f"""
Session Overview:
- Session ID: {session_id}
- User ID: {user_id}
- Phase: {current_phase.get('name', 'unknown') if current_phase else 'unknown'}
- Duration: {session_duration:.1f} minutes
Emotional Analysis:
- Primary Emotions: {', '.join(primary_emotions) if primary_emotions else 'No primary emotions detected'}
        - Emotion Progression: {', '.join(map(str, emotion_progression)) if emotion_progression else 'No significant emotion changes noted'}
Session Characteristics:
- Therapeutic Alliance: {session_characteristics.get('alliance_strength', 'N/A')}
- Engagement Level: {session_characteristics.get('engagement_level', 'N/A')}
- Emotional Pattern: {session_characteristics.get('emotional_pattern', 'N/A')}
- Cognitive Pattern: {session_characteristics.get('cognitive_pattern', 'N/A')}
Key Observations:
- The session focused on {current_phase.get('description', 'general discussion') if current_phase else 'general discussion'}
- Main emotional themes: {', '.join(primary_emotions) if primary_emotions else 'not identified'}
- Session progress: {session_characteristics.get('progress_quality', 'N/A')}
"""
# Generate summary using BART
summary = self.summary_model(
summary_text,
max_length=150,
min_length=50,
do_sample=False
)[0]['summary_text']
# Generate recommendations using Llama
recommendations_prompt = f"""
Based on the following session summary, provide 2-3 specific recommendations for follow-up:
{summary}
Session Characteristics:
- Therapeutic Alliance: {session_characteristics.get('alliance_strength', 'N/A')}
- Engagement Level: {session_characteristics.get('engagement_level', 'N/A')}
- Emotional Pattern: {session_characteristics.get('emotional_pattern', 'N/A')}
- Cognitive Pattern: {session_characteristics.get('cognitive_pattern', 'N/A')}
Recommendations should be:
1. Actionable and specific
2. Based on the session content
3. Focused on next steps
"""
recommendations = self.llm.invoke(recommendations_prompt)
recommendations = recommendations.split('\n')
recommendations = [r.strip() for r in recommendations if r.strip()]
recommendations = [r for r in recommendations if not r.startswith(('Based on', 'Session', 'Recommendations'))]
return {
"session_id": session_id,
"user_id": user_id,
"start_time": session_start.isoformat() if isinstance(session_start, datetime) else str(session_start),
"end_time": datetime.now().isoformat(),
"duration_minutes": session_duration,
"current_phase": current_phase.get('name', 'unknown') if current_phase else 'unknown',
"primary_emotions": primary_emotions,
"emotion_progression": emotion_progression,
"summary": summary,
"recommendations": recommendations,
"session_characteristics": session_characteristics
}
def start_session(self, user_id: str) -> tuple[str, str]:
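        """Start a new session for the user and return (session_id, initial greeting)."""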
# Generate session id
session_id = f"{user_id}_{datetime.now().strftime('%Y%m%d%H%M%S')}"
# Initialize FlowManager session
self.flow_manager.initialize_session(user_id)
# Create a new conversation
self.conversations[user_id] = Conversation(
user_id=user_id,
session_id=session_id,
start_time=datetime.now().isoformat(),
is_active=True
)
# Clear conversation memory
self.memory.clear()
# Generate initial greeting and question
initial_message = """Hello! I'm here to support you today. How have you been feeling lately?"""
# Add the initial message to conversation history
assistant_message = Message(
text=initial_message,
timestamp=datetime.now().isoformat(),
role="assistant"
)
self.conversations[user_id].messages.append(assistant_message)
logger.info(f"Session started for user {user_id}")
return session_id, initial_message
def end_session(
self,
user_id: str,
flow_manager: Optional[Any] = None
) -> Optional[Dict]:
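        """Close the user's active session and return its summary, or None on failure."""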
if user_id not in self.conversations or not self.conversations[user_id].is_active:
return None
conversation = self.conversations[user_id]
conversation.is_active = False
# Get FlowManager session data
flow_manager_session = self.flow_manager.user_sessions.get(user_id)
# Generate session summary
try:
session_summary = self.generate_session_summary(flow_manager_session)
# Save summary to disk
summary_path = os.path.join(SUMMARIES_DIR, f"{session_summary['session_id']}.json")
with open(summary_path, 'w') as f:
json.dump(session_summary, f, indent=2)
# Store in memory
self.session_summaries[session_summary['session_id']] = session_summary
# Clear conversation memory
self.memory.clear()
return session_summary
except Exception as e:
logger.error(f"Failed to generate session summary: {e}")
return None
def process_message(self, user_id: str, message: str) -> str:
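        """Process a user message end to end and return the assistant's reply.

        Handles crisis keywords, emotion detection, flow management,
        rate-limited response generation, optional follow-up questions,
        and persistence of the interaction to the vector store.
        """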
# Check for risk flags
risk_keywords = ["suicide", "kill myself", "end my life", "self-harm", "hurt myself"]
risk_detected = any(keyword in message.lower() for keyword in risk_keywords)
# Create or get conversation
if user_id not in self.conversations or not self.conversations[user_id].is_active:
self.start_session(user_id)
conversation = self.conversations[user_id]
# user message -> conversation history
new_message = Message(
text=message,
timestamp=datetime.now().isoformat(),
role="user"
)
conversation.messages.append(new_message)
        # Crisis path: return a supportive crisis response immediately
if risk_detected:
logger.warning(f"Risk flag detected in session {user_id}")
crisis_response = """ I'm really sorry you're feeling this way — it sounds incredibly heavy, and I want you to know that you're not alone.
You don't have to face this by yourself. Our app has licensed mental health professionals who are ready to support you. I can connect you right now if you'd like.
In the meantime, I'm here to listen and talk with you. You can also do grounding exercises or calming techniques with me if you prefer. Just say "help me calm down" or "I need a break."
Would you like to connect with a professional now, or would you prefer to keep talking with me for a bit? Either way, I'm here for you."""
# assistant response -> conversation history
assistant_message = Message(
text=crisis_response,
timestamp=datetime.now().isoformat(),
role="assistant"
)
conversation.messages.append(assistant_message)
return crisis_response
# Reset consecutive response counter when user sends a message
self.consecutive_response_count = 0
# Detect emotions
emotions = self.detect_emotion(message)
conversation.emotion_history.append(emotions)
# Process message with FlowManager
flow_context = self.flow_manager.process_message(user_id, message, emotions)
# Format conversation history
conversation_history = []
for msg in conversation.messages:
conversation_history.append({
"text": msg.text,
"timestamp": msg.timestamp,
"role": msg.role
})
# Check rate limiting for consecutive responses
if self.consecutive_response_count >= self.max_consecutive_responses:
logger.warning(f"Rate limit reached for user {user_id}, sending brief response")
response_text = "I'm here to listen. Take your time to share what's on your mind."
self.consecutive_response_count = 0 # Reset counter
else:
# Generate response
response_text = self.generate_response(message, emotions, conversation_history)
# Increment consecutive response counter
self.consecutive_response_count += 1
# Generate a follow-up question if the response is too short and we haven't hit limits
if (len(response_text.split()) < self.min_response_words and
not response_text.endswith('?') and
self.consecutive_response_count < self.max_consecutive_responses):
follow_up_prompt = f"""
Recent conversation:
{chr(10).join([f"{msg['role']}: {msg['text']}" for msg in conversation_history[-3:]])}
Now, write a single empathetic and open-ended question to encourage the user to share more.
Respond with just the question, no explanation.
"""
follow_up = self.llm.invoke(follow_up_prompt).strip()
# Clean and extract only the actual question (first sentence ending with '?')
matches = re.findall(r'([^\n.?!]*\?)', follow_up)
if matches:
question = matches[0].strip()
else:
question = follow_up.strip().split('\n')[0]
# Validate the follow-up question
question = self._validate_and_limit_response(question, message)
# If the main response is very short, return just the question
if len(response_text.split()) < 5:
response_text = question
else:
response_text = f"{response_text}\n\n{question}"
# assistant response -> conversation history
assistant_message = Message(
text=response_text,
timestamp=datetime.now().isoformat(),
role="assistant"
)
conversation.messages.append(assistant_message)
# Update context
conversation.context.update({
"last_emotion": emotions,
"last_interaction": datetime.now().isoformat(),
"flow_context": flow_context
})
# Store this interaction in vector database
current_interaction = f"User: {message}\nChatbot: {response_text}"
self.vector_db.add_texts([current_interaction])
self.vector_db.save_local(VECTOR_DB_PATH)
return response_text
def get_session_summary(self, session_id: str) -> Optional[Dict[str, Any]]:
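        """Return the stored summary for a session ID, or None if not found."""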
return self.session_summaries.get(session_id)
def get_user_replies(self, user_id: str) -> List[Dict[str, Any]]:
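        """Return all user-role messages from the user's conversation history."""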
if user_id not in self.conversations:
return []
conversation = self.conversations[user_id]
user_replies = []
for message in conversation.messages:
if message.role == "user":
user_replies.append({
"text": message.text,
"timestamp": message.timestamp,
"session_id": conversation.session_id
})
return user_replies
if __name__ == "__main__":
pass
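    # Minimal usage sketch (not part of the original file): assumes a valid
    # HF_TOKEN for the gated Llama weights, sufficient GPU/CPU memory, and an
    # importable conversation_flow.FlowManager. "guidelines.txt" is a
    # hypothetical path; omit it to start with an empty vector store.
    chatbot = MentalHealthChatbot(therapy_guidelines_path="guidelines.txt")
    session_id, greeting = chatbot.start_session("demo_user")
    print(f"[{session_id}] assistant: {greeting}")
    reply = chatbot.process_message("demo_user", "I've been feeling anxious about work lately.")
    print(f"assistant: {reply}")
    summary = chatbot.end_session("demo_user")
    if summary:
        print(f"summary: {summary['summary']}")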