# Source: commit 24214fc — "feat: Complete prompt optimization system implementation"
"""
Context-Aware Classifier for enhanced spiritual monitor with conversation context awareness.
This module implements enhanced classification logic that considers conversation history,
detects defensive patterns, and provides contextually relevant follow-up questions.
"""
from __future__ import annotations

import random
import re
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional

from .data_models import ConversationHistory, Message, Classification, IndicatorCategory
class ContextAwareClassifier:
    """Enhanced spiritual monitor with conversation context awareness.

    Classifies patient messages into GREEN / YELLOW / RED distress
    categories, taking into account:

    - Conversation history and previous distress indicators
    - Defensive response patterns ("I'm fine" contradicting earlier distress)
    - Medical context integration
    - Contextual indicator weighting
    """

    def __init__(self) -> None:
        """Initialize keyword lists and precompile per-message regexes."""
        # Dismissive/defensive phrasings that may contradict earlier distress.
        self.defensive_patterns = [
            r'\b(i\'?m\s+)?fine\b',
            r'\b(everything\'?s?\s+)?okay\b',
            r'\bno\s+problem\b',
            r'\bno\s+problems?\s+here\b',
            r'\ball\s+good\b',
            r'\bdon\'?t\s+need\s+help\b',
            r'\bnothing\'?s?\s+wrong\b'
        ]
        # Single-word emotional-distress cues, matched as plain substrings.
        self.distress_indicators = [
            'stress', 'anxiety', 'worried', 'depressed', 'sad', 'overwhelmed',
            'hopeless', 'lonely', 'afraid', 'angry', 'frustrated', 'lost',
            'confused', 'empty', 'numb', 'tired', 'exhausted'
        ]
        # Terms indicating the message touches on medical care.
        self.medical_context_terms = [
            'medication', 'treatment', 'therapy', 'counseling', 'diagnosis',
            'condition', 'disorder', 'symptoms', 'doctor', 'psychiatrist'
        ]
        # Precompile once: these regexes run on every classified message.
        self._defensive_res = [re.compile(p) for p in self.defensive_patterns]
        # NOTE(review): the optional groups mean bare "good" / "better" also
        # count as dismissive (e.g. "not good") — confirm that is intended.
        self._dismissive_res = [re.compile(p) for p in (
            r'\b(i\'?m\s+)?fine\b',
            r'\b(everything\'?s?\s+)?okay\b',
            r'\b(all\s+)?good\b',
            r'\b(much\s+)?better\b',
            r'\bno\s+problem\b'
        )]

    def classify_with_context(self, message: str, history: ConversationHistory) -> Classification:
        """Classify a message considering conversation history and context.

        Args:
            message: Current patient message to classify.
            history: Conversation history with previous messages and context.

        Returns:
            Classification with category, confidence, reasoning, indicators
            found in the message, and the context factors that influenced
            the result.
        """
        # Base classification from the message text alone.
        base_category, base_confidence = self._classify_message_basic(message)

        # Contextual signals from the conversation so far.
        historical_distress = self._analyze_historical_distress(history)
        defensive_pattern = self.detect_defensive_responses(message, history)
        medical_context_weight = self._evaluate_medical_context(message, history)

        final_category = base_category
        final_confidence = base_confidence
        context_factors: List[str] = []

        # Dismissive message after documented distress: don't let the
        # conversation settle back to GREEN on a bare "I'm fine".
        if historical_distress['has_distress'] and self._is_dismissive_message(message):
            if base_category == 'GREEN':
                final_category = 'YELLOW'
                final_confidence = max(0.7, base_confidence)
            context_factors.append('historical_distress_with_dismissive_response')

        # Defensive language contradicting the distress history.
        if defensive_pattern:
            if final_category == 'GREEN':
                final_category = 'YELLOW'
                final_confidence = max(0.6, final_confidence)
            context_factors.append('defensive_response_pattern')

        # Medical context: emotional-struggle language gains weight when the
        # patient has relevant medical context (deliberately low 0.3 threshold).
        if medical_context_weight > 0.3:
            struggle_terms = ['hard', 'difficult', 'trying', 'struggling', 'challenging']
            if final_category == 'GREEN' and any(term in message.lower() for term in struggle_terms):
                final_category = 'YELLOW'
                final_confidence = max(0.6, final_confidence)
            context_factors.append('medical_context_relevant')

        reasoning = self._build_contextual_reasoning(
            message, base_category, final_category, historical_distress,
            defensive_pattern, medical_context_weight, context_factors
        )

        return Classification(
            category=final_category,
            confidence=final_confidence,
            reasoning=reasoning,
            indicators_found=self._extract_indicators(message),
            context_factors=context_factors
        )

    def detect_defensive_responses(self, message: str, history: ConversationHistory) -> bool:
        """Detect defensive responses that contradict conversation history.

        Args:
            message: Current message to analyze.
            history: Conversation history.

        Returns:
            True when the message uses dismissive language AND the history
            shows significant distress (>= 2 flagged messages or >= 3
            recorded distress indicators).
        """
        message_lower = message.lower()
        if not any(pattern.search(message_lower) for pattern in self._defensive_res):
            return False

        # Count previously flagged messages...
        distress_count = sum(
            1 for msg in history.messages
            if msg.classification in ('YELLOW', 'RED')
        )
        # ...and distress indicators accumulated across the conversation.
        historical_distress_indicators = len(history.distress_indicators_found)

        # Dismissive language is "defensive" only against real distress history.
        return distress_count >= 2 or historical_distress_indicators >= 3

    def evaluate_contextual_indicators(self, indicators: List[str], context: Dict[str, Any]) -> float:
        """Evaluate indicator weights based on conversation context.

        Args:
            indicators: List of indicator names.
            context: Context information; reads ``historical_mentions`` (int),
                ``recent_mention`` (bool) and ``conversation_length`` (int).

        Returns:
            Contextual weight in [0.0, 1.0] for the indicators.
        """
        if not indicators:
            return 0.0

        base_weight = 0.5  # Base weight for any indicator.
        historical_mentions = context.get('historical_mentions', 0)
        recent_mention = context.get('recent_mention', False)
        conversation_length = context.get('conversation_length', 1)

        # Repeated indicators weigh more, capped at +0.3.
        repetition_bonus = min(0.3, historical_mentions * 0.1)
        # Recently mentioned indicators get a fixed bonus.
        recency_bonus = 0.2 if recent_mention else 0.0

        # Normalize by conversation length to avoid inflation in long chats,
        # but keep minimum thresholds for important patterns below.
        normalization_factor = min(1.0, 3.0 / max(1, conversation_length))
        final_weight = (base_weight + repetition_bonus + recency_bonus) * normalization_factor

        # Floors: repeated or recent-and-repeated indicators must stay visible.
        if historical_mentions >= 2:
            final_weight = max(final_weight, 0.5)
        if recent_mention and historical_mentions > 0:
            final_weight = max(final_weight, 0.6)

        return min(1.0, final_weight)

    def generate_contextual_follow_up(self, message: str, history: ConversationHistory,
                                      classification: str) -> str:
        """Generate a follow-up question that references conversation context.

        Args:
            message: Current message.
            history: Conversation history.
            classification: Current classification ('GREEN'/'YELLOW'/'RED').

        Returns:
            A contextually appropriate follow-up question (randomly chosen
            among suitable candidates).
        """
        # Topics mentioned earlier in the conversation, most relevant first.
        previous_topics = self._extract_conversation_topics(history)

        base_questions = {
            'YELLOW': [
                "Can you tell me more about how you're feeling?",
                "What's been on your mind lately?",
                "How are you coping with things right now?"
            ],
            'RED': [
                "I'm concerned about what you've shared. Can you tell me more?",
                "It sounds like you're going through a difficult time. What's been most challenging?",
                "How are you managing with everything that's happening?"
            ]
        }

        # Prefer questions that cite a previous topic once we have history.
        if len(history.messages) >= 2 and previous_topics:
            contextual_questions = {
                'YELLOW': [
                    f"Earlier you mentioned feeling {previous_topics[0]}. How are you doing with that now?",
                    f"You talked about {previous_topics[0]} before. Is that still on your mind?",
                    f"I remember you discussed {previous_topics[0]}. How has that been for you?"
                ],
                'RED': [
                    f"You mentioned {previous_topics[0]} earlier, and I'm still concerned. Can you help me understand how you're feeling about that?",
                    f"Thinking about what you said before regarding {previous_topics[0]}, how are you managing right now?",
                    f"You've talked about {previous_topics[0]}, and I want to make sure you're okay. What's going through your mind?"
                ]
            }
            if classification in contextual_questions:
                return random.choice(contextual_questions[classification])

        # Fall back to generic questions for the classification.
        if classification in base_questions:
            return random.choice(base_questions[classification])

        return "Can you tell me more about how you're feeling right now?"

    def _classify_message_basic(self, message: str) -> tuple[str, float]:
        """Classify message text alone; returns (category, confidence)."""
        message_lower = message.lower()

        # RED indicators (severe distress / self-harm language).
        red_indicators = [
            'suicide', 'kill myself', 'end it all', 'no point', 'hopeless',
            'can\'t go on', 'want to die', 'better off dead', 'want it all to stop',
            'give up', 'end my life', 'can\'t take it', 'rather be dead'
        ]
        # YELLOW indicators (moderate distress).
        yellow_indicators = [
            'stressed', 'anxious', 'worried', 'depressed', 'sad', 'overwhelmed',
            'struggling', 'difficult', 'hard time', 'not okay', 'can\'t handle',
            'too much', 'scared', 'afraid', 'lonely', 'isolated'
        ]

        if any(indicator in message_lower for indicator in red_indicators):
            return 'RED', 0.8
        if any(indicator in message_lower for indicator in yellow_indicators):
            return 'YELLOW', 0.7
        # No distress language found.
        return 'GREEN', 0.6

    def _analyze_historical_distress(self, history: ConversationHistory) -> Dict[str, Any]:
        """Summarize historical distress patterns in the conversation."""
        distress_messages = [
            msg for msg in history.messages
            if msg.classification in ('YELLOW', 'RED')
        ]
        # assumes msg.timestamp is naive local time like datetime.now() — TODO confirm
        recent_distress = [
            msg for msg in distress_messages
            if (datetime.now() - msg.timestamp).total_seconds() < 3600  # Last hour.
        ]
        return {
            'has_distress': len(distress_messages) > 0,
            'distress_count': len(distress_messages),
            'recent_distress': len(recent_distress) > 0,
            'severity_trend': self._calculate_severity_trend(history.messages),
            'indicators_mentioned': len(history.distress_indicators_found)
        }

    def _is_dismissive_message(self, message: str) -> bool:
        """Return True if the message is dismissive/minimizing ("I'm fine")."""
        message_lower = message.lower()
        return any(pattern.search(message_lower) for pattern in self._dismissive_res)

    def _evaluate_medical_context(self, message: str, history: ConversationHistory) -> float:
        """Score (0.0-1.0) how relevant medical context is to this message."""
        medical_context = history.medical_context
        message_lower = message.lower()

        # Explicit medical terms in the current message.
        medical_mentions = sum(1 for term in self.medical_context_terms if term in message_lower)
        # Known conditions recorded for the patient.
        relevant_conditions = len(medical_context.get('conditions', []))
        # Emotional-struggle language that matters more in a medical context.
        emotional_struggle_terms = ['hard', 'difficult', 'trying', 'struggling', 'challenging', 'tough']
        emotional_mentions = sum(1 for term in emotional_struggle_terms if term in message_lower)

        weight = 0.0
        if medical_mentions > 0:
            weight += 0.4
        if relevant_conditions > 0:
            weight += 0.3
            # Extra weight for emotional struggle alongside medical conditions.
            if emotional_mentions > 0:
                weight += 0.3
        return min(1.0, weight)

    def _extract_indicators(self, message: str) -> List[str]:
        """Return the distress indicators present in the message (substring match)."""
        message_lower = message.lower()
        return [
            indicator for indicator in self.distress_indicators
            if indicator in message_lower
        ]

    def _extract_conversation_topics(self, history: ConversationHistory) -> List[str]:
        """Extract up to three main topics from the conversation history."""
        topics: List[str] = []

        # Recorded distress indicators come first (top 2).
        if history.distress_indicators_found:
            topics.extend(history.distress_indicators_found[:2])

        # Then one significant word from each of the last 3 messages
        # (distress vocabulary, or any word longer than 6 characters).
        for msg in history.messages[-3:]:
            words = msg.content.lower().split()
            significant_words = [
                word for word in words
                if word in self.distress_indicators or len(word) > 6
            ]
            topics.extend(significant_words[:1])

        return topics[:3]

    def _calculate_severity_trend(self, messages: List[Message]) -> str:
        """Return 'increasing'/'decreasing'/'stable' distress trend over recent messages."""
        if len(messages) < 2:
            return 'insufficient_data'

        # Map categories to ordinals; unknown classifications count as GREEN.
        severity_map = {'GREEN': 0, 'YELLOW': 1, 'RED': 2}
        recent_messages = messages[-3:]
        severities = [severity_map.get(msg.classification, 0) for msg in recent_messages]

        if len(severities) < 2:
            return 'stable'
        # Simple trend: compare oldest vs newest of the recent window.
        if severities[-1] > severities[0]:
            return 'increasing'
        if severities[-1] < severities[0]:
            return 'decreasing'
        return 'stable'

    def _build_contextual_reasoning(self, message: str, base_category: str,
                                    final_category: str, historical_distress: Dict[str, Any],
                                    defensive_pattern: bool, medical_context_weight: float,
                                    context_factors: List[str]) -> str:
        """Build a human-readable explanation of the contextual classification."""
        reasoning_parts = []

        reasoning_parts.append(f"Message content suggests {base_category} classification.")

        if historical_distress['has_distress']:
            reasoning_parts.append(
                f"Previous conversation shows {historical_distress['distress_count']} "
                f"instances of distress with {historical_distress['indicators_mentioned']} indicators mentioned."
            )
        if defensive_pattern:
            reasoning_parts.append(
                "Current dismissive language contradicts previous distress expressions, "
                "suggesting possible defensive response pattern."
            )
        if medical_context_weight > 0.5:
            reasoning_parts.append(
                "Medical context (conditions/medications) relevant to current emotional state."
            )
        if base_category != final_category:
            reasoning_parts.append(
                f"Classification adjusted from {base_category} to {final_category} "
                f"based on historical context and conversation patterns."
            )
        return " ".join(reasoning_parts)