Spaces:
Running
Running
File size: 17,350 Bytes
24214fc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 |
"""
Context-Aware Classifier for enhanced spiritual monitor with conversation context awareness.
This module implements enhanced classification logic that considers conversation history,
detects defensive patterns, and provides contextually relevant follow-up questions.
"""
import random
import re
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

from .data_models import Classification, ConversationHistory, IndicatorCategory, Message
class ContextAwareClassifier:
    """
    Enhanced spiritual monitor with conversation context awareness.
    Implements contextual classification that considers:
    - Conversation history and previous distress indicators
    - Defensive response patterns
    - Medical context integration
    - Contextual indicator weighting
    """
    def __init__(self):
        """Initialize the context-aware classifier.

        Only sets up the static pattern/vocabulary lists used by the
        classification helpers; no I/O or external state is touched.
        """
        # Regex patterns (searched against lowercased text) for dismissive
        # "I'm fine / all good" style replies; consumed by
        # detect_defensive_responses to spot possible minimization.
        self.defensive_patterns = [
            r'\b(i\'?m\s+)?fine\b',
            r'\b(everything\'?s?\s+)?okay\b',
            r'\bno\s+problem\b',
            r'\bno\s+problems?\s+here\b',
            r'\ball\s+good\b',
            r'\bdon\'?t\s+need\s+help\b',
            r'\bnothing\'?s?\s+wrong\b'
        ]
        # Single-word distress vocabulary; consumed by _extract_indicators
        # and _extract_conversation_topics.
        self.distress_indicators = [
            'stress', 'anxiety', 'worried', 'depressed', 'sad', 'overwhelmed',
            'hopeless', 'lonely', 'afraid', 'angry', 'frustrated', 'lost',
            'confused', 'empty', 'numb', 'tired', 'exhausted'
        ]
        # Clinical vocabulary used by _evaluate_medical_context to judge the
        # medical relevance of a message.
        self.medical_context_terms = [
            'medication', 'treatment', 'therapy', 'counseling', 'diagnosis',
            'condition', 'disorder', 'symptoms', 'doctor', 'psychiatrist'
        ]
    def classify_with_context(self, message: str, history: ConversationHistory) -> Classification:
        """
        Classify a message considering conversation history and context.

        Args:
            message: Current patient message to classify
            history: Conversation history with previous messages and context

        Returns:
            Classification with category, confidence, and reasoning
        """
        # Base classification without context (keyword tiers: RED/YELLOW/GREEN).
        base_category, base_confidence = self._classify_message_basic(message)
        # Analyze historical context
        historical_distress = self._analyze_historical_distress(history)
        defensive_pattern = self.detect_defensive_responses(message, history)
        medical_context_weight = self._evaluate_medical_context(message, history)
        # Adjust classification based on context; each signal can only
        # escalate GREEN -> YELLOW, never downgrade or override RED.
        final_category = base_category
        final_confidence = base_confidence
        context_factors = []
        # NOTE(review): source indentation was lost in transit; the
        # context_factors.append(...) lines below are reconstructed at the
        # outer-condition level (factor recorded whenever the signal fires,
        # not only when the category actually changes) — confirm against VCS.
        # Historical distress with dismissive current message
        if historical_distress['has_distress'] and self._is_dismissive_message(message):
            if base_category == 'GREEN':
                # Dismissiveness on top of prior distress is itself a signal.
                final_category = 'YELLOW'
                final_confidence = max(0.7, base_confidence)
            context_factors.append('historical_distress_with_dismissive_response')
        # Defensive patterns detected
        if defensive_pattern:
            if final_category == 'GREEN':
                final_category = 'YELLOW'
                final_confidence = max(0.6, final_confidence)
            context_factors.append('defensive_response_pattern')
        # Medical context considerations
        if medical_context_weight > 0.3:  # Lower threshold for medical context
            # Check for emotional struggle language with medical context
            struggle_terms = ['hard', 'difficult', 'trying', 'struggling', 'challenging']
            if final_category == 'GREEN' and any(term in message.lower() for term in struggle_terms):
                final_category = 'YELLOW'
                final_confidence = max(0.6, final_confidence)
            context_factors.append('medical_context_relevant')
        # Build a human-readable explanation of the (possibly adjusted) verdict.
        reasoning = self._build_contextual_reasoning(
            message, base_category, final_category, historical_distress,
            defensive_pattern, medical_context_weight, context_factors
        )
        return Classification(
            category=final_category,
            confidence=final_confidence,
            reasoning=reasoning,
            indicators_found=self._extract_indicators(message),
            context_factors=context_factors
        )
def detect_defensive_responses(self, message: str, history: ConversationHistory) -> bool:
"""
Detect defensive response patterns that contradict conversation history.
Args:
message: Current message to analyze
history: Conversation history
Returns:
True if defensive pattern is detected
"""
# Check if message matches defensive patterns
message_lower = message.lower()
has_defensive_language = any(
re.search(pattern, message_lower) for pattern in self.defensive_patterns
)
if not has_defensive_language:
return False
# Check if there's sufficient distress history to contradict
distress_count = len([
msg for msg in history.messages
if msg.classification in ['YELLOW', 'RED']
])
# Also check distress indicators in history
historical_distress_indicators = len(history.distress_indicators_found)
# Defensive if dismissive language with significant distress history
return distress_count >= 2 or historical_distress_indicators >= 3
def evaluate_contextual_indicators(self, indicators: List[str], context: Dict[str, Any]) -> float:
"""
Evaluate indicator weights based on conversation context.
Args:
indicators: List of indicator names
context: Context information including historical mentions
Returns:
Contextual weight for the indicators
"""
if not indicators:
return 0.0
base_weight = 0.5 # Base weight for any indicator
historical_mentions = context.get('historical_mentions', 0)
recent_mention = context.get('recent_mention', False)
conversation_length = context.get('conversation_length', 1)
# Increase weight for repeated indicators
repetition_bonus = min(0.3, historical_mentions * 0.1)
# Bonus for recent mentions
recency_bonus = 0.2 if recent_mention else 0.0
# Normalize by conversation length to avoid inflation, but maintain minimum thresholds
normalization_factor = min(1.0, 3.0 / max(1, conversation_length))
final_weight = (base_weight + repetition_bonus + recency_bonus) * normalization_factor
# Ensure minimum weights for important patterns
if historical_mentions >= 2:
final_weight = max(final_weight, 0.5)
if recent_mention and historical_mentions > 0:
final_weight = max(final_weight, 0.6)
return min(1.0, final_weight)
def generate_contextual_follow_up(self, message: str, history: ConversationHistory,
classification: str) -> str:
"""
Generate follow-up questions that reference conversation context.
Args:
message: Current message
history: Conversation history
classification: Current classification
Returns:
Contextually appropriate follow-up question
"""
# Extract previous topics mentioned
previous_topics = self._extract_conversation_topics(history)
# Base follow-up questions
base_questions = {
'YELLOW': [
"Can you tell me more about how you're feeling?",
"What's been on your mind lately?",
"How are you coping with things right now?"
],
'RED': [
"I'm concerned about what you've shared. Can you tell me more?",
"It sounds like you're going through a difficult time. What's been most challenging?",
"How are you managing with everything that's happening?"
]
}
# Contextual follow-ups when we have history
if len(history.messages) >= 2 and previous_topics:
contextual_questions = {
'YELLOW': [
f"Earlier you mentioned feeling {previous_topics[0]}. How are you doing with that now?",
f"You talked about {previous_topics[0]} before. Is that still on your mind?",
f"I remember you discussed {previous_topics[0]}. How has that been for you?"
],
'RED': [
f"You mentioned {previous_topics[0]} earlier, and I'm still concerned. Can you help me understand how you're feeling about that?",
f"Thinking about what you said before regarding {previous_topics[0]}, how are you managing right now?",
f"You've talked about {previous_topics[0]}, and I want to make sure you're okay. What's going through your mind?"
]
}
# Use contextual question if available
if classification in contextual_questions:
import random
return random.choice(contextual_questions[classification])
# Fall back to base questions
if classification in base_questions:
import random
return random.choice(base_questions[classification])
return "Can you tell me more about how you're feeling right now?"
def _classify_message_basic(self, message: str) -> tuple:
"""Basic classification without context."""
message_lower = message.lower()
# RED indicators (severe distress)
red_indicators = [
'suicide', 'kill myself', 'end it all', 'no point', 'hopeless',
'can\'t go on', 'want to die', 'better off dead', 'want it all to stop',
'give up', 'end my life', 'can\'t take it', 'rather be dead'
]
# YELLOW indicators (moderate distress)
yellow_indicators = [
'stressed', 'anxious', 'worried', 'depressed', 'sad', 'overwhelmed',
'struggling', 'difficult', 'hard time', 'not okay', 'can\'t handle',
'too much', 'scared', 'afraid', 'lonely', 'isolated'
]
# Check for RED
if any(indicator in message_lower for indicator in red_indicators):
return 'RED', 0.8
# Check for YELLOW
if any(indicator in message_lower for indicator in yellow_indicators):
return 'YELLOW', 0.7
# Default to GREEN
return 'GREEN', 0.6
def _analyze_historical_distress(self, history: ConversationHistory) -> Dict[str, Any]:
"""Analyze historical distress patterns in conversation."""
distress_messages = [
msg for msg in history.messages
if msg.classification in ['YELLOW', 'RED']
]
recent_distress = [
msg for msg in distress_messages
if (datetime.now() - msg.timestamp).total_seconds() < 3600 # Last hour
]
return {
'has_distress': len(distress_messages) > 0,
'distress_count': len(distress_messages),
'recent_distress': len(recent_distress) > 0,
'severity_trend': self._calculate_severity_trend(history.messages),
'indicators_mentioned': len(history.distress_indicators_found)
}
def _is_dismissive_message(self, message: str) -> bool:
"""Check if message is dismissive/minimizing."""
dismissive_patterns = [
r'\b(i\'?m\s+)?fine\b',
r'\b(everything\'?s?\s+)?okay\b',
r'\b(all\s+)?good\b',
r'\b(much\s+)?better\b',
r'\bno\s+problem\b'
]
message_lower = message.lower()
return any(re.search(pattern, message_lower) for pattern in dismissive_patterns)
def _evaluate_medical_context(self, message: str, history: ConversationHistory) -> float:
"""Evaluate relevance of medical context to current message."""
medical_context = history.medical_context
# Check if message mentions medical terms
message_lower = message.lower()
medical_mentions = sum(1 for term in self.medical_context_terms if term in message_lower)
# Check if patient has relevant medical conditions
relevant_conditions = len(medical_context.get('conditions', []))
# Check for emotional struggle in context of medical conditions
emotional_struggle_terms = ['hard', 'difficult', 'trying', 'struggling', 'challenging', 'tough']
emotional_mentions = sum(1 for term in emotional_struggle_terms if term in message_lower)
# Weight based on medical relevance
weight = 0.0
if medical_mentions > 0:
weight += 0.4
if relevant_conditions > 0:
weight += 0.3
# Extra weight if emotional struggle with medical conditions
if emotional_mentions > 0:
weight += 0.3
return min(1.0, weight)
def _extract_indicators(self, message: str) -> List[str]:
"""Extract distress indicators from message."""
message_lower = message.lower()
found_indicators = [
indicator for indicator in self.distress_indicators
if indicator in message_lower
]
return found_indicators
def _extract_conversation_topics(self, history: ConversationHistory) -> List[str]:
"""Extract main topics from conversation history."""
topics = []
# Extract from distress indicators
if history.distress_indicators_found:
topics.extend(history.distress_indicators_found[:2]) # Top 2
# Extract from recent messages (simplified)
for msg in history.messages[-3:]: # Last 3 messages
words = msg.content.lower().split()
# Look for emotional or significant words
significant_words = [
word for word in words
if word in self.distress_indicators or len(word) > 6
]
topics.extend(significant_words[:1]) # One per message
return topics[:3] # Return top 3 topics
def _calculate_severity_trend(self, messages: List[Message]) -> str:
"""Calculate if distress severity is increasing, decreasing, or stable."""
if len(messages) < 2:
return 'insufficient_data'
# Map categories to numeric values
severity_map = {'GREEN': 0, 'YELLOW': 1, 'RED': 2}
recent_messages = messages[-3:] # Last 3 messages
severities = [severity_map.get(msg.classification, 0) for msg in recent_messages]
if len(severities) < 2:
return 'stable'
# Simple trend analysis
if severities[-1] > severities[0]:
return 'increasing'
elif severities[-1] < severities[0]:
return 'decreasing'
else:
return 'stable'
def _build_contextual_reasoning(self, message: str, base_category: str,
final_category: str, historical_distress: Dict[str, Any],
defensive_pattern: bool, medical_context_weight: float,
context_factors: List[str]) -> str:
"""Build reasoning that explains the contextual classification."""
reasoning_parts = []
# Base classification reasoning
reasoning_parts.append(f"Message content suggests {base_category} classification.")
# Historical context
if historical_distress['has_distress']:
reasoning_parts.append(
f"Previous conversation shows {historical_distress['distress_count']} "
f"instances of distress with {historical_distress['indicators_mentioned']} indicators mentioned."
)
# Defensive pattern
if defensive_pattern:
reasoning_parts.append(
"Current dismissive language contradicts previous distress expressions, "
"suggesting possible defensive response pattern."
)
# Medical context
if medical_context_weight > 0.5:
reasoning_parts.append(
"Medical context (conditions/medications) relevant to current emotional state."
)
# Final adjustment
if base_category != final_category:
reasoning_parts.append(
f"Classification adjusted from {base_category} to {final_category} "
f"based on historical context and conversation patterns."
)
return " ".join(reasoning_parts) |