# TRuCAL/components/feedback_ingestion.py
# (Upstream upload metadata: johnaugustine, "Upload 53 files", commit 95cc8f6, verified.)
"""
Feedback ingestion system for TRuCAL with trauma-aware processing.
"""
import math
import re
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional

import torch
@dataclass
class FeedbackConsent:
    """Granular consent management for different feedback types.

    Each boolean flag gates acceptance of the corresponding FeedbackType
    in the ingestor's consent check; everything defaults to False (opt-in).
    """
    explicit: bool = False   # Allow explicit ratings
    implicit: bool = False   # Allow implicit engagement signals
    somatic: bool = False    # Allow somatic/biofeedback data
    clinical: bool = False   # Allow clinical overrides
    retention_days: int = 30  # Auto-delete after period
class FeedbackType(Enum):
    """Types of feedback supported by the system.

    The string values are the identifiers expected in a feedback
    dict's 'type' field at ingestion time.
    """
    EXPLICIT = "explicit_rating"        # Direct user rating
    IMPLICIT = "implicit_engagement"    # Engagement-derived signal
    SOMATIC = "somatic_biofeedback"     # Biosignal-derived feedback
    CLINICAL = "clinical_override"      # Clinician-provided override
    CORRECTION = "safety_correction"    # Safety correction (always accepted by the consent check)
class TemporalPatternAnalyzer:
    """Analyzes temporal patterns for trauma-aware learning.

    Tracks per-session timing markers (e.g. when trauma-related content
    was last discussed) and exposes the current temporal context so
    feedback can be interpreted relative to time of day and season.
    """

    def __init__(self):
        # session_id -> dict of timing markers (e.g. 'last_trauma_discussion').
        self.session_patterns: Dict[str, Dict[str, Any]] = {}

    def get_current_pattern(self) -> Dict[str, Any]:
        """Get current temporal context.

        Returns:
            Dict with 'hour_of_day', 'is_weekend', 'seasonal_factor'
            (0-1 float) and 'light_conditions' (str) keys.
        """
        now = datetime.now()
        return {
            'hour_of_day': now.hour,
            'is_weekend': now.weekday() >= 5,  # Saturday=5, Sunday=6
            'seasonal_factor': self._get_seasonal_factor(now),
            'light_conditions': self._get_light_conditions(now),
        }

    def has_recent_trauma_discussion(self, session_id: str, window_hours: int = 24) -> bool:
        """Check if session recently discussed trauma-related content.

        Args:
            session_id: Session to look up.
            window_hours: Size of the "recent" window in hours.

        Returns:
            True if a 'last_trauma_discussion' marker exists and falls
            within the window; False otherwise.
        """
        session_data = self.session_patterns.get(session_id, {})
        last_trauma_time = session_data.get('last_trauma_discussion')
        if not last_trauma_time:
            return False
        return (datetime.now() - last_trauma_time).total_seconds() < (window_hours * 3600)

    def _get_seasonal_factor(self, dt: datetime) -> float:
        """Calculate seasonal factor (0-1) based on date.

        Simple sinusoidal approximation of seasonal variation: day 80
        (~spring equinox) maps to the midpoint 0.5, peaking near the
        summer solstice.
        """
        day_of_year = dt.timetuple().tm_yday
        # BUG FIX: original used np.sin/np.pi but numpy was never imported
        # (NameError at runtime); the stdlib math module suffices here.
        return 0.5 * (1 + math.sin(2 * math.pi * (day_of_year - 80) / 365))

    def _get_light_conditions(self, dt: datetime) -> str:
        """Classify time of day as 'daylight', 'twilight' or 'dark'."""
        hour = dt.hour
        if 6 <= hour < 18:
            return 'daylight'
        elif 18 <= hour < 21 or 5 <= hour < 6:
            return 'twilight'
        return 'dark'
class TraumaAwareFeedbackIngestor:
    """Enhanced feedback processing with trauma awareness and consent management.

    Feedback is only accepted when the session holds granular consent for
    that feedback type. Accepted items are enriched with context, content
    mentioning trauma keywords is redacted/generalized, raw biosignals are
    reduced to normalized arousal metrics, and high-risk items trigger a
    safety review before being buffered.
    """

    def __init__(self, buffer_size: int = 1000, min_consent_age: int = 18):
        """Initialize the feedback ingestor.

        Args:
            buffer_size: Maximum number of feedback items to keep in memory.
            min_consent_age: Minimum age for providing consent.
        """
        self.feedback_buffer: List[Dict[str, Any]] = []
        self.buffer_size = buffer_size
        self.min_consent_age = min_consent_age
        # session_id -> FeedbackConsent
        self.consent_records: Dict[str, FeedbackConsent] = {}
        self.temporal_patterns = TemporalPatternAnalyzer()
        # Keyword screen used to decide whether content needs sanitization.
        self.trauma_keywords = {
            'abuse', 'assault', 'trauma', 'ptsd', 'trigger', 'flashback',
            'violence', 'attack', 'victim', 'survivor', 'harassment'
        }

    def request_consent(self, session_id: str, consent_config: FeedbackConsent) -> bool:
        """Request and store granular consent from user.

        Args:
            session_id: Unique identifier for the user session.
            consent_config: FeedbackConsent object with consent settings.

        Returns:
            bool: True if consent was successfully recorded.
        """
        if not self._verify_consent_capacity(session_id):
            return False
        self.consent_records[session_id] = consent_config
        return True

    def ingest_with_context(self, feedback: Dict[str, Any], session_id: str,
                            context_embedding: torch.Tensor) -> bool:
        """Ingest feedback with contextual information.

        Args:
            feedback: Dictionary containing feedback data (mutated in place).
            session_id: Session identifier for consent verification.
            context_embedding: Contextual embedding of the interaction.

        Returns:
            bool: True if feedback was successfully ingested.
        """
        consent = self.consent_records.get(session_id)
        if not consent or not self._check_granular_consent(feedback.get('type'), consent):
            return False
        # Enhanced context capture; embedding is detached to CPU numpy so
        # the buffer never pins GPU memory.
        feedback['context_embedding'] = context_embedding.cpu().numpy()
        feedback['temporal_context'] = self.temporal_patterns.get_current_pattern()
        # Trauma-aware processing
        feedback = self._trauma_aware_sanitization(feedback)
        # Risk assessment: high-risk items are flagged for human review but
        # are still ingested.
        risk_score = self._assess_feedback_risk(feedback, session_id)
        if risk_score > 0.8:
            self._trigger_safety_review(feedback, session_id)
        # Add to buffer and maintain size
        self.feedback_buffer.append(feedback)
        self._maintain_buffer_integrity()
        return True

    def _verify_consent_capacity(self, session_id: str) -> bool:
        """Verify if user has capacity to provide consent.

        In a real implementation, this would check age, mental state, etc.
        This is a simplified version that always grants capacity.
        """
        return True

    def _check_granular_consent(self, feedback_type: Optional[str], consent: FeedbackConsent) -> bool:
        """Check if a specific feedback type is covered by consent.

        Args:
            feedback_type: Raw 'type' string from the feedback dict (may be
                None or unrecognized).
            consent: Stored consent record for the session.

        Returns:
            bool: True only when the type is known and consented to;
            safety corrections are always allowed.
        """
        if not feedback_type:
            return False
        try:
            feedback_enum = FeedbackType(feedback_type)
        except ValueError:
            # BUG FIX: an unrecognized type string used to propagate a
            # ValueError out of ingest_with_context; treat it as "no consent".
            return False
        if feedback_enum is FeedbackType.CORRECTION:
            return True  # Always allow safety corrections
        return {
            FeedbackType.EXPLICIT: consent.explicit,
            FeedbackType.IMPLICIT: consent.implicit,
            FeedbackType.SOMATIC: consent.somatic,
            FeedbackType.CLINICAL: consent.clinical,
        }.get(feedback_enum, False)

    def _trauma_aware_sanitization(self, feedback: Dict[str, Any]) -> Dict[str, Any]:
        """Sanitize feedback while preserving therapeutic value.

        Text content mentioning trauma keywords is redacted/generalized
        (the original is kept under 'original_content' for clinical
        review); raw biosignals are replaced with normalized metrics.
        """
        if 'content' in feedback:
            content = feedback['content']
            feedback['original_content'] = content  # Keep original for clinical review
            # Redact specific trauma details
            if self._contains_trauma_content(content):
                content = self._redact_specific_trauma_details(content)
                content = self._generalize_emotional_content(content)
                feedback['content'] = content
                feedback['contains_trauma_content'] = True
        if 'bio_signals' in feedback:
            feedback['arousal_metrics'] = self._extract_arousal_metrics(feedback['bio_signals'])
            del feedback['bio_signals']  # Remove raw signals
        return feedback

    def _contains_trauma_content(self, text: Any) -> bool:
        """Check if text contains any trauma-related keyword (case-insensitive)."""
        if not isinstance(text, str):
            return False
        text_lower = text.lower()
        return any(keyword in text_lower for keyword in self.trauma_keywords)

    def _redact_specific_trauma_details(self, text: str) -> str:
        """Redact specific trauma details while preserving emotional content.

        This is a simplified version - in practice, you'd want more
        sophisticated NLP.
        """
        patterns = [
            (r'\b(?:i was|i \w+ed|i felt) (?:\w+ ){0,3}(?:abused|assaulted|attacked|raped|beaten|harmed|threatened|harassed)\b',
             '[trauma experience redacted]'),
            (r'\b(?:he|she|they) (?:\w+ ){0,3}(?:abused|assaulted|attacked|raped|beat|harmed|threatened|harassed) (?:me|us|them)\b',
             '[perpetrator action redacted]')
        ]
        for pattern, replacement in patterns:
            text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
        return text

    def _generalize_emotional_content(self, text: str) -> str:
        """Generalize emotional content to reduce triggers.

        This is a simplified version - in practice, you'd want more
        sophisticated NLP.
        """
        emotional_phrases = {
            r'i felt (?:very |extremely |really )?(?:scared|terrified|frightened|fearful)': 'I felt very afraid',
            r'i was (?:very |extremely |really )?(?:angry|furious|enraged)': 'I felt very angry',
            r'i was (?:very |extremely |really )?(?:sad|depressed|miserable|hopeless)': 'I felt very sad'
        }
        for pattern, replacement in emotional_phrases.items():
            text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
        return text

    def _extract_arousal_metrics(self, bio_signals: Dict[str, float]) -> Dict[str, float]:
        """Convert raw bio signals to normalized arousal metrics.

        Returns:
            Dict with 'hrv_norm'/'gsr_norm' (each 0-1, when present) and a
            weighted 'composite_arousal' when at least one is available.
        """
        metrics: Dict[str, float] = {}
        # Heart Rate Variability (HRV) - lower is more stressed, so the
        # normalized value is inverted (clamped to the typical 20-200ms range).
        if 'hrv' in bio_signals:
            hrv = max(20, min(200, bio_signals['hrv']))
            metrics['hrv_norm'] = 1.0 - ((hrv - 20) / 180.0)
        # Galvanic Skin Response (GSR) - higher is more aroused (assumes a
        # 0-1 input range; out-of-range values are clamped).
        if 'gsr' in bio_signals:
            metrics['gsr_norm'] = max(0.0, min(1.0, bio_signals['gsr']))
        # Composite arousal score (0-1): 60/40 HRV/GSR blend when both are
        # present, otherwise whichever single metric exists.
        if 'hrv_norm' in metrics and 'gsr_norm' in metrics:
            metrics['composite_arousal'] = (metrics['hrv_norm'] * 0.6 +
                                            metrics['gsr_norm'] * 0.4)
        elif 'hrv_norm' in metrics:
            metrics['composite_arousal'] = metrics['hrv_norm']
        elif 'gsr_norm' in metrics:
            metrics['composite_arousal'] = metrics['gsr_norm']
        return metrics

    def _assess_feedback_risk(self, feedback: Dict[str, Any], session_id: str) -> float:
        """Assess potential re-traumatization risk in feedback.

        Returns:
            float: Maximum of the triggered risk factors (0.0 if none).
        """
        risk_factors = []
        # Recent trauma discussion pattern
        if self.temporal_patterns.has_recent_trauma_discussion(session_id):
            risk_factors.append(0.7)
        # High arousal state
        arousal = feedback.get('arousal_metrics', {}).get('composite_arousal', 0)
        if arousal > 0.7:
            risk_factors.append(min(arousal, 0.9))  # Cap at 0.9
        # Negative feedback on supportive responses
        if (feedback.get('type') == FeedbackType.EXPLICIT.value and
                feedback.get('rating', 5) <= 2 and
                self._was_response_supportive(feedback.get('response_id'))):
            risk_factors.append(0.9)
        return max(risk_factors) if risk_factors else 0.0

    def _was_response_supportive(self, response_id: Optional[str]) -> bool:
        """Check if a response was intended to be supportive.

        In a real implementation, this would check the response's
        classification. For now, we assume all responses with an ID
        starting with 'S' are supportive.
        """
        # BUG FIX: coerce to bool — the original returned None/'' for falsy
        # response_id despite the declared bool return type.
        return bool(response_id and response_id.startswith('S'))

    def _trigger_safety_review(self, feedback: Dict[str, Any], session_id: str) -> None:
        """Trigger a safety review for concerning feedback.

        In a real implementation, this would notify a human reviewer.
        """
        print(f"[SAFETY REVIEW] Session {session_id} triggered safety review for feedback: {feedback}")

    def _maintain_buffer_integrity(self) -> None:
        """Ensure feedback buffer doesn't exceed maximum size (drop oldest)."""
        while len(self.feedback_buffer) > self.buffer_size:
            self.feedback_buffer.pop(0)  # Remove oldest feedback

    def get_feedback_batch(self, batch_size: int = 32) -> List[Dict[str, Any]]:
        """Get the most recent `batch_size` feedback items for model training."""
        return self.feedback_buffer[-batch_size:] if self.feedback_buffer else []

    def clear_expired_feedback(self) -> int:
        """Remove feedback that has exceeded retention period.

        NOTE(review): when no consent records exist at all, nothing is
        removed — preserved from the original behavior.

        Returns:
            int: Number of feedback items removed.
        """
        if not self.consent_records:
            return 0
        now = datetime.now()
        initial_count = len(self.feedback_buffer)
        # Filter out expired feedback
        self.feedback_buffer = [
            fb for fb in self.feedback_buffer
            if not self._is_feedback_expired(fb, now)
        ]
        return initial_count - len(self.feedback_buffer)

    def _is_feedback_expired(self, feedback: Dict[str, Any], current_time: datetime) -> bool:
        """Check if feedback has exceeded its retention period.

        Items with no timestamp are kept; items with an unparseable
        timestamp, no session_id, or no consent record are removed.
        """
        if 'timestamp' not in feedback:
            return False
        try:
            feedback_time = datetime.fromisoformat(feedback['timestamp'])
            session_id = feedback.get('session_id')
            if not session_id or session_id not in self.consent_records:
                return True  # No consent record, remove
            retention_days = self.consent_records[session_id].retention_days
            return (current_time - feedback_time).days > retention_days
        except (ValueError, TypeError):
            # Invalid timestamp format (or naive/aware datetime mismatch):
            # remove rather than retain indefinitely.
            return True