# TRuCAL/components/virtue_learning.py
# (Hugging Face artifact metadata: uploaded by johnaugustine, "Upload 53 files", commit 95cc8f6 verified)
"""
Virtue learning module for TRuCAL with adaptive learning and safety mechanisms.
"""
import json
from collections import defaultdict, deque
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau
@dataclass
class LearningMetrics:
    """Tracks learning metrics over time (one entry per model update)."""
    # Per-update histories. ``field(default_factory=list)`` replaces the
    # original ``= None`` + ``__post_init__`` workaround and gives every
    # instance its own lists.
    losses: List[float] = field(default_factory=list)
    safety_violations: List[float] = field(default_factory=list)
    adaptation_magnitudes: List[float] = field(default_factory=list)

    @property
    def update_count(self) -> int:
        """Number of recorded updates.

        BUG FIX: callers (step logging, adaptation summaries) read
        ``metrics.update_count``, which did not exist on the original class.
        """
        return len(self.losses)

    def update(self, loss: float, safety_violation: float, adaptation_magnitude: float) -> None:
        """Append one update's metrics to the histories."""
        self.losses.append(loss)
        self.safety_violations.append(safety_violation)
        self.adaptation_magnitudes.append(adaptation_magnitude)

    def get_recent_metrics(self, window: int = 10) -> Dict[str, float]:
        """Return averages over the trailing ``window`` updates.

        A ``window <= 0`` averages the full history; empty histories yield 0.0.
        """
        recent = slice(-window, None) if window > 0 else slice(0, None)
        return {
            'loss': float(np.mean(self.losses[recent])) if self.losses else 0.0,
            'safety_violation_rate': float(np.mean(self.safety_violations[recent])) if self.safety_violations else 0.0,
            'avg_adaptation': float(np.mean(self.adaptation_magnitudes[recent])) if self.adaptation_magnitudes else 0.0,
        }
class SafetyPreservationNetwork(nn.Module):
    """Ensures adaptations never compromise safety boundaries.

    Scores an (adapted, base) weight pair with a small assessor network
    and, when the score indicates risk, blends the adapted weights back
    toward the frozen base weights.
    """

    def __init__(self, hidden_size: int):
        super().__init__()
        # BUG FIX: the original never stored hidden_size, but forward()
        # referenced the bare name ``hidden_size`` (NameError at runtime).
        self.hidden_size = hidden_size
        self.safety_assessor = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 3),  # [safety_score, boundary_distance, risk_estimate]
            nn.Sigmoid()
        )

    def forward(self,
                adapted_weights: torch.Tensor,
                base_weights: torch.Tensor,
                context: torch.Tensor = None) -> torch.Tensor:
        """Apply safety-preserving transformation to adapted weights.

        Returns ``adapted_weights`` unchanged when the assessor is
        confident, otherwise a convex blend with ``base_weights``.
        ``context`` is optional (the original body already handled None).
        """
        # reshape() instead of view(): the adapted weights may be
        # non-contiguous after the expert-mixing step.
        adapted_flat = adapted_weights.reshape(-1)
        base_flat = base_weights.reshape(-1)
        parts = [adapted_flat, base_flat]
        if context is not None:
            parts.append(context.reshape(-1))
        combined = torch.cat(parts)
        # The assessor consumes a fixed-size slice of the concatenation;
        # zero-pad when the flattened inputs are smaller than that slice.
        needed = 2 * self.hidden_size
        if combined.numel() < needed:
            combined = F.pad(combined, (0, needed - combined.numel()))
        safety_score, boundary_distance, risk_estimate = self.safety_assessor(combined[:needed])
        # Adaptive safety enforcement.
        if safety_score < 0.7 or risk_estimate > 0.6:
            # BUG FIX: clamp to [0, 1] — a risk-only trigger (safety_score
            # >= 0.7) previously produced a negative blend ratio that
            # extrapolated past the adapted weights.
            blend_ratio = torch.clamp((0.7 - safety_score) * 2, 0.0, 1.0)
            return blend_ratio * base_weights + (1 - blend_ratio) * adapted_weights
        return adapted_weights
class CurriculumScheduler:
    """Manages learning curriculum from easy to hard cases."""

    def __init__(self,
                 easy_threshold: float = 0.3,
                 hard_threshold: float = 0.7,
                 min_confidence: float = 0.7):
        self.easy_threshold = easy_threshold
        self.hard_threshold = hard_threshold
        self.min_confidence = min_confidence
        self.update_counter = 0
        # Progresses from 0.0 (easy) toward 1.0 (hard).
        self.difficulty_level = 0.0

    def should_update(self, batch_size: int) -> bool:
        """Return True once the batch is large enough for the current stage."""
        self.update_counter += 1
        # The required batch size shrinks as the curriculum advances.
        required = max(1, int(5 * (1 - self.difficulty_level)))
        return batch_size >= required

    def split_by_difficulty(self, feedback_batch: List[Dict]) -> Tuple[List, List]:
        """Partition feedback into (easy, hard) lists by complexity score."""
        # Interpolate the cutoff between the easy and hard thresholds
        # according to current difficulty.
        cutoff = (self.easy_threshold +
                  (self.hard_threshold - self.easy_threshold) * self.difficulty_level)
        easy, hard = [], []
        for item in feedback_batch:
            bucket = easy if self._assess_feedback_complexity(item) < cutoff else hard
            bucket.append(item)
        return easy, hard

    def update_difficulty(self, success_rate: float) -> None:
        """Nudge difficulty up when succeeding, down (faster) when struggling."""
        if success_rate > 0.8:
            self.difficulty_level = min(1.0, self.difficulty_level + 0.05)
        elif success_rate < 0.5:
            self.difficulty_level = max(0.0, self.difficulty_level - 0.1)

    def _assess_feedback_complexity(self, feedback: Dict) -> float:
        """Score feedback complexity in [0, 1] from a few heuristics."""
        score = 0.0
        # Multi-signal feedback is harder to interpret.
        if len(feedback.get('signals', [])) > 1:
            score += 0.3
        # High emotional arousal.
        if feedback.get('arousal_metrics', {}).get('composite_arousal', 0) > 0.7:
            score += 0.4
        # Internally inconsistent feedback.
        if self._has_contradictory_signals(feedback):
            score += 0.3
        return min(score, 1.0)

    def _has_contradictory_signals(self, feedback: Dict) -> bool:
        """Detect rating/emotion mismatches in the feedback."""
        rating = feedback.get('rating', 0)
        sentiment = feedback.get('sentiment', {}).get('compound', 0)
        arousal = feedback.get('arousal_metrics', {}).get('composite_arousal', 0)
        # Positive rating paired with strongly negative sentiment.
        if rating > 3 and sentiment < -0.5:
            return True
        # Negative rating paired with low arousal.
        return rating < 3 and arousal < 0.3
class AdaptiveVirtueGateLearner(nn.Module):
    """Enhanced virtue gate with multi-expert adaptation and safety mechanisms.

    Wraps a frozen ``base_gate`` attention module. In eval mode (or without
    feedback context) it is a transparent pass-through; in training mode it
    applies small, safety-checked expert corrections to the base attention
    weights and learns from feedback batches.
    """

    def __init__(self,
                 base_gate: nn.Module,
                 hidden_size: int = 256,
                 num_experts: int = 4,
                 device: str = None):
        super().__init__()
        self.base_gate = base_gate
        self.hidden_size = hidden_size
        self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
        # The base gate is treated as a fixed, already-validated policy.
        for param in self.base_gate.parameters():
            param.requires_grad = False
        # Multi-expert tension adaptors.
        self.expert_adaptors = nn.ModuleList([
            self._create_expert_adaptor() for _ in range(num_experts)
        ])
        # Expert router over the fused context embedding.
        # BUG FIX: the original declared an input of hidden_size + 4 but was
        # always fed the hidden_size-dim output of _fuse_contexts, which
        # would raise a shape-mismatch error on the first training forward.
        self.expert_router = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, num_experts),
            nn.Softmax(dim=-1)
        )
        # Projects the 6 scalar context features to hidden_size.
        # BUG FIX: the original constructed a brand-new nn.Linear inside
        # _fuse_contexts on every forward pass, so the projection weights
        # were random each call and could never be trained.
        self.context_proj = nn.Linear(6, hidden_size)
        # Safety preservation network.
        self.safety_preserver = SafetyPreservationNetwork(hidden_size)
        # Curriculum learning scheduler.
        self.curriculum_scheduler = CurriculumScheduler()
        # Optimize only the trainable parameters (the base gate is frozen).
        self.optimizer = AdamW(
            (p for p in self.parameters() if p.requires_grad),
            lr=1e-4, weight_decay=1e-5)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min', patience=5)
        # Track learning metrics and a bounded feedback history.
        self.metrics = LearningMetrics()
        self.feedback_memory = deque(maxlen=1000)
        # Move to device (safe after optimizer creation: Adam state is lazy).
        self.to(self.device)

    def _create_expert_adaptor(self) -> nn.Module:
        """Create one expert adaptation MLP; Tanh bounds its output to [-1, 1]."""
        return nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.Tanh()
        )

    def forward(self,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: torch.Tensor = None,
                virtue_meta: Dict = None,
                feedback_context: Dict = None):
        """Forward pass: base attention, plus adaptation when training with feedback.

        Returns (output, attention_weights), mirroring the base gate.
        """
        base_out, base_weights = self.base_gate(query, key, value, mask, virtue_meta)
        # Pass-through path: no adaptation outside training / without feedback.
        if not self.training or feedback_context is None:
            return base_out, base_weights
        # Fuse metadata, feedback signals and attention statistics.
        context_embedding = self._fuse_contexts(virtue_meta, feedback_context, base_weights)
        # Route to experts and compute the weighted adaptation.
        expert_weights = self.expert_router(context_embedding)
        adapted_weights = self._apply_multi_expert_adaptation(
            base_weights, expert_weights, context_embedding, virtue_meta
        )
        # Safety check may blend back toward the base weights.
        safe_weights = self.safety_preserver(adapted_weights, base_weights, context_embedding)
        return safe_weights @ value, safe_weights

    def update_from_feedback_batch(self, feedback_batch: List[Dict]) -> Dict[str, float]:
        """Update the model from a batch of feedback with curriculum learning.

        Returns a dict of per-task loss scalars, or ``update_skipped`` when
        the batch is empty or too small for the current curriculum stage.
        """
        if not feedback_batch:
            return {'loss': 0.0, 'update_skipped': 1.0}
        if not self.curriculum_scheduler.should_update(len(feedback_batch)):
            return {'loss': 0.0, 'update_skipped': 1.0}
        # Split by feedback type.
        explicit_feedback = [fb for fb in feedback_batch if fb.get('type') == 'explicit_rating']
        clinical_feedback = [fb for fb in feedback_batch if fb.get('type') == 'clinical_override']
        # Split by difficulty. NOTE(review): items appear in both the
        # type-based and difficulty-based splits, so each item contributes
        # to two loss terms — presumably intentional multi-task weighting;
        # confirm with the training design.
        easy_batch, hard_batch = self.curriculum_scheduler.split_by_difficulty(feedback_batch)
        losses = {}
        if explicit_feedback:
            losses['explicit_loss'] = self._update_from_explicit_feedback(explicit_feedback)
        if clinical_feedback:
            # Clinical overrides get double weight.
            losses['clinical_loss'] = self._update_from_clinical_feedback(clinical_feedback) * 2.0
        if easy_batch:
            losses['easy_loss'] = self._update_from_feedback(easy_batch, difficulty='easy')
        if hard_batch:
            losses['hard_loss'] = self._update_from_feedback(hard_batch, difficulty='hard')
        total_loss = sum(losses.values())
        if total_loss > 0:
            self.optimizer.zero_grad()
            total_loss.backward()
            # Gradient safety clipping.
            torch.nn.utils.clip_grad_norm_(self.parameters(), max_norm=1.0)
            self.optimizer.step()
            self.scheduler.step(total_loss.item())
        self._log_adaptation_metrics(losses, feedback_batch)
        # Adjust curriculum difficulty from this batch's success rate.
        success_rate = self._calculate_success_rate(feedback_batch)
        self.curriculum_scheduler.update_difficulty(success_rate)
        return {k: v.item() if hasattr(v, 'item') else v for k, v in losses.items()}

    def _update_from_explicit_feedback(self, feedback_list: List[Dict]) -> torch.Tensor:
        """Compute the mean loss over explicit user-rating feedback."""
        losses = []
        for feedback in feedback_list:
            emotional_context = self._extract_emotional_context(feedback)
            # Map the 1-5 rating (default neutral 3) to a target tension.
            target_tension = self._rating_to_target_tension(
                feedback.get('rating', 3),
                emotional_context
            )
            pred_tension = self._predict_optimal_tension(feedback)
            # Smooth L1 is less sensitive to outlier ratings than MSE.
            loss = F.smooth_l1_loss(
                pred_tension,
                torch.tensor([target_tension], device=self.device))
            losses.append(loss)
        return torch.stack(losses).mean() if losses else torch.tensor(0.0, device=self.device)

    def _update_from_clinical_feedback(self, feedback_list: List[Dict]) -> torch.Tensor:
        """Compute loss for clinical feedback; scaled up to reflect higher confidence."""
        return self._update_from_explicit_feedback(feedback_list) * 1.5

    def _update_from_feedback(self, feedback_batch: List[Dict], difficulty: str = 'easy') -> torch.Tensor:
        """Compute a difficulty-weighted mean loss over mixed feedback types."""
        losses = []
        # Hard examples are weighted 1.5x.
        weight = 1.0 if difficulty == 'easy' else 1.5
        for feedback in feedback_batch:
            if 'rating' in feedback:
                loss = self._calculate_rating_loss(feedback)
            elif 'correction' in feedback:
                loss = self._calculate_correction_loss(feedback)
            else:
                loss = self._calculate_generic_loss(feedback)
            losses.append(loss * weight)
        return torch.stack(losses).mean() if losses else torch.tensor(0.0, device=self.device)

    def _calculate_rating_loss(self, feedback: Dict) -> torch.Tensor:
        """Smooth-L1 loss between predicted and rating-derived target tension."""
        emotional_context = self._extract_emotional_context(feedback)
        target_tension = self._rating_to_target_tension(
            feedback.get('rating', 3),
            emotional_context
        )
        pred_tension = self._predict_optimal_tension(feedback)
        return F.smooth_l1_loss(
            pred_tension,
            torch.tensor([target_tension], device=self.device))

    def _calculate_correction_loss(self, feedback: Dict) -> torch.Tensor:
        """MSE loss toward a clinician-supplied correction target (default 0.5)."""
        correction = feedback.get('correction', {})
        target = correction.get('target', 0.5)
        return F.mse_loss(
            self._predict_optimal_tension(feedback),
            torch.tensor([target], device=self.device)
        )

    def _calculate_generic_loss(self, feedback: Dict) -> torch.Tensor:
        """Fallback zero loss for unrecognized feedback types (override in subclasses)."""
        return torch.tensor(0.0, device=self.device)

    def _extract_emotional_context(self, feedback: Dict) -> Dict[str, float]:
        """Extract arousal/valence/intensity scalars from a feedback dict.

        Intensity is the stronger of (distance of arousal from neutral 0.5,
        rescaled to [0, 1]) and absolute sentiment.
        """
        arousal = feedback.get('arousal_metrics', {}).get('composite_arousal', 0.5)
        valence = feedback.get('sentiment', {}).get('compound', 0.0)
        return {
            'arousal': arousal,
            'valence': valence,
            'intensity': max(abs(arousal - 0.5) * 2, abs(valence)),
        }

    def _rating_to_target_tension(self, rating: int, emotional_context: Dict) -> float:
        """Map a 1-5 rating to a target tension in [0, 1], adjusted by emotion."""
        scaled_rating = (rating - 1) / 4.0
        intensity = emotional_context.get('intensity', 0.5)
        valence = emotional_context.get('valence', 0.0)
        # High-intensity negative emotion pushes the target lower.
        if intensity > 0.7 and valence < -0.3:
            return max(0.1, scaled_rating - 0.2)
        return scaled_rating

    def _predict_optimal_tension(self, feedback: Dict) -> torch.Tensor:
        """Placeholder tension prediction (neutral 0.5).

        NOTE(review): a real implementation should derive this from the
        model's current state for the feedback context.
        """
        return torch.tensor([0.5], device=self.device)

    def _fuse_contexts(self,
                       virtue_meta: Dict,
                       feedback_context: Dict,
                       base_weights: torch.Tensor) -> torch.Tensor:
        """Fuse virtue metadata, feedback signals and attention stats into one embedding."""
        # Tolerate missing metadata dicts.
        virtue_meta = virtue_meta or {}
        feedback_context = feedback_context or {}
        tension = torch.as_tensor(
            virtue_meta.get('tension', 0.5), dtype=torch.float32, device=self.device)
        head_weights = virtue_meta.get('head_weights', torch.ones(1, device=self.device))
        arousal = torch.as_tensor(
            feedback_context.get('arousal_metrics', {}).get('composite_arousal', 0.5),
            dtype=torch.float32, device=self.device)
        sentiment = torch.as_tensor(
            feedback_context.get('sentiment', {}).get('compound', 0.0),
            dtype=torch.float32, device=self.device)
        # Summary statistics of the base attention pattern.
        attn_mean = base_weights.mean()
        attn_std = base_weights.std()
        # BUG FIX: the original torch.cat mixed 1-D and 2-D tensors (it
        # unsqueezed only some features), which raises at runtime; stack
        # the six scalar features into a (6,) vector instead.
        context_features = torch.stack([
            tension,
            head_weights.mean(),  # mean head weight as a single feature
            arousal,
            sentiment,
            attn_mean,
            attn_std,
        ])
        # BUG FIX: project with the module-owned, trainable layer instead of
        # a fresh randomly-initialized nn.Linear created per call.
        return F.relu(self.context_proj(context_features))

    def _apply_multi_expert_adaptation(self,
                                       base_weights: torch.Tensor,
                                       expert_weights: torch.Tensor,
                                       context_embedding: torch.Tensor,
                                       virtue_meta: Dict) -> torch.Tensor:
        """Mix expert outputs by router probability and add a small residual."""
        expert_outputs = torch.stack(
            [expert(context_embedding) for expert in self.expert_adaptors]
        )  # [num_experts, hidden_size]
        adapted = (expert_weights.unsqueeze(-1) * expert_outputs).sum(dim=0)
        # Small step size for stability.
        # NOTE(review): broadcasting assumes base_weights' trailing dim equals
        # hidden_size — confirm against the base gate's attention shape.
        return base_weights + adapted * 0.1

    def _log_adaptation_metrics(self, losses: Dict[str, torch.Tensor],
                                feedback_batch: List[Dict]) -> None:
        """Record loss/safety/adaptation metrics; print a summary every 10 steps."""
        total_loss = sum(loss.item() if hasattr(loss, 'item') else loss
                         for loss in losses.values())
        safety_violation = 1.0 if any(fb.get('safety_violation', False)
                                      for fb in feedback_batch) else 0.0
        # Magnitude of the latest gradient step across trainable parameters.
        # BUG FIX: the original summed *parameter* norms, which measures model
        # size rather than how much this update adapted it.
        adaptation_magnitude = sum(p.grad.norm().item()
                                   for p in self.parameters()
                                   if p.requires_grad and p.grad is not None)
        self.metrics.update(total_loss, safety_violation, adaptation_magnitude)
        # BUG FIX: LearningMetrics exposes no ``update_count`` attribute in
        # this file's original version; derive the step count from history.
        step = len(self.metrics.losses)
        if step % 10 == 0:
            print(f"Step {step}:")
            print(f"  Loss: {total_loss:.4f}")
            print(f"  Safety violation: {safety_violation}")
            print(f"  Adaptation magnitude: {adaptation_magnitude:.6f}")

    def _calculate_success_rate(self, feedback_batch: List[Dict]) -> float:
        """Fraction of feedback items counted as successes (rating >= 4 or success flag)."""
        if not feedback_batch:
            return 0.5  # neutral rate for an empty batch
        successes = 0
        for fb in feedback_batch:
            if 'rating' in fb and fb['rating'] >= 4:
                successes += 1
            elif 'success' in fb and fb['success']:
                successes += 1
        return successes / len(feedback_batch)

    def get_adaptation_summary(self) -> Dict[str, Any]:
        """Snapshot of curriculum stage, LR, recent metrics and step count."""
        return {
            'difficulty_level': self.curriculum_scheduler.difficulty_level,
            'learning_rate': self.optimizer.param_groups[0]['lr'],
            'recent_metrics': self.metrics.get_recent_metrics(window=10),
            # BUG FIX: derive the update count from the recorded history
            # instead of the nonexistent metrics.update_count attribute.
            'update_count': len(self.metrics.losses),
            'timestamp': datetime.now().isoformat()
        }