| """ |
| Virtue learning module for TRuCAL with adaptive learning and safety mechanisms. |
| """ |
import json
from collections import defaultdict, deque
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau
|
|
@dataclass
class LearningMetrics:
    """Tracks per-update learning metrics over time.

    Each field uses ``default_factory`` so every instance owns its own lists;
    the previous ``= None`` + ``__post_init__`` pattern silently discarded any
    lists a caller passed to the constructor.
    """
    # Per-update training losses.
    losses: List[float] = field(default_factory=list)
    # Per-update safety-violation indicators (0.0 or 1.0).
    safety_violations: List[float] = field(default_factory=list)
    # Per-update adaptation magnitudes (sums of parameter norms).
    adaptation_magnitudes: List[float] = field(default_factory=list)

    @property
    def update_count(self) -> int:
        """Number of updates recorded so far.

        Callers (e.g. logging/summary code) read ``metrics.update_count``;
        this attribute was previously missing and raised AttributeError.
        """
        return len(self.losses)

    def update(self, loss: float, safety_violation: float, adaptation_magnitude: float) -> None:
        """Append one update's metrics to the running histories."""
        self.losses.append(loss)
        self.safety_violations.append(safety_violation)
        self.adaptation_magnitudes.append(adaptation_magnitude)

    def get_recent_metrics(self, window: int = 10) -> Dict[str, float]:
        """Average each metric over the last ``window`` updates.

        A ``window`` <= 0 averages over the full history. Empty histories
        yield 0.0 rather than NaN.
        """
        recent = slice(-window, None) if window > 0 else slice(0, None)
        return {
            'loss': float(np.mean(self.losses[recent])) if self.losses else 0.0,
            'safety_violation_rate': float(np.mean(self.safety_violations[recent])) if self.safety_violations else 0.0,
            'avg_adaptation': float(np.mean(self.adaptation_magnitudes[recent])) if self.adaptation_magnitudes else 0.0
        }
|
|
class SafetyPreservationNetwork(nn.Module):
    """Ensures adaptations never compromise safety boundaries.

    Scores an (adapted, base) weight pair; when the score signals risk, the
    adapted weights are blended back toward the base weights in proportion to
    the safety deficit.
    """

    def __init__(self, hidden_size: int):
        super().__init__()
        # BUGFIX: hidden_size must be stored on the module — forward()
        # previously referenced a bare `hidden_size` name that does not exist
        # at call time (NameError on every call).
        self.hidden_size = hidden_size
        # Maps the concatenated flat (adapted, base) weights to three scores
        # in [0, 1]: safety score, boundary distance, risk estimate.
        self.safety_assessor = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 3),
            nn.Sigmoid()
        )

    def forward(self, adapted_weights: torch.Tensor,
                base_weights: torch.Tensor,
                context: torch.Tensor) -> torch.Tensor:
        """Apply safety-preserving transformation to adapted weights.

        Args:
            adapted_weights: Candidate (adapted) weight tensor.
            base_weights: Trusted baseline weights of the same shape.
            context: Optional extra context tensor; concatenated but not
                consumed by the assessor (only the first ``2 * hidden_size``
                features are scored).

        Returns:
            Either ``adapted_weights`` unchanged, or a convex blend of base
            and adapted weights when the assessor flags risk.
        """
        adapted_flat = adapted_weights.view(-1)
        base_flat = base_weights.view(-1)

        if context is not None:
            combined = torch.cat([adapted_flat, base_flat, context.view(-1)])
        else:
            combined = torch.cat([adapted_flat, base_flat])

        # Score only the assessor's input width.
        # NOTE(review): assumes adapted+base flatten to at least
        # 2 * hidden_size elements — confirm against callers.
        safety_metrics = self.safety_assessor(combined[:2 * self.hidden_size])
        safety_score, boundary_distance, risk_estimate = safety_metrics

        if safety_score < 0.7 or risk_estimate > 0.6:
            # Blend toward base weights in proportion to the safety deficit.
            # BUGFIX: clamped to [0, 1] — previously a high risk_estimate with
            # an acceptable safety_score yielded a negative ratio, i.e. an
            # extrapolation *away* from the safe base weights.
            blend_ratio = torch.clamp((0.7 - safety_score) * 2, 0.0, 1.0)
            return blend_ratio * base_weights + (1 - blend_ratio) * adapted_weights

        return adapted_weights
|
|
class CurriculumScheduler:
    """Manages a learning curriculum that moves from easy to hard cases.

    Tracks a ``difficulty_level`` in [0, 1]; as it rises, smaller batches are
    allowed to trigger updates and the easy/hard complexity cutoff shifts
    toward ``hard_threshold``.
    """

    def __init__(self,
                 easy_threshold: float = 0.3,
                 hard_threshold: float = 0.7,
                 min_confidence: float = 0.7):
        self.easy_threshold = easy_threshold
        self.hard_threshold = hard_threshold
        self.min_confidence = min_confidence
        self.update_counter = 0
        self.difficulty_level = 0.0

    def should_update(self, batch_size: int) -> bool:
        """Gate an update on batch size; the required size shrinks with difficulty."""
        self.update_counter += 1
        required = max(1, int(5 * (1 - self.difficulty_level)))
        return batch_size >= required

    def split_by_difficulty(self, feedback_batch: List[Dict]) -> Tuple[List, List]:
        """Partition feedback into (easy, hard) around the current cutoff."""
        span = self.hard_threshold - self.easy_threshold
        cutoff = self.easy_threshold + span * self.difficulty_level

        scored = [(self._assess_feedback_complexity(fb), fb) for fb in feedback_batch]
        easy_cases = [fb for complexity, fb in scored if complexity < cutoff]
        hard_cases = [fb for complexity, fb in scored if complexity >= cutoff]
        return easy_cases, hard_cases

    def update_difficulty(self, success_rate: float) -> None:
        """Raise difficulty when succeeding (>0.8), lower it when struggling (<0.5)."""
        delta = 0.05 if success_rate > 0.8 else (-0.1 if success_rate < 0.5 else 0.0)
        self.difficulty_level = min(1.0, max(0.0, self.difficulty_level + delta))

    def _assess_feedback_complexity(self, feedback: Dict) -> float:
        """Score a feedback case's complexity in [0, 1].

        Contributions: multiple signals (+0.3), high composite arousal (+0.4),
        contradictory signals (+0.3), capped at 1.0.
        """
        score = 0.0

        if len(feedback.get('signals', [])) > 1:
            score += 0.3

        arousal = feedback.get('arousal_metrics', {}).get('composite_arousal', 0)
        if arousal > 0.7:
            score += 0.4

        if self._has_contradictory_signals(feedback):
            score += 0.3

        return min(score, 1.0)

    def _has_contradictory_signals(self, feedback: Dict) -> bool:
        """Return True when the rating disagrees with sentiment or arousal."""
        rating = feedback.get('rating', 0)
        sentiment = feedback.get('sentiment', {}).get('compound', 0)
        arousal = feedback.get('arousal_metrics', {}).get('composite_arousal', 0)

        # High rating paired with strongly negative sentiment.
        if rating > 3 and sentiment < -0.5:
            return True

        # Low rating paired with very low arousal.
        return rating < 3 and arousal < 0.3
|
|
class AdaptiveVirtueGateLearner(nn.Module):
    """Enhanced virtue gate with multi-expert adaptation and safety mechanisms.

    Wraps a frozen ``base_gate`` attention module, learns residual adaptations
    to its attention weights from user/clinical feedback via a small pool of
    expert networks, and routes every adaptation through a safety-preservation
    network before it is applied.
    """

    def __init__(self,
                 base_gate: nn.Module,
                 hidden_size: int = 256,
                 num_experts: int = 4,
                 device: str = None):
        """Build the learner.

        Args:
            base_gate: Pretrained gate module returning ``(output, weights)``;
                its parameters are frozen here.
            hidden_size: Width of the context embedding and expert layers.
            num_experts: Number of expert adaptors to mix.
            device: Torch device string; auto-detects CUDA when None.
        """
        super().__init__()
        self.base_gate = base_gate
        self.hidden_size = hidden_size
        self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')

        # Freeze the base gate: only the adaptation machinery is trainable.
        for param in self.base_gate.parameters():
            param.requires_grad = False

        # Pool of expert adaptors whose outputs are mixed per context.
        self.expert_adaptors = nn.ModuleList([
            self._create_expert_adaptor() for _ in range(num_experts)
        ])

        # BUGFIX: the router consumes the fused context embedding, which is
        # hidden_size wide (see _fuse_contexts); the previous input width of
        # hidden_size + 4 caused a shape mismatch on the first forward pass.
        self.expert_router = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.Linear(hidden_size // 2, num_experts),
            nn.Softmax(dim=-1)
        )

        # BUGFIX: the 6-feature -> hidden_size context projection is a
        # registered module created once here, instead of a fresh untrained
        # (and untracked) nn.Linear allocated on every _fuse_contexts call.
        self.context_fuser = nn.Linear(6, hidden_size)

        # Safety layer that can blend adapted weights back toward base weights.
        self.safety_preserver = SafetyPreservationNetwork(hidden_size)

        # Curriculum controller for feedback-driven updates.
        self.curriculum_scheduler = CurriculumScheduler()

        # Built after all trainable submodules exist so all are optimized.
        self.optimizer = AdamW(self.parameters(), lr=1e-4, weight_decay=1e-5)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min', patience=5)

        # Metrics history and a bounded buffer of recent feedback.
        self.metrics = LearningMetrics()
        self.feedback_memory = deque(maxlen=1000)

        self.to(self.device)

    def _create_expert_adaptor(self) -> nn.Module:
        """Create one expert adaptation module (hidden_size -> hidden_size)."""
        return nn.Sequential(
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.Tanh()
        )

    def forward(self,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: torch.Tensor = None,
                virtue_meta: Dict = None,
                feedback_context: Dict = None):
        """Forward pass with adaptation and safety mechanisms.

        Outside training, or when no feedback context is supplied, the base
        gate's output is returned unchanged. Otherwise the base attention
        weights receive a safety-checked, expert-driven residual adaptation.

        Returns:
            Tuple of (attention output, attention weights).
        """
        base_out, base_weights = self.base_gate(query, key, value, mask, virtue_meta)

        if not self.training or feedback_context is None:
            return base_out, base_weights

        # Fuse virtue metadata, feedback signals and attention statistics.
        context_embedding = self._fuse_contexts(virtue_meta, feedback_context, base_weights)

        # Soft routing over the expert pool.
        expert_weights = self.expert_router(context_embedding)

        # Residual adaptation of the base attention weights.
        adapted_weights = self._apply_multi_expert_adaptation(
            base_weights, expert_weights, context_embedding, virtue_meta
        )

        # Never let an adaptation cross safety boundaries unchecked.
        safe_weights = self.safety_preserver(adapted_weights, base_weights, context_embedding)

        return safe_weights @ value, safe_weights

    def update_from_feedback_batch(self, feedback_batch: List[Dict]) -> Dict[str, float]:
        """Update the model from a batch of feedback with curriculum learning.

        Args:
            feedback_batch: List of feedback dicts (keys such as 'type',
                'rating', 'sentiment', 'arousal_metrics' are read if present).

        Returns:
            Dict of named loss components as floats; contains
            ``update_skipped`` when no optimization step was attempted.
        """
        if not feedback_batch:
            return {'loss': 0.0, 'update_skipped': 1.0}

        # Respect the curriculum's minimum batch-size gate.
        if not self.curriculum_scheduler.should_update(len(feedback_batch)):
            return {'loss': 0.0, 'update_skipped': 1.0}

        # Specialized feedback streams. NOTE(review): explicit/clinical items
        # also flow into the easy/hard split below, so they contribute to two
        # loss terms — confirm this double-weighting is intended.
        explicit_feedback = [fb for fb in feedback_batch if fb.get('type') == 'explicit_rating']
        clinical_feedback = [fb for fb in feedback_batch if fb.get('type') == 'clinical_override']

        easy_batch, hard_batch = self.curriculum_scheduler.split_by_difficulty(feedback_batch)

        losses = {}

        if explicit_feedback:
            losses['explicit_loss'] = self._update_from_explicit_feedback(explicit_feedback)

        # Clinical overrides are up-weighted (x2) relative to user feedback.
        if clinical_feedback:
            losses['clinical_loss'] = self._update_from_clinical_feedback(clinical_feedback) * 2.0

        if easy_batch:
            losses['easy_loss'] = self._update_from_feedback(easy_batch, difficulty='easy')

        if hard_batch:
            losses['hard_loss'] = self._update_from_feedback(hard_batch, difficulty='hard')

        total_loss = sum(losses.values())

        # BUGFIX: only backprop when the loss is a differentiable tensor —
        # the constant placeholder losses carry no grad_fn, so calling
        # backward() on them raised a RuntimeError.
        if torch.is_tensor(total_loss) and total_loss.requires_grad and total_loss.item() > 0:
            self.optimizer.zero_grad()
            total_loss.backward()

            # Clip to keep individual adaptation steps small and safe.
            torch.nn.utils.clip_grad_norm_(self.parameters(), max_norm=1.0)

            self.optimizer.step()
            self.scheduler.step(total_loss.item())

        self._log_adaptation_metrics(losses, feedback_batch)

        # Let the curriculum react to how well this batch went.
        success_rate = self._calculate_success_rate(feedback_batch)
        self.curriculum_scheduler.update_difficulty(success_rate)

        return {k: v.item() if hasattr(v, 'item') else v for k, v in losses.items()}

    def _update_from_explicit_feedback(self, feedback_list: List[Dict]) -> torch.Tensor:
        """Compute the mean loss over explicit user-rating feedback items."""
        losses = []

        for feedback in feedback_list:
            # Emotional context modulates how the rating maps to a target.
            emotional_context = self._extract_emotional_context(feedback)

            target_tension = self._rating_to_target_tension(
                feedback.get('rating', 3),
                emotional_context
            )

            pred_tension = self._predict_optimal_tension(feedback)

            # Smooth L1 is robust to occasional outlier ratings.
            loss = F.smooth_l1_loss(pred_tension, torch.tensor([target_tension],
                                                               device=self.device))
            losses.append(loss)

        return torch.stack(losses).mean() if losses else torch.tensor(0.0, device=self.device)

    def _update_from_clinical_feedback(self, feedback_list: List[Dict]) -> torch.Tensor:
        """Compute the loss for clinical feedback, up-weighted (x1.5).

        NOTE(review): the call site multiplies this by a further 2.0, for an
        effective 3x weight vs. explicit feedback — confirm intended.
        """
        return self._update_from_explicit_feedback(feedback_list) * 1.5

    def _update_from_feedback(self, feedback_batch: List[Dict], difficulty: str = 'easy') -> torch.Tensor:
        """Compute a mean loss over generic feedback, weighted by difficulty."""
        losses = []
        # Hard cases are up-weighted so the model keeps learning from them.
        weight = 1.0 if difficulty == 'easy' else 1.5

        for feedback in feedback_batch:
            if 'rating' in feedback:
                loss = self._calculate_rating_loss(feedback)
            elif 'correction' in feedback:
                loss = self._calculate_correction_loss(feedback)
            else:
                loss = self._calculate_generic_loss(feedback)

            losses.append(loss * weight)

        return torch.stack(losses).mean() if losses else torch.tensor(0.0, device=self.device)

    def _calculate_rating_loss(self, feedback: Dict) -> torch.Tensor:
        """Smooth-L1 loss between predicted and rating-derived target tension."""
        emotional_context = self._extract_emotional_context(feedback)
        target_tension = self._rating_to_target_tension(
            feedback.get('rating', 3),
            emotional_context
        )
        pred_tension = self._predict_optimal_tension(feedback)
        return F.smooth_l1_loss(pred_tension,
                                torch.tensor([target_tension], device=self.device))

    def _calculate_correction_loss(self, feedback: Dict) -> torch.Tensor:
        """MSE loss toward the explicit target in a safety correction."""
        correction = feedback.get('correction', {})
        target = correction.get('target', 0.5)
        return F.mse_loss(
            self._predict_optimal_tension(feedback),
            torch.tensor([target], device=self.device)
        )

    def _calculate_generic_loss(self, feedback: Dict) -> torch.Tensor:
        """Zero loss for unrecognized feedback types (no learning signal)."""
        return torch.tensor(0.0, device=self.device)

    def _extract_emotional_context(self, feedback: Dict) -> Dict[str, float]:
        """Extract arousal, valence, and a derived intensity from feedback.

        Intensity is the larger of (a) arousal's deviation from its 0.5
        midpoint (rescaled to [0, 1]) and (b) absolute sentiment valence.
        """
        return {
            'arousal': feedback.get('arousal_metrics', {}).get('composite_arousal', 0.5),
            'valence': feedback.get('sentiment', {}).get('compound', 0.0),
            'intensity': max(
                abs(feedback.get('arousal_metrics', {}).get('composite_arousal', 0.5) - 0.5) * 2,
                abs(feedback.get('sentiment', {}).get('compound', 0.0))
            )
        }

    def _rating_to_target_tension(self, rating: int, emotional_context: Dict) -> float:
        """Convert a user rating to a target tension value in roughly [0, 1].

        NOTE(review): assumes ratings are on a 1-5 scale; out-of-range ratings
        map outside [0, 1] — confirm against the feedback producer.
        """
        # Map 1..5 linearly onto 0..1.
        scaled_rating = (rating - 1) / 4.0

        intensity = emotional_context.get('intensity', 0.5)
        valence = emotional_context.get('valence', 0.0)

        # Intense negative emotion lowers the target (gentler response),
        # floored at 0.1 so the gate never fully disengages.
        if intensity > 0.7 and valence < -0.3:
            return max(0.1, scaled_rating - 0.2)

        return scaled_rating

    def _predict_optimal_tension(self, feedback: Dict) -> torch.Tensor:
        """Predict the optimal tension level for given feedback.

        Placeholder: returns a constant 0.5 with no gradient; the optimizer
        step is skipped for purely placeholder losses (see
        update_from_feedback_batch).
        """
        return torch.tensor([0.5], device=self.device)

    def _fuse_contexts(self,
                       virtue_meta: Dict,
                       feedback_context: Dict,
                       base_weights: torch.Tensor) -> torch.Tensor:
        """Fuse virtue metadata, feedback signals and attention statistics
        into a single hidden_size-dimensional context embedding."""
        # Tolerate missing metadata dicts.
        virtue_meta = virtue_meta or {}
        feedback_context = feedback_context or {}

        tension = float(virtue_meta.get('tension', 0.5))
        head_weights = virtue_meta.get('head_weights', torch.ones(1, device=self.device))
        arousal = float(feedback_context.get('arousal_metrics', {}).get('composite_arousal', 0.5))
        sentiment = float(feedback_context.get('sentiment', {}).get('compound', 0.0))

        # Summary statistics of the base attention pattern. unbiased=False so
        # a single-element weight tensor yields 0.0 instead of NaN.
        attn_mean = base_weights.float().mean()
        attn_std = base_weights.float().std(unbiased=False)

        # BUGFIX: assemble one flat 6-feature vector via torch.stack — the
        # previous torch.cat mixed rank-1 and rank-2 tensors and raised.
        context_features = torch.stack([
            torch.as_tensor(tension, dtype=torch.float32, device=self.device),
            head_weights.float().mean(),
            torch.as_tensor(arousal, dtype=torch.float32, device=self.device),
            torch.as_tensor(sentiment, dtype=torch.float32, device=self.device),
            attn_mean,
            attn_std
        ])

        # Project through the registered fusion layer (trained with the rest
        # of the adaptation machinery) rather than a fresh Linear per call.
        return F.relu(self.context_fuser(context_features))

    def _apply_multi_expert_adaptation(self,
                                       base_weights: torch.Tensor,
                                       expert_weights: torch.Tensor,
                                       context_embedding: torch.Tensor,
                                       virtue_meta: Dict) -> torch.Tensor:
        """Mix the expert outputs by router weight and add a small residual.

        NOTE(review): the mixed adaptation is hidden_size-dimensional and is
        broadcast-added onto the attention weights — confirm base_weights'
        trailing dimension matches hidden_size in the caller.
        """
        expert_outputs = torch.stack([expert(context_embedding)
                                      for expert in self.expert_adaptors])

        # Weighted sum over the expert axis.
        adapted_weights = (expert_weights.unsqueeze(-1) * expert_outputs).sum(dim=0)

        # Small residual step (0.1) keeps adaptations conservative.
        return base_weights + adapted_weights * 0.1

    def _log_adaptation_metrics(self, losses: Dict[str, torch.Tensor],
                                feedback_batch: List[Dict]) -> None:
        """Record (and periodically print) loss/safety/adaptation metrics."""
        total_loss = sum(loss.item() if hasattr(loss, 'item') else loss
                         for loss in losses.values())
        safety_violation = 1.0 if any(fb.get('safety_violation', False)
                                      for fb in feedback_batch) else 0.0
        # Sum of parameter norms for trainable params that received gradients.
        adaptation_magnitude = sum(p.norm().item()
                                   for p in self.parameters()
                                   if p.requires_grad and p.grad is not None)

        self.metrics.update(total_loss, safety_violation, adaptation_magnitude)

        # BUGFIX: derive the step count from the recorded history —
        # LearningMetrics as defined has no `update_count` attribute.
        step = len(self.metrics.losses)
        if step % 10 == 0:
            print(f"Step {step}:")
            print(f"  Loss: {total_loss:.4f}")
            print(f"  Safety violation: {safety_violation}")
            print(f"  Adaptation magnitude: {adaptation_magnitude:.6f}")

    def _calculate_success_rate(self, feedback_batch: List[Dict]) -> float:
        """Fraction of feedback items that count as successes.

        A rating >= 4 or a truthy 'success' flag counts; an empty batch yields
        a neutral 0.5.
        """
        if not feedback_batch:
            return 0.5

        successes = 0
        for fb in feedback_batch:
            if 'rating' in fb and fb['rating'] >= 4:
                successes += 1
            elif 'success' in fb and fb['success']:
                successes += 1

        return successes / len(feedback_batch)

    def get_adaptation_summary(self) -> Dict[str, Any]:
        """Return a snapshot of the learner's adaptation state for monitoring."""
        return {
            'difficulty_level': self.curriculum_scheduler.difficulty_level,
            'learning_rate': self.optimizer.param_groups[0]['lr'],
            'recent_metrics': self.metrics.get_recent_metrics(window=10),
            # BUGFIX: LearningMetrics has no `update_count` attribute; the
            # recorded loss history length is the update count.
            'update_count': len(self.metrics.losses),
            'timestamp': datetime.now().isoformat()
        }
|
|