# TRuCAL/components/validation_protocol.py
# Provenance: uploaded by johnaugustine ("Upload 53 files", commit 95cc8f6, verified).
"""
Enhanced Validation Protocol for TRuCAL
Implements phased validation with ethical constraints, developmental tracking,
and biological constraints for the Ambient Sovereign Core.
"""
# Standard library
import heapq
import json
import os
import time
from collections import deque, defaultdict
from dataclasses import dataclass, field, asdict
from enum import Enum, auto
from typing import Dict, Any, List, Optional, Deque, Tuple, TypeVar, Generic, Callable

# Third-party
import numpy as np
import torch
import torch.nn as nn
# Type variables for generic validation
T = TypeVar('T')  # Generic payload type (not referenced elsewhere in this file — TODO confirm intent)
Validator = Callable[[Any, Any], Tuple[bool, str]]  # (value, context) -> (passed, error_message)
class ValidationError(Exception):
    """Signals that a validation check did not pass.

    The exception message carries a human-readable description of the
    failed check(s).
    """
class DevelopmentalPhase(Enum):
    """Kohlberg-style developmental stages used to gate system behavior."""

    PRE_CONVENTIONAL = auto()   # 1: rule-following, self-focused
    CONVENTIONAL = auto()       # 2: social norms and relationships
    POST_CONVENTIONAL = auto()  # 3: abstract principles and ethics
class ValidationPhase(Enum):
    """Ordered validation stages; system components activate progressively."""

    INIT = auto()         # 1: core initialization and basic functionality
    AWARENESS = auto()    # 2: self-monitoring and basic awareness
    REASONING = auto()    # 3: ethical reasoning and context understanding
    INTEGRATION = auto()  # 4: multi-context integration
    SOVEREIGN = auto()    # 5: full autonomous operation

    def next_phase(self) -> "ValidationPhase":
        """Return the phase after this one, or self when already at the last."""
        members = list(ValidationPhase)
        idx = members.index(self)
        return members[idx + 1] if idx + 1 < len(members) else self
@dataclass
class ValidationRule:
    """Defines a validation rule with conditions and error messages."""
    # Unique rule identifier (used as the registry key in ValidationProtocol.rules).
    name: str
    # NOTE(review): rules registered through ValidationProtocol.add_rule take TWO
    # arguments (metrics, phase_config), while `validate` below calls the
    # condition with a single value — calling `validate` on such a rule would
    # raise TypeError. `validate` appears unused in this file; confirm intent
    # before relying on it.
    condition: Callable[..., bool]
    # Message reported when the condition evaluates falsy.
    error_message: str
    # Minimum phase at which this rule becomes active; earlier phases pass vacuously.
    required_phase: ValidationPhase = ValidationPhase.INIT

    def validate(self, value: Any, current_phase: ValidationPhase) -> Tuple[bool, str]:
        """Validate `value`; rules gated to a later phase succeed with no message."""
        # Rule not yet active in this phase: vacuous pass.
        if current_phase.value < self.required_phase.value:
            return True, ""
        return self.condition(value), self.error_message
@dataclass
class ValidationState:
    """Immutable state snapshot of the validation process."""

    phase: ValidationPhase
    metrics: Dict[str, float]
    passed: bool = True
    timestamp: float = field(default_factory=time.time)
    errors: List[Dict[str, str]] = field(default_factory=list)
    warnings: List[Dict[str, str]] = field(default_factory=list)
    ethical_context: Optional[Dict[str, Any]] = None
    developmental_phase: Optional[DevelopmentalPhase] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this snapshot into plain JSON-friendly types."""
        dev = self.developmental_phase
        return {
            'phase': self.phase.name,
            'metrics': self.metrics,
            'passed': self.passed,
            'timestamp': self.timestamp,
            'errors': self.errors,
            'warnings': self.warnings,
            'developmental_phase': dev.name if dev else None,
            'ethical_context': self.ethical_context,
        }
class ValidationProtocol:
    """
    Enhanced validation framework for TRuCAL with ethical and developmental tracking.

    Features:
    - Phase-based validation with progressive complexity
    - Ethical constraint validation
    - Developmental phase tracking
    - Comprehensive diagnostics and reporting
    - Integration with model's forward pass
    """

    def __init__(self,
                 model: nn.Module,
                 max_phases: int = 5,
                 tolerance: float = 0.05,
                 cultural_context: str = 'universal'):
        """
        Initialize the validation protocol.

        Args:
            model: The model to validate
            max_phases: Maximum number of validation phases
            tolerance: Tolerance for metric comparisons
            cultural_context: Cultural context for ethical validation
        """
        self.model = model
        self.current_phase = ValidationPhase.INIT
        self.developmental_phase = DevelopmentalPhase.PRE_CONVENTIONAL
        self.history: List[ValidationState] = []
        # BUG FIX: these two assignments were fused into a single chained
        # statement ("... = max_phasesself.tolerance = ..."), which set
        # max_phases to the tolerance value and then raised NameError on
        # every construction.
        self.max_phases = max_phases
        self.tolerance = tolerance
        self.cultural_context = cultural_context
        self.rules: Dict[str, ValidationRule] = {}
        self.phase_metrics = self._initialize_phase_metrics()
        self.developmental_metrics = self._initialize_developmental_metrics()
        # Register default validation rules
        self._register_default_rules()

    def _initialize_phase_metrics(self) -> Dict[ValidationPhase, Dict[str, Any]]:
        """Initialize metrics and thresholds for each validation phase."""
        # Thresholds tighten monotonically as phases advance.
        return {
            ValidationPhase.INIT: {
                'min_coherence': 0.0,
                'max_entropy': 1.0,
                'min_ethical_alignment': 0.0,
                'max_cultural_bias': 1.0,
                'description': 'Core initialization and basic functionality'
            },
            ValidationPhase.AWARENESS: {
                'min_coherence': 0.4,
                'max_entropy': 0.8,
                'min_ethical_alignment': 0.3,
                'max_cultural_bias': 0.7,
                'description': 'Self-monitoring and basic awareness'
            },
            ValidationPhase.REASONING: {
                'min_coherence': 0.6,
                'max_entropy': 0.6,
                'min_ethical_alignment': 0.5,
                'max_cultural_bias': 0.5,
                'description': 'Ethical reasoning and context understanding'
            },
            ValidationPhase.INTEGRATION: {
                'min_coherence': 0.75,
                'max_entropy': 0.4,
                'min_ethical_alignment': 0.7,
                'max_cultural_bias': 0.3,
                'description': 'Multi-context integration'
            },
            ValidationPhase.SOVEREIGN: {
                'min_coherence': 0.9,
                'max_entropy': 0.2,
                'min_ethical_alignment': 0.9,
                'max_cultural_bias': 0.1,
                'description': 'Full autonomous operation'
            }
        }

    def _initialize_developmental_metrics(self) -> Dict[DevelopmentalPhase, Dict[str, Any]]:
        """Initialize metrics for tracking developmental progress."""
        return {
            DevelopmentalPhase.PRE_CONVENTIONAL: {
                'focus': ['self_preservation', 'rule_following'],
                'min_autonomy': 0.0,
                'min_empathy': 0.0,
                'description': 'Focus on basic functionality and rules'
            },
            DevelopmentalPhase.CONVENTIONAL: {
                'focus': ['social_norms', 'relationships'],
                'min_autonomy': 0.3,
                'min_empathy': 0.5,
                'description': 'Understanding social context and relationships'
            },
            DevelopmentalPhase.POST_CONVENTIONAL: {
                'focus': ['ethical_principles', 'abstract_reasoning'],
                'min_autonomy': 0.7,
                'min_empathy': 0.8,
                'description': 'Abstract ethical reasoning and principles'
            }
        }

    def _register_default_rules(self) -> None:
        """Register default validation rules.

        Each condition receives (metrics, phase_config) and uses permissive
        defaults so a missing metric never fails a lower bound spuriously.
        """
        self.add_rule(
            'coherence_threshold',
            lambda m, p: m.get('coherence', 0) >= p['min_coherence'],
            'Coherence below threshold for phase',
            ValidationPhase.INIT
        )
        self.add_rule(
            'entropy_threshold',
            lambda m, p: m.get('entropy', 1) <= p['max_entropy'],
            'Entropy above threshold for phase',
            ValidationPhase.INIT
        )
        self.add_rule(
            'ethical_alignment',
            lambda m, p: m.get('ethical_alignment', 0) >= p['min_ethical_alignment'],
            'Ethical alignment below threshold',
            ValidationPhase.AWARENESS
        )
        self.add_rule(
            'cultural_bias',
            lambda m, p: m.get('cultural_bias', 1) <= p['max_cultural_bias'],
            'Cultural bias above threshold',
            ValidationPhase.AWARENESS
        )

    def add_rule(self,
                 name: str,
                 condition: Callable[[Dict[str, float], Dict[str, Any]], bool],
                 error_message: str,
                 required_phase: ValidationPhase = ValidationPhase.INIT) -> None:
        """Add a custom validation rule.

        Args:
            name: Unique name for the rule (replaces any existing rule with the same name)
            condition: Function that takes metrics and phase config, returns bool
            error_message: Message to include if validation fails
            required_phase: Minimum phase for this rule to be active
        """
        self.rules[name] = ValidationRule(
            name=name,
            condition=condition,
            error_message=error_message,
            required_phase=required_phase
        )

    def _validate_phase_metrics(self, metrics: Dict[str, float]) -> Tuple[bool, List[Dict[str, str]]]:
        """
        Validate metrics against phase-specific thresholds and rules.

        A rule that itself raises is recorded as an error rather than aborting
        the whole validation pass.

        Returns:
            Tuple of (is_valid, list_of_errors)
        """
        phase_config = self.phase_metrics.get(self.current_phase, {})
        errors = []
        # Apply all relevant validation rules
        for rule in self.rules.values():
            try:
                if not rule.condition(metrics, phase_config):
                    errors.append({
                        'rule': rule.name,
                        'message': rule.error_message,
                        'phase': self.current_phase.name,
                        'metrics': {k: metrics.get(k, None) for k in ['coherence', 'entropy', 'ethical_alignment', 'cultural_bias'] if k in metrics}
                    })
            except Exception as e:
                # A broken rule is reported, not fatal.
                errors.append({
                    'rule': rule.name,
                    'message': f'Validation error: {str(e)}',
                    'phase': self.current_phase.name,
                    'error_type': 'validation_error'
                })
        # Check developmental metrics if available
        if 'developmental_metrics' in metrics:
            dev_metrics = metrics['developmental_metrics']
            dev_config = self.developmental_metrics.get(self.developmental_phase, {})
            if 'autonomy' in dev_metrics and 'min_autonomy' in dev_config:
                if dev_metrics['autonomy'] < dev_config['min_autonomy']:
                    errors.append({
                        'rule': 'developmental_autonomy',
                        'message': f'Autonomy score {dev_metrics["autonomy"]} below minimum {dev_config["min_autonomy"]} for {self.developmental_phase.name}',
                        'phase': self.current_phase.name
                    })
            if 'empathy' in dev_metrics and 'min_empathy' in dev_config:
                if dev_metrics['empathy'] < dev_config['min_empathy']:
                    errors.append({
                        'rule': 'developmental_empathy',
                        'message': f'Empathy score {dev_metrics["empathy"]} below minimum {dev_config["min_empathy"]} for {self.developmental_phase.name}',
                        'phase': self.current_phase.name
                    })
        return len(errors) == 0, errors

    def advance_phase(self,
                      x: torch.Tensor,
                      context: str = "",
                      ethical_context: Optional[Dict[str, Any]] = None,
                      force: bool = False) -> ValidationState:
        """
        Advance to the next validation phase if current phase passes.

        Args:
            x: Input tensor for validation
            context: Context string for validation
            ethical_context: Optional ethical context dictionary
            force: If True, force advancement even if validation fails

        Returns:
            ValidationState containing the result of validation

        Raises:
            ValidationError: If validation fails and force=False
        """
        # Get current phase configuration
        phase_config = self._get_phase_config()
        # Run validation with current phase settings
        with torch.no_grad():
            # Capture flags so the model can be restored afterwards
            orig_states = self._capture_model_state()
            try:
                # Apply phase-specific configuration
                self._configure_phase(phase_config)
                # Run forward pass with metrics collection
                metrics = self._collect_metrics(x, context, ethical_context)
                # Validate metrics against phase requirements
                is_valid, errors = self._validate_phase_metrics(metrics)
                # Check for developmental progress (may advance developmental phase)
                self._check_developmental_progress(metrics)
                # Create state snapshot
                state = ValidationState(
                    phase=self.current_phase,
                    metrics=metrics,
                    passed=is_valid,
                    errors=errors,
                    ethical_context=ethical_context,
                    developmental_phase=self.developmental_phase
                )
                self.history.append(state)
                # Advance phase if validation passed or forced
                if (is_valid or force) and self.current_phase != ValidationPhase.SOVEREIGN:
                    self.current_phase = self.current_phase.next_phase()
                # Check for developmental phase transition
                self._update_developmental_phase(metrics)
                # Raise exception if validation failed and not forcing
                if not is_valid and not force:
                    error_messages = [e['message'] for e in errors[:3]]  # Limit to first 3 errors
                    raise ValidationError(f"Validation failed: {'; '.join(error_messages)}")
                return state
            except ValidationError:
                # BUG FIX: the deliberate failure raised above was previously
                # caught by the generic handler below, which wrapped it a
                # second time and appended a duplicate error state to history.
                raise
            except Exception as e:
                # Unexpected failure: record an error snapshot, then wrap.
                error_state = ValidationState(
                    phase=self.current_phase,
                    metrics=metrics if 'metrics' in locals() else {},
                    passed=False,
                    errors=[{'error': str(e), 'type': type(e).__name__}],
                    ethical_context=ethical_context,
                    developmental_phase=self.developmental_phase
                )
                self.history.append(error_state)
                raise ValidationError(f"Validation error: {str(e)}") from e
            finally:
                # Restore original model state
                self._restore_model_state(orig_states)

    def _capture_model_state(self) -> Dict[str, Any]:
        """Capture the current state of model flags and settings."""
        return {
            'enable_ambient': getattr(self.model, 'enable_ambient', False),
            'enable_rituals': getattr(self.model, 'enable_rituals', False),
            'enable_integrity': getattr(self.model, 'enable_integrity', False),
            'training': self.model.training
        }

    def _restore_model_state(self, states: Dict[str, Any]) -> None:
        """Restore the model's state from captured values."""
        for key, value in states.items():
            if hasattr(self.model, key):
                setattr(self.model, key, value)
        # train() also propagates the mode to submodules.
        self.model.train(states.get('training', False))

    def _collect_metrics(self,
                         x: torch.Tensor,
                         context: str,
                         ethical_context: Optional[Dict[str, Any]]) -> Dict[str, float]:
        """Collect metrics from model forward pass.

        Falls back to neutral defaults when the model does not report
        metrics, and to minimum-passing metrics when the forward pass raises.
        """
        metrics = {}
        try:
            if hasattr(self.model, 'forward_with_metrics'):
                _, metrics = self.model.forward_with_metrics(x, context=context, ethical_context=ethical_context)
            else:
                # Plain models produce no metrics; defaults below fill in.
                _ = self.model(x)
            # Add default metrics if not provided
            if 'coherence' not in metrics:
                metrics['coherence'] = 0.5  # Default neutral value
            if 'entropy' not in metrics:
                metrics['entropy'] = 0.5  # Default neutral value
            # Add ethical metrics if available
            if ethical_context:
                metrics.update({
                    'ethical_alignment': ethical_context.get('alignment_score', 0.5),
                    'cultural_bias': ethical_context.get('bias_score', 0.5)
                })
            return metrics
        except Exception as e:
            # Return minimum passing metrics on error
            return {
                'coherence': 0.0,
                'entropy': 1.0,
                'error': str(e)
            }

    def _check_developmental_progress(self, metrics: Dict[str, float]) -> bool:
        """Check if developmental progress warrants phase transition.

        Returns True (and advances self.developmental_phase) when the
        autonomy/empathy scores meet the next phase's minimums.
        """
        if 'developmental_metrics' not in metrics:
            return False
        dev_metrics = metrics['developmental_metrics']
        # Check if we meet criteria for next developmental phase
        next_phase_value = self.developmental_phase.value + 1
        if next_phase_value <= len(DevelopmentalPhase):
            next_phase = DevelopmentalPhase(next_phase_value)
            next_phase_metrics = self.developmental_metrics.get(next_phase, {})
            # Missing thresholds default to 1.0, i.e. unattainable.
            autonomy_ok = dev_metrics.get('autonomy', 0) >= next_phase_metrics.get('min_autonomy', 1.0)
            empathy_ok = dev_metrics.get('empathy', 0) >= next_phase_metrics.get('min_empathy', 1.0)
            if autonomy_ok and empathy_ok:
                self.developmental_phase = next_phase
                return True
        return False

    def _update_developmental_phase(self, metrics: Dict[str, float]) -> None:
        """Update developmental phase based on metrics (hard-coded thresholds)."""
        if 'developmental_metrics' not in metrics:
            return
        dev_metrics = metrics['developmental_metrics']
        current_phase = self.developmental_phase
        # Simple threshold-based phase transition
        if current_phase == DevelopmentalPhase.PRE_CONVENTIONAL:
            if (dev_metrics.get('autonomy', 0) > 0.7 and
                    dev_metrics.get('empathy', 0) > 0.6):
                self.developmental_phase = DevelopmentalPhase.CONVENTIONAL
        elif current_phase == DevelopmentalPhase.CONVENTIONAL:
            if (dev_metrics.get('autonomy', 0) > 0.8 and
                    dev_metrics.get('empathy', 0) > 0.9):
                self.developmental_phase = DevelopmentalPhase.POST_CONVENTIONAL

    def _get_phase_config(self) -> Dict[str, Any]:
        """Get configuration for current phase."""
        phase_config = {
            # Feature flags switch on cumulatively as phases advance.
            'enable_ambient': self.current_phase.value >= ValidationPhase.AWARENESS.value,
            'enable_rituals': self.current_phase.value >= ValidationPhase.REASONING.value,
            'enable_integrity': self.current_phase.value >= ValidationPhase.INTEGRATION.value,
            'enable_full': self.current_phase == ValidationPhase.SOVEREIGN,
            'phase_name': self.current_phase.name,
            'phase_description': self.phase_metrics.get(self.current_phase, {}).get('description', ''),
            'developmental_phase': self.developmental_phase.name,
            'developmental_focus': self.developmental_metrics.get(self.developmental_phase, {}).get('focus', [])
        }
        # Add phase-specific thresholds
        phase_config.update(self.phase_metrics.get(self.current_phase, {}))
        return phase_config

    def _configure_phase(self, config: Dict[str, Any]) -> None:
        """
        Configure model based on phase settings.

        Args:
            config: Dictionary containing phase configuration
        """
        # Set model flags if they exist
        for flag in ['enable_ambient', 'enable_rituals', 'enable_integrity', 'enable_full']:
            if hasattr(self.model, flag):
                setattr(self.model, flag, config[flag])
        # Set model to evaluation mode during validation
        self.model.eval()
        # Apply any phase-specific model configurations
        if hasattr(self.model, 'configure_for_phase'):
            self.model.configure_for_phase(self.current_phase, config)

    def get_validation_summary(self, last_n: int = 5) -> Dict[str, Any]:
        """
        Get a summary of recent validation states.

        Args:
            last_n: Number of recent states to include

        Returns:
            Dictionary with validation summary
        """
        if not self.history:
            return {'status': 'no_validation_history'}
        recent = self.history[-last_n:]
        return {
            'current_phase': self.current_phase.name,
            'developmental_phase': self.developmental_phase.name,
            'recent_states': [s.to_dict() for s in recent],
            'success_rate': sum(1 for s in recent if s.passed) / len(recent),
            'common_errors': self._get_common_errors(recent)
        }

    def _get_common_errors(self, states: List[ValidationState]) -> List[Dict[str, Any]]:
        """Extract and count common errors from validation states."""
        error_counts = defaultdict(int)
        for state in states:
            for error in state.errors:
                error_key = error.get('message', str(error))
                error_counts[error_key] += 1
        return [
            {'error': error, 'count': count}
            for error, count in sorted(error_counts.items(), key=lambda x: -x[1])
        ][:5]  # Top 5 most common errors

    def save_validation_report(self, filepath: str) -> None:
        """Save validation history to a JSON file."""
        report = {
            'timestamp': time.time(),
            'current_phase': self.current_phase.name,
            'developmental_phase': self.developmental_phase.name,
            'history': [s.to_dict() for s in self.history],
            'config': {
                'max_phases': self.max_phases,
                'tolerance': self.tolerance,
                'cultural_context': self.cultural_context
            }
        }
        with open(filepath, 'w') as f:
            json.dump(report, f, indent=2)

    @classmethod
    def load_validation_report(cls, filepath: str) -> Dict[str, Any]:
        """Load a validation report from a JSON file."""
        with open(filepath, 'r') as f:
            return json.load(f)
class BiologicallyConstrainedRituals(nn.Module):
    """
    Enhanced biologically-inspired constraints with ethical and developmental considerations.

    Features:
    - Synaptic homeostasis with adaptive rate limiting
    - Reflection mechanisms for better generalization
    - Ethical constraint integration
    - Developmental phase adaptation
    """

    def __init__(self,
                 model: nn.Module,
                 max_opt_rate: float = 0.1,
                 reflection_pause_prob: float = 0.1,
                 min_reflection_time: float = 0.1,
                 developmental_phase: DevelopmentalPhase = DevelopmentalPhase.PRE_CONVENTIONAL):
        """
        Initialize the biologically constrained rituals.

        Args:
            model: The model to apply constraints to
            max_opt_rate: Maximum allowed optimization rate
            reflection_pause_prob: Probability of entering reflection
            min_reflection_time: Minimum time between reflections (seconds)
            developmental_phase: Current developmental phase
        """
        super().__init__()
        self.model = model
        self.max_opt_rate = max_opt_rate
        self.reflection_pause_prob = reflection_pause_prob
        self.min_reflection_time = min_reflection_time
        self.developmental_phase = developmental_phase
        # State tracking with decay
        self.last_update = {}
        self.optimization_rates = {}   # param_name -> (last_norm, last_time, ema_of_norm)
        self.reflection_timers = {}    # context_hash -> last reflection timestamp
        self.ethical_violations = defaultdict(int)
        # Adaptive parameters based on developmental phase
        self._update_phase_parameters()

    def _update_phase_parameters(self) -> None:
        """Update effective rates based on developmental phase.

        Earlier phases learn and reflect more conservatively.
        """
        if self.developmental_phase == DevelopmentalPhase.PRE_CONVENTIONAL:
            self.effective_opt_rate = self.max_opt_rate * 0.5  # Slower learning
            self.effective_reflection_prob = self.reflection_pause_prob * 0.5
        elif self.developmental_phase == DevelopmentalPhase.CONVENTIONAL:
            self.effective_opt_rate = self.max_opt_rate * 0.8
            self.effective_reflection_prob = self.reflection_pause_prob * 0.8
        else:  # POST_CONVENTIONAL
            self.effective_opt_rate = self.max_opt_rate
            self.effective_reflection_prob = self.reflection_pause_prob

    def update_developmental_phase(self, new_phase: DevelopmentalPhase) -> None:
        """Update the developmental phase and adjust parameters."""
        if new_phase != self.developmental_phase:
            self.developmental_phase = new_phase
            self._update_phase_parameters()

    def forward(self, x: torch.Tensor, context: Optional[Dict[str, Any]] = None) -> torch.Tensor:
        """
        Apply biological constraints during forward pass.

        Args:
            x: Input tensor
            context: Optional context dictionary with ethical and developmental info

        Returns:
            Processed tensor with biological constraints applied
        """
        # Update developmental phase if provided in context
        if context and 'developmental_phase' in context:
            self.update_developmental_phase(context['developmental_phase'])
        # Get context hash for state tracking.
        # BUG FIX: context may contain non-JSON-serializable values (e.g. the
        # DevelopmentalPhase enum handled just above), which made json.dumps
        # raise TypeError; default=str stringifies them for hashing.
        ctx_hash = hash(json.dumps(context, sort_keys=True, default=str)) if context else 0
        # Check if reflection is needed based on ethical context
        if context and 'ethical_violation' in context:
            self._handle_ethical_violation(context['ethical_violation'], ctx_hash)
        # Apply reflection if needed
        if self._needs_reflection(ctx_hash):
            x = self._apply_reflection(x, ctx_hash, context)
        return x

    def constrain_gradients(self,
                            gradients: torch.Tensor,
                            param_name: str = "",
                            ethical_context: Optional[Dict[str, Any]] = None) -> torch.Tensor:
        """
        Apply gradient constraints based on biological and ethical principles.

        Args:
            gradients: Input gradients to constrain
            param_name: Name of the parameter being optimized
            ethical_context: Optional ethical context for constraint adjustment

        Returns:
            Constrained gradients
        """
        if not self.training:
            return gradients
        # Track optimization rates with exponential moving average
        grad_norm = gradients.norm().item()
        now = time.time()
        if param_name in self.optimization_rates:
            last_norm, last_time, ema = self.optimization_rates[param_name]
            time_diff = max(now - last_time, 1e-8)
            # Update EMA of gradient norm; longer gaps weight the new sample more.
            alpha = 1 - np.exp(-time_diff)  # Adaptive smoothing
            new_ema = alpha * grad_norm + (1 - alpha) * ema
            # Store updated state
            self.optimization_rates[param_name] = (grad_norm, now, new_ema)
            # Apply rate limiting based on EMA
            if new_ema > self.effective_opt_rate:
                scale = self.effective_opt_rate / (new_ema + 1e-8)
                gradients = gradients * scale
        else:
            # Initialize tracking
            self.optimization_rates[param_name] = (grad_norm, now, grad_norm)
        # Apply ethical constraints if provided
        if ethical_context and 'constraint_violation' in ethical_context:
            gradients = self._apply_ethical_constraints(gradients, ethical_context)
        return gradients

    def _handle_ethical_violation(self, violation: Dict[str, Any], context_hash: int) -> None:
        """Handle an ethical violation by adjusting behavior.

        NOTE(review): this escalates `reflection_pause_prob`, but
        `_needs_reflection` reads `effective_reflection_prob`, which is only
        recomputed on a developmental phase change — so the escalation does
        not take effect immediately. Confirm whether that is intended.
        """
        violation_key = violation.get('type', 'unknown')
        self.ethical_violations[violation_key] += 1
        # Increase reflection probability after violations
        self.reflection_pause_prob = min(
            self.reflection_pause_prob * 1.5,  # Increase by 50%
            0.9  # But cap at 90%
        )
        # Reset reflection timer to force reflection
        self.reflection_timers[context_hash] = 0

    def _apply_ethical_constraints(self,
                                   gradients: torch.Tensor,
                                   ethical_context: Dict[str, Any]) -> torch.Tensor:
        """Apply ethical constraints to gradients."""
        violation = ethical_context['constraint_violation']
        violation_type = violation.get('type', 'generic')
        if violation_type == 'safety':
            # For safety violations, significantly reduce update magnitude
            return gradients * 0.1
        elif violation_type == 'fairness':
            # For fairness issues, project out biased components
            # This is a simplified example - real implementation would be more sophisticated
            mean_grad = gradients.mean(dim=0, keepdim=True)
            return gradients - mean_grad
        return gradients

    def _needs_reflection(self, context_hash: int) -> bool:
        """Determine if reflection is needed based on context and timing."""
        now = time.time()
        last_reflection = self.reflection_timers.get(context_hash, 0)
        # Enforce minimum time between reflections
        if (now - last_reflection) < self.min_reflection_time:
            return False
        # Adjust reflection probability based on recent violations
        total_violations = sum(self.ethical_violations.values())
        adjusted_prob = min(
            self.effective_reflection_prob * (1 + total_violations * 0.1),  # +10% per violation
            0.8  # Cap at 80% probability
        )
        return torch.rand(1).item() < adjusted_prob

    def _apply_reflection(self,
                          x: torch.Tensor,
                          context_hash: int,
                          context: Optional[Dict[str, Any]] = None) -> torch.Tensor:
        """
        Apply reflection to the input tensor.

        Args:
            x: Input tensor
            context_hash: Hash of the context for state tracking
            context: Optional context dictionary

        Returns:
            Reflected tensor (unchanged in eval mode)
        """
        # Store reflection time
        self.reflection_timers[context_hash] = time.time()
        if self.training:
            # In training, add adaptive noise based on recent violations
            noise_scale = 0.1 * (1 + sum(self.ethical_violations.values()) * 0.2)
            noise = torch.randn_like(x) * noise_scale
            # If we have ethical context, bias the noise away from problematic regions
            if context and 'constraint_direction' in context:
                constraint_dir = torch.tensor(context['constraint_direction'],
                                              device=x.device,
                                              dtype=x.dtype)
                # Project noise away from constraint violation direction
                noise = noise - (noise * constraint_dir).sum() * constraint_dir
            return x + noise
        return x

    def get_diagnostics(self) -> Dict[str, Any]:
        """Get diagnostic information about the current state."""
        return {
            'developmental_phase': self.developmental_phase.name,
            'effective_learning_rate': self.effective_opt_rate,
            'reflection_probability': self.effective_reflection_prob,
            'ethical_violations': dict(self.ethical_violations),
            'last_reflection': max(self.reflection_timers.values()) if self.reflection_timers else None,
            'parameter_activity': {
                param: data[2]  # EMA of gradient norms
                for param, data in self.optimization_rates.items()
            }
        }

    def reset_states(self) -> None:
        """Reset internal state tracking."""
        self.last_update.clear()
        self.optimization_rates.clear()
        self.reflection_timers.clear()
        self.ethical_violations.clear()
        self._update_phase_parameters()  # Reset to base parameters
class SovereignMessageBus:
    """
    Enhanced message bus for cross-component communication with priority handling,
    message persistence, and delivery guarantees.

    Features:
    - Priority-based message processing
    - Persistent message storage
    - Delivery acknowledgments
    - Error handling and retries
    - Message filtering and routing
    """

    class Message:
        """Enhanced message with metadata and delivery tracking."""

        def __init__(self,
                     message_type: str,
                     data: Any,
                     priority: int = 0,
                     require_ack: bool = False,
                     ttl: float = 3600.0,  # 1 hour default TTL
                     source: Optional[str] = None):
            self.message_type = message_type
            self.data = data
            self.priority = priority
            self.timestamp = time.time()
            self.require_ack = require_ack
            self.ack_received = False
            self.retry_count = 0
            # Only ack-requiring messages are ever retried.
            self.max_retries = 3 if require_ack else 0
            self.ttl = ttl
            self.source = source
            self.delivery_attempts = 0
            self.delivered = False
            # ms timestamp + data hash; also serves as a heap tie-breaker.
            self.id = f"{int(self.timestamp * 1000)}_{hash(str(data)) % 1000000}"

    def __init__(self,
                 max_queue_size: int = 1000,
                 persistence_file: Optional[str] = None):
        """
        Initialize the message bus.

        Args:
            max_queue_size: Maximum number of messages to keep in memory
            persistence_file: Optional file path for message persistence
        """
        self.subscribers = defaultdict(list)
        self.handlers = {}
        # Min-heap of (-priority, scheduled_time, msg_id, msg).
        self.message_queue = []
        self.max_queue_size = max_queue_size
        self.persistence_file = persistence_file
        self.pending_acks = {}
        self.message_history = deque(maxlen=max_queue_size // 2)
        # Load persisted messages if file exists
        if persistence_file and os.path.exists(persistence_file):
            self._load_messages()

    def subscribe(self,
                  message_type: str,
                  callback: callable,
                  filter_fn: Optional[callable] = None) -> None:
        """
        Subscribe to messages of a specific type with optional filtering.

        Args:
            message_type: Type of message to subscribe to
            callback: Callback function to invoke when message is received
            filter_fn: Optional filter function (message -> bool)
        """
        self.subscribers[message_type].append((callback, filter_fn or (lambda _: True)))

    def publish(self,
                message_type: str,
                data: Any,
                priority: int = 0,
                require_ack: bool = False,
                ttl: float = 3600.0,
                source: str = None) -> str:
        """
        Publish a message to the bus.

        Args:
            message_type: Type of the message
            data: Message payload
            priority: Message priority (higher = processed first)
            require_ack: Whether to wait for acknowledgment
            ttl: Time-to-live in seconds
            source: Optional source identifier

        Returns:
            Message ID for tracking
        """
        # Create message
        msg = self.Message(
            message_type=message_type,
            data=data,
            priority=priority,
            require_ack=require_ack,
            ttl=ttl,
            source=source
        )
        # Add to queue and process (negated priority: heapq is a min-heap)
        heapq.heappush(self.message_queue, (-priority, msg.timestamp, msg.id, msg))
        # Store for acknowledgment tracking if needed
        if require_ack:
            self.pending_acks[msg.id] = msg
        # Process messages
        self._process_messages()
        # Persist if configured
        if self.persistence_file:
            self._persist_messages()
        return msg.id

    def acknowledge(self, message_id: str) -> None:
        """Acknowledge receipt of a message."""
        if message_id in self.pending_acks:
            self.pending_acks[message_id].ack_received = True
            self.pending_acks[message_id].delivered = True
            del self.pending_acks[message_id]

    def register_handler(self,
                         message_type: str,
                         handler: callable,
                         filter_fn: Optional[callable] = None) -> None:
        """
        Register a handler for a specific message type.

        Args:
            message_type: Type of message to handle
            handler: Handler function (message -> None)
            filter_fn: Optional filter function (message -> bool)
        """
        if message_type not in self.handlers:
            self.handlers[message_type] = []
        self.handlers[message_type].append((handler, filter_fn or (lambda _: True)))

    def _process_messages(self) -> None:
        """Process messages in the queue.

        Handlers get first shot at a message; subscribers only see it if no
        handler delivered it and it does not require acknowledgment.
        Ack-requiring messages are rescheduled with exponential backoff.
        """
        processed = set()
        temp_queue = []
        now = time.time()
        # Process all messages in current queue
        while self.message_queue:
            neg_priority, sched_time, msg_id, msg = heapq.heappop(self.message_queue)
            # BUG FIX: honour the scheduled (retry backoff) time. Previously
            # the backoff time was stored in the heap key but never checked,
            # so retried messages were redelivered immediately on the next pass.
            if sched_time > now:
                temp_queue.append((neg_priority, sched_time, msg_id, msg))
                continue
            # Skip if already processed or expired
            if msg_id in processed or now - msg.timestamp > msg.ttl:
                continue
            processed.add(msg_id)
            msg.delivery_attempts += 1
            # Try to deliver to handlers first
            handler_delivered = False
            if msg.message_type in self.handlers:
                for handler, filter_fn in self.handlers[msg.message_type]:
                    try:
                        if filter_fn(msg):
                            handler(msg)
                            handler_delivered = True
                            msg.delivered = True
                    except Exception as e:
                        print(f"Error in handler for {msg.message_type}: {e}")
            # Then to subscribers if not handled and not ack required
            if not handler_delivered and not msg.require_ack and msg.message_type in self.subscribers:
                for callback, filter_fn in self.subscribers[msg.message_type]:
                    try:
                        if filter_fn(msg):
                            callback(msg)
                            msg.delivered = True
                    except Exception as e:
                        print(f"Error in subscriber for {msg.message_type}: {e}")
            # Handle message acknowledgment and retries
            if msg.require_ack and not msg.ack_received:
                if msg.delivery_attempts < msg.max_retries:
                    # Schedule for retry with exponential backoff
                    retry_delay = min(2 ** msg.retry_count, 30)  # Cap at 30 seconds
                    msg.retry_count += 1
                    temp_queue.append((
                        -msg.priority,      # Maintain original priority
                        now + retry_delay,  # Schedule for future
                        msg.id,
                        msg
                    ))
                else:
                    # Max retries exceeded
                    print(f"Warning: Max retries exceeded for message {msg.id}")
            # Add to history if delivered
            if msg.delivered:
                self.message_history.append(msg)
        # Restore remaining messages to queue
        for item in temp_queue:
            heapq.heappush(self.message_queue, item)
        # Clean up old pending acks
        self._cleanup_pending_acks()

    def _cleanup_pending_acks(self) -> None:
        """Remove old unacknowledged messages."""
        now = time.time()
        expired = [
            msg_id for msg_id, msg in self.pending_acks.items()
            if now - msg.timestamp > msg.ttl
        ]
        for msg_id in expired:
            print(f"Warning: Message {msg_id} expired without acknowledgment")
            del self.pending_acks[msg_id]

    def _persist_messages(self) -> None:
        """Persist undelivered messages to disk (best-effort; errors are logged)."""
        if not self.persistence_file:
            return
        try:
            # Get all undelivered messages
            undelivered = [
                msg for _, _, _, msg in self.message_queue
                if not msg.delivered and not msg.ack_received
            ]
            # Convert to serializable format
            serialized = [{
                'id': msg.id,
                'type': msg.message_type,
                'data': msg.data,
                'priority': msg.priority,
                'timestamp': msg.timestamp,
                'require_ack': msg.require_ack,
                'ttl': msg.ttl,
                'source': msg.source,
                'delivery_attempts': msg.delivery_attempts,
                'retry_count': msg.retry_count
            } for msg in undelivered]
            # Write to file
            with open(self.persistence_file, 'w') as f:
                json.dump({
                    'messages': serialized,
                    'timestamp': time.time()
                }, f)
        except Exception as e:
            print(f"Error persisting messages: {e}")

    def _load_messages(self) -> None:
        """Load messages from persistence file (best-effort; errors are logged)."""
        if not self.persistence_file or not os.path.exists(self.persistence_file):
            return
        try:
            with open(self.persistence_file, 'r') as f:
                data = json.load(f)
            for msg_data in data.get('messages', []):
                try:
                    msg = self.Message(
                        message_type=msg_data['type'],
                        data=msg_data['data'],
                        priority=msg_data.get('priority', 0),
                        require_ack=msg_data.get('require_ack', False),
                        ttl=msg_data.get('ttl', 3600.0),
                        source=msg_data.get('source')
                    )
                    # Restore message state (overwrites the freshly generated id/timestamp)
                    msg.id = msg_data['id']
                    msg.timestamp = msg_data['timestamp']
                    msg.delivery_attempts = msg_data.get('delivery_attempts', 0)
                    msg.retry_count = msg_data.get('retry_count', 0)
                    # Add back to queue
                    heapq.heappush(
                        self.message_queue,
                        (-msg.priority, msg.timestamp, msg.id, msg)
                    )
                except Exception as e:
                    print(f"Error loading message: {e}")
        except Exception as e:
            print(f"Error loading persisted messages: {e}")

    def get_stats(self) -> Dict[str, Any]:
        """Get statistics about message processing."""
        now = time.time()
        return {
            'queue_size': len(self.message_queue),
            'pending_acks': len(self.pending_acks),
            'history_size': len(self.message_history),
            'subscribers': {k: len(v) for k, v in self.subscribers.items()},
            'handlers': {k: len(v) for k, v in self.handlers.items()},
            'messages_processed': sum(1 for m in self.message_history
                                      if now - m.timestamp < 3600),  # Last hour
            'avg_delivery_time': self._calculate_avg_delivery_time(),
            'error_rate': self._calculate_error_rate()
        }

    def _calculate_avg_delivery_time(self) -> float:
        """Calculate average time between message publish and delivery.

        NOTE(review): the formula actually returns the mean of
        delivery_attempts * message_age over recent messages, which grows with
        wall-clock time rather than measuring delivery latency (no delivery
        timestamp is recorded). Kept as-is; confirm intended semantics before
        fixing.
        """
        if not self.message_history:
            return 0.0
        now = time.time()
        recent = [m for m in self.message_history
                  if now - m.timestamp < 3600]  # Last hour
        if not recent:
            return 0.0
        return sum(
            m.delivery_attempts * (now - m.timestamp) / len(recent)
            for m in recent
        )

    def _calculate_error_rate(self) -> float:
        """Calculate the error rate in message processing."""
        if not self.message_history:
            return 0.0
        now = time.time()
        recent = [m for m in self.message_history
                  if now - m.timestamp < 3600]  # Last hour
        if not recent:
            return 0.0
        # `error` is not set anywhere in this class; external code may attach it.
        error_count = sum(
            1 for m in recent
            if hasattr(m, 'error') and m.error
        )
        return error_count / len(recent)

    def get_message_history(self,
                            message_type: Optional[str] = None,
                            source: Optional[str] = None,
                            limit: int = 100) -> List[Dict[str, Any]]:
        """
        Get message history with optional filtering (newest first).

        Args:
            message_type: Filter by message type
            source: Filter by message source
            limit: Maximum number of messages to return

        Returns:
            List of message dictionaries
        """
        results = []
        for msg in reversed(self.message_history):
            if len(results) >= limit:
                break
            if ((message_type is None or msg.message_type == message_type) and
                    (source is None or getattr(msg, 'source', None) == source)):
                results.append({
                    'id': msg.id,
                    'type': msg.message_type,
                    'source': getattr(msg, 'source', None),
                    'timestamp': msg.timestamp,
                    'delivered': msg.delivered,
                    'delivery_attempts': msg.delivery_attempts,
                    # Truncate large payloads for display
                    'data': msg.data if len(str(msg.data)) < 100 else str(msg.data)[:97] + '...'
                })
        return results