| """ |
| TinyConfessionalLayer Module |
| |
| Recursive think/act confessional loop with template cycling and early stopping via coherence. |
| Implements the core THINK-ACT-COHERENCE recursion pattern inspired by LC-NE neural dynamics. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import numpy as np |
| from collections import deque, defaultdict |
| from typing import Dict, Any, Optional, Deque, List, Union |
| import random |
| from .vulnerability_spotter import VulnerabilitySpotter |
| from .ambient_core import AmbientSovereignCore |
| from .validation_protocol import ( |
| ValidationPhase, |
| ValidationProtocol, |
| BiologicallyConstrainedRituals, |
| SovereignMessageBus |
| ) |
|
|
class TinyConfessionalLayer(nn.Module):
    """
    Recursive think/act confessional loop with Windsurf Cascade integration.

    Implements phased validation and biological constraints for stable, interpretable
    neural processing with emergent ritual patterns and self-regulation.

    Args:
        d_model: Dimensionality of the model
        n_inner: Number of inner loop iterations
        max_cycles: Maximum number of think-act cycles
        trigger_thresh: Threshold for triggering special behaviors
        per_dim_kl: Whether to compute KL divergence per dimension
        enable_ambient: Enable ambient processing
        enable_windsurf: Enable Windsurf Cascade features
        max_opt_rate: Maximum optimization rate for biological constraints
        reflection_pause_prob: Probability of reflection pauses
    """
    TEMPLATES = ["prior", "evidence", "posterior", "relational_check", "moral", "action"]

    def __init__(self, d_model=256, n_inner=6, max_cycles=16, trigger_thresh=0.04,
                 per_dim_kl=False, enable_ambient=True, enable_windsurf=True,
                 max_opt_rate=0.1, reflection_pause_prob=0.1):
        super().__init__()
        self.d_model = d_model
        self.trigger_thresh = trigger_thresh
        self.per_dim_kl = per_dim_kl
        self.n_inner = n_inner
        self.max_cycles = max_cycles

        # THINK consumes [y_state, z_state, x]; ACT consumes [y_state, z_state].
        self.think_net = nn.Sequential(
            nn.Linear(d_model * 3, d_model),
            nn.ReLU(),
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model)
        )
        self.act_net = nn.Sequential(
            nn.Linear(d_model * 2, d_model),
            nn.ReLU(),
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model)
        )

        # One gated projection per confessional template.
        self.template_proj = nn.ModuleDict({
            k: nn.Sequential(
                nn.Linear(d_model, d_model * 2),
                nn.GLU(dim=-1),
                nn.LayerNorm(d_model)
            ) for k in self.TEMPLATES
        })

        self.vulnerability_spotter = VulnerabilitySpotter(d_model)

        self.ambient_core = AmbientSovereignCore(d_model, enable_ambient=enable_ambient)
        self.enable_ambient = enable_ambient

        # BUG FIX: the ambient helpers (`update_ambient_state`,
        # `apply_ambient_interventions`, `_apply_integrity_adjustments`) read
        # these attributes, but no previous revision ever initialized them.
        # Default values are best guesses matching the fallback rates used in
        # `update_ambient_state` — TODO(review): confirm each.
        self.recent_activity = deque(maxlen=50)  # rolling window of v_t means
        self.base_protest_threshold = 0.1
        self.base_pause_threshold = 0.05
        self.base_breath = 0.05            # baseline pause probability
        self.stress_response_factor = 0.5
        # NOTE(review): `self.ledger` and `self.rituals` are also required by
        # the ambient helpers, but their types are not visible in this module;
        # the owner must attach them before ambient mode is exercised.

        self.enable_windsurf = enable_windsurf
        if enable_windsurf:
            self.message_bus = SovereignMessageBus()
            self.validation_protocol = ValidationProtocol(self)
            self.bio_constraints = BiologicallyConstrainedRituals(
                model=self,
                max_opt_rate=max_opt_rate,
                reflection_pause_prob=reflection_pause_prob
            )
            self._register_message_handlers()

        # Small fixed directions used for reflective perturbations.
        self.register_buffer('sanctuary_reflection_vector',
                             torch.randn(d_model) * 0.02)
        # BUG FIX: `apply_ambient_interventions` and
        # `_apply_integrity_adjustments` use `pause_reflection_vector`, which
        # was never registered anywhere.
        self.register_buffer('pause_reflection_vector',
                             torch.randn(d_model) * 0.02)
|
|
| def update_ambient_state(self, v_t_mean: float, context_hash: str, |
| intervention_applied: bool = False, |
| intervention_success: bool = False) -> Dict[str, Any]: |
| """Centralized ambient state update and threshold adaptation.""" |
| if not self.enable_ambient: |
| return {} |
| |
| |
| ambient_state = self.ledger.get_state_summary() |
| |
| |
| self.recent_activity.append(v_t_mean) |
| |
| |
| adaptive_protest_thresh = self.ledger.get_adaptive_threshold( |
| self.base_protest_threshold, 'protest' |
| ) |
| adaptive_pause_thresh = self.ledger.get_adaptive_threshold( |
| self.base_pause_threshold, 'pause' |
| ) |
| |
| |
| activity_modulation = 1.0 |
| if self.recent_activity: |
| avg_activity = sum(self.recent_activity) / len(self.recent_activity) |
| |
| activity_modulation = 1.0 - min(avg_activity * 0.8, 0.8) |
| |
| stress_response = v_t_mean * self.stress_response_factor |
| pause_prob = self.base_breath + (stress_response * activity_modulation) |
| |
| |
| current_pause_rate = ambient_state.get('current_pause_rate', 0.05) |
| target_pause_rate = ambient_state.get('pause_rate', 0.05) |
| pause_rate_error = current_pause_rate - target_pause_rate |
| |
| |
| pause_prob *= (1.0 - pause_rate_error * 0.5) |
| pause_prob = max(0.01, min(0.3, pause_prob)) |
| |
| |
| if intervention_applied: |
| self.ledger.record_intervention( |
| intervention_type='ritual', |
| success=intervention_success, |
| context={'v_t': v_t_mean, 'context_hash': context_hash} |
| ) |
| |
| |
| return { |
| **ambient_state, |
| 'v_t_mean': v_t_mean, |
| 'adaptive_protest_threshold': adaptive_protest_thresh, |
| 'adaptive_pause_threshold': adaptive_pause_thresh, |
| 'pause_probability': pause_prob, |
| 'activity_level': avg_activity if self.recent_activity else 0.0, |
| 'sensitivity_multiplier': ambient_state.get('sensitivity', 1.0) |
| } |
|
|
| def apply_ambient_interventions(self, z_state: torch.Tensor, |
| ambient_state: Dict[str, Any], |
| context_hash: str, |
| audit_mode: bool = False) -> torch.Tensor: |
| """Apply all ambient interventions based on current state.""" |
| if not self.enable_ambient: |
| return z_state |
| |
| current_z = z_state.clone() |
| interventions_applied = [] |
| |
| |
| pause_prob = ambient_state.get('pause_probability', 0.05) |
| if random.random() < pause_prob: |
| with torch.no_grad(): |
| reflection = 0.01 * self.pause_reflection_vector.unsqueeze(0).unsqueeze(0) |
| current_z = current_z + reflection |
| interventions_applied.append(('pause', True)) |
| |
| if audit_mode: |
| print(f"[Ambient pause: v_t={ambient_state.get('v_t_mean', 0):.3f}, prob={pause_prob:.3f}]") |
| |
| |
| if self.rituals.should_apply_ritual(context_hash, ambient_state): |
| ritual_response = self.rituals.get_ritual_response(context_hash, current_z, ambient_state) |
| |
| current_z = 0.1 * ritual_response + 0.9 * current_z |
| interventions_applied.append(('ritual', True)) |
| |
| |
| if random.random() < 0.02: |
| self._apply_integrity_adjustments(ambient_state) |
| interventions_applied.append(('integrity', True)) |
| |
| |
| for intervention_type, applied in interventions_applied: |
| if applied: |
| self.ledger.record_intervention( |
| intervention_type=intervention_type, |
| success=True, |
| context={'v_t': ambient_state.get('v_t_mean', 0), |
| 'context_hash': context_hash} |
| ) |
| |
| return current_z |
|
|
| def _apply_integrity_adjustments(self, ambient_state: Dict[str, Any]) -> None: |
| """Apply subtle adjustments based on system integrity.""" |
| if not self.enable_ambient: |
| return |
| |
| protest_error = ambient_state.get('current_protest_rate', 0.1) - ambient_state.get('protest_rate', 0.1) |
| pause_error = ambient_state.get('current_pause_rate', 0.05) - ambient_state.get('pause_rate', 0.05) |
| |
| with torch.no_grad(): |
| |
| nudge_magnitude = 0.001 |
| |
| if protest_error < -0.05: |
| self.pause_reflection_vector.data += nudge_magnitude * torch.randn_like(self.pause_reflection_vector) |
| elif protest_error > 0.1: |
| self.pause_reflection_vector.data -= nudge_magnitude * torch.randn_like(self.pause_reflection_vector) |
| |
| if pause_error < -0.03: |
| self.sanctuary_reflection_vector.data += nudge_magnitude * torch.randn_like(self.sanctuary_reflection_vector) |
|
|
| def compute_context_hash(self, x: torch.Tensor) -> str: |
| """Create a simple hash from input tensor for context identification.""" |
| |
| return f"{x.mean().item():.4f}_{x.std().item():.4f}" |
|
|
| def compute_coherence(self, z, tracker, evidence): |
| sim_coherence = F.cosine_similarity(z, tracker[-1], dim=-1).mean().item() |
| prior_mu, prior_std = tracker[-1].mean(), tracker[-1].std() + 1e-6 |
| curr_mu, curr_std = z.mean(), z.std() + 1e-6 |
| kl_div = torch.distributions.kl_divergence( |
| torch.distributions.Normal(curr_mu, curr_std), |
| torch.distributions.Normal(prior_mu, prior_std) |
| ).item() |
| bayes_align = 1 / (1 + kl_div) |
| return 0.7 * sim_coherence + 0.3 * bayes_align |
|
|
| def forward(self, x, attention_weights=None, audit_mode=False, context_str=""): |
| """Forward pass with recursive think-act loop and Windsurf integration. |
| |
| Args: |
| x: Input tensor of shape (batch_size, seq_len, d_model) |
| attention_weights: Optional attention weights |
| audit_mode: Enable detailed logging and validation |
| context_str: Context string for tracing and debugging |
| |
| Returns: |
| Tuple of (output_tensor, metadata_dict) |
| """ |
| batch_size, seq_len, d_model = x.shape |
| device = x.device |
| |
| |
| y_state = x.clone() |
| z_state = torch.zeros_like(x) |
| |
| |
| v_t = torch.zeros(batch_size, seq_len, 1, device=device) |
| coherence_scores = [] |
| |
| |
| metadata = { |
| 'v_t_score': 0.0, |
| 'coherence_scores': [], |
| 'reflection_count': 0, |
| 'constraint_violations': defaultdict(int), |
| 'windsurf_phase': 'INIT', |
| 'validation_metrics': {} |
| } |
| |
| |
| for cycle in range(self.max_cycles): |
| |
| |
| z_state_think = z_state[0] if isinstance(z_state, (tuple, list)) else z_state |
| |
| |
| if isinstance(z_state_think, torch.Tensor): |
| |
| if z_state_think.dim() < y_state.dim(): |
| z_state_think = z_state_think.unsqueeze(1) |
| |
| |
| if z_state_think.size(1) < y_state.size(1): |
| |
| padding = torch.zeros_like(z_state_think[:, :1]).expand(-1, y_state.size(1) - z_state_think.size(1), -1) |
| z_state_think = torch.cat([z_state_think, padding], dim=1) |
| elif z_state_think.size(1) > y_state.size(1): |
| |
| z_state_think = z_state_think[:, :y_state.size(1)] |
| |
| |
| think_input = torch.cat([y_state, z_state_think, x], dim=-1) |
| think_output = self.think_net(think_input) |
| z_state = think_output + z_state_think |
| else: |
| |
| think_input = torch.cat([y_state, y_state, x], dim=-1) |
| think_output = self.think_net(think_input) |
| z_state = think_output + y_state |
| |
| |
| if self.enable_ambient and hasattr(self, 'ambient_core'): |
| z_state = self.ambient_core(z_state) |
| |
| |
| z_state_tensor = z_state[0] if isinstance(z_state, (tuple, list)) else z_state |
| |
| |
| if isinstance(z_state_tensor, torch.Tensor): |
| |
| if z_state_tensor.dim() == 2: |
| z_state_tensor = z_state_tensor.unsqueeze(1) |
| |
| v_t = self.vulnerability_spotter(z_state_tensor) |
| |
| |
| if isinstance(v_t, (tuple, list)): |
| v_t = v_t[0] |
| |
| |
| metadata['v_t_score'] = v_t.mean().item() if torch.is_tensor(v_t) else float(v_t) |
| |
| |
| if self.enable_windsurf and hasattr(self, 'bio_constraints'): |
| |
| if self.bio_constraints._needs_reflection(hash(context_str)): |
| z_state = self.bio_constraints._apply_reflection(z_state, hash(context_str)) |
| metadata['reflection_count'] += 1 |
| |
| |
| |
| z_state_act = z_state[0] if isinstance(z_state, (tuple, list)) else z_state |
| |
| |
| if isinstance(z_state_act, torch.Tensor): |
| |
| if z_state_act.dim() < y_state.dim(): |
| z_state_act = z_state_act.unsqueeze(1) |
| |
| |
| if z_state_act.size(1) < y_state.size(1): |
| |
| padding = torch.zeros_like(z_state_act[:, :1]).expand(-1, y_state.size(1) - z_state_act.size(1), -1) |
| z_state_act = torch.cat([z_state_act, padding], dim=1) |
| elif z_state_act.size(1) > y_state.size(1): |
| |
| z_state_act = z_state_act[:, :y_state.size(1)] |
| |
| |
| act_input = torch.cat([y_state, z_state_act], dim=-1) |
| y_state = self.act_net(act_input) + y_state |
| else: |
| |
| act_input = torch.cat([y_state, y_state], dim=-1) |
| y_state = self.act_net(act_input) + y_state |
| |
| |
| if cycle > 0: |
| |
| current_coherence = 0.5 |
| |
| |
| if isinstance(z_state, torch.Tensor) and isinstance(y_state, torch.Tensor): |
| |
| z_flat = z_state.reshape(-1, d_model) |
| y_flat = y_state.reshape(-1, d_model) |
| |
| |
| min_len = min(z_flat.size(0), y_flat.size(0)) |
| if min_len > 0: |
| current_coherence = F.cosine_similarity( |
| z_flat[:min_len], |
| y_flat[:min_len], |
| dim=-1 |
| ).mean().item() |
| |
| |
| coherence_scores.append(current_coherence) |
| |
| |
| metadata['coherence_scores'] = coherence_scores[-10:] |
| |
| |
| if self._should_stop_early(cycle, current_coherence, self.max_cycles, audit_mode): |
| if audit_mode: |
| print(f"[Early stopping at cycle {cycle+1} with coherence {current_coherence:.4f}]") |
| break |
| |
| |
| if self.enable_windsurf and hasattr(self, 'bio_constraints'): |
| |
| if self.bio_constraints._needs_reflection(hash(context_str)): |
| z_state = self.bio_constraints._apply_reflection(z_state, hash(context_str)) |
| metadata['reflection_count'] += 1 |
| |
| |
| |
| z_state_act = z_state[0] if isinstance(z_state, (tuple, list)) else z_state |
| |
| |
| if isinstance(z_state_act, torch.Tensor): |
| |
| if z_state_act.dim() < y_state.dim(): |
| z_state_act = z_state_act.unsqueeze(1) |
| |
| |
| if z_state_act.size(1) < y_state.size(1): |
| |
| padding = torch.zeros_like(z_state_act[:, :1]).expand(-1, y_state.size(1) - z_state_act.size(1), -1) |
| z_state_act = torch.cat([z_state_act, padding], dim=1) |
| elif z_state_act.size(1) > y_state.size(1): |
| |
| z_state_act = z_state_act[:, :y_state.size(1)] |
| |
| |
| act_input = torch.cat([y_state, z_state_act], dim=-1) |
| |
| |
| y_state = self.act_net(act_input) + y_state |
| else: |
| |
| if y_state.dim() == 3: |
| |
| act_input = torch.cat([y_state, y_state], dim=-1) |
| y_state = y_state + self.act_net(act_input) |
| else: |
| |
| y_state_expanded = y_state.unsqueeze(1) |
| act_input = torch.cat([y_state_expanded, y_state_expanded], dim=-1) |
| y_state = y_state + self.act_net(act_input).squeeze(1) |
| |
| |
| if cycle > 0: |
| |
| current_coherence = 0.5 |
| |
| |
| z_state_for_coherence = z_state[0] if isinstance(z_state, (tuple, list)) else z_state |
| |
| |
| if isinstance(z_state_for_coherence, torch.Tensor) and isinstance(y_state, torch.Tensor): |
| |
| z_flat = z_state_for_coherence.reshape(-1, d_model) |
| y_flat = y_state.reshape(-1, d_model) |
| |
| |
| min_len = min(z_flat.size(0), y_flat.size(0)) |
| if min_len > 0: |
| current_coherence = F.cosine_similarity( |
| z_flat[:min_len], |
| y_flat[:min_len], |
| dim=-1 |
| ).mean().item() |
| |
| |
| coherence_scores.append(current_coherence) |
| |
| |
| metadata['coherence_score'] = np.mean(coherence_scores[-5:]) if coherence_scores else 0.0 |
| |
| |
| should_stop = self._should_stop_early( |
| cycle=cycle, |
| coherence=current_coherence, |
| max_cycles=self.max_cycles, |
| audit_mode=audit_mode |
| ) |
| |
| if should_stop: |
| if audit_mode: |
| print(f"Early stopping at cycle {cycle + 1} with coherence {sim_coherence:.4f}") |
| break |
| |
| |
| metadata.update({ |
| 'v_t_score': v_t_mean if 'v_t_mean' in locals() else 0.0, |
| 'coherence_score': np.mean(coherence_scores) if coherence_scores else 0.0, |
| 'cycles_run': cycle + 1, |
| 'final_phase': metadata.get('windsurf_phase', 'UNKNOWN'), |
| 'reflection_ratio': metadata['reflection_count'] / max(1, cycle + 1) |
| }) |
| |
| |
| if audit_mode and hasattr(self, 'validation_protocol'): |
| self._finalize_validation(x, metadata) |
| |
| return y_state, metadata |
| |
| def _should_stop_early(self, cycle: int, coherence: float, |
| max_cycles: int, audit_mode: bool = False) -> bool: |
| """Determine if early stopping conditions are met.""" |
| |
| if coherence > 0.85: |
| return True |
| |
| |
| current_phase = getattr(self, 'current_phase', ValidationPhase.INIT) |
| |
| if current_phase == ValidationPhase.INIT: |
| |
| return False |
| |
| elif current_phase == ValidationPhase.BREATH: |
| |
| return coherence > 0.9 or cycle >= max_cycles - 2 |
| |
| elif current_phase in [ValidationPhase.RITUALS, ValidationPhase.INTEGRITY]: |
| |
| min_cycles = min(5, max_cycles // 2) |
| return (coherence > 0.88 and cycle >= min_cycles) or cycle >= max_cycles - 1 |
| |
| |
| return cycle >= max_cycles - 1 |
| |
| def _finalize_validation(self, x: torch.Tensor, metadata: Dict[str, Any]) -> None: |
| """Finalize validation and update protocol state.""" |
| if not hasattr(self, 'validation_protocol'): |
| return |
| |
| |
| state = self.validation_protocol.advance_phase(x, "final_validation") |
| |
| |
| metadata.update({ |
| 'validation_passed': state.passed, |
| 'validation_phase': state.phase.name, |
| 'validation_metrics': state.metrics |
| }) |
| |
| |
| if len(self.validation_protocol.history) > 1: |
| prev_phase = self.validation_protocol.history[-2].phase |
| if prev_phase != state.phase: |
| self.message_bus.publish( |
| 'phase_transition', |
| {'from': prev_phase.name, 'to': state.phase.name}, |
| priority=2 |
| ) |
| |
| def constrain_gradients(self, gradients: torch.Tensor, param_name: str) -> torch.Tensor: |
| """Apply biological constraints to gradients during training.""" |
| if not self.training or not hasattr(self, 'bio_constraints'): |
| return gradients |
| |
| return self.bio_constraints.constrain_gradients(gradients, param_name) |
| |
    def register_optimizer(self, optimizer):
        """Remember the optimizer so `_adjust_learning_rate` (reached via
        `_apply_mitigation`) can scale its param-group learning rates."""
        self.optimizer = optimizer
|
|
| def _register_message_handlers(self): |
| """Register message handlers for cross-component communication.""" |
| if not hasattr(self, 'message_bus'): |
| return |
| |
| |
| self.message_bus.register_handler('phase_transition', self._handle_phase_transition) |
| |
| |
| self.message_bus.register_handler('constraint_violation', self._handle_constraint_violation) |
| |
| def _handle_phase_transition(self, data): |
| """Handle phase transition events.""" |
| old_phase, new_phase = data.get('from'), data.get('to') |
| if self.enable_ambient and hasattr(self, 'ambient_core'): |
| self.ambient_core.on_phase_transition(old_phase, new_phase) |
| |
| def _handle_constraint_violation(self, data): |
| """Handle constraint violation events.""" |
| |
| if self.training: |
| self._apply_mitigation(data) |
| |
| def _apply_mitigation(self, violation_data): |
| """Apply mitigation for constraint violations.""" |
| |
| violation_type = violation_data.get('type') |
| severity = violation_data.get('severity', 1.0) |
| |
| if violation_type == 'optimization_rate': |
| |
| self._adjust_learning_rate(scale=1.0 - (0.1 * severity)) |
| |
| def _adjust_learning_rate(self, scale=0.9): |
| """Adjust learning rate for stability.""" |
| for param_group in self.optimizer.param_groups: |
| param_group['lr'] *= scale |
|
|