File size: 22,467 Bytes
95cc8f6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
"""
TinyConfessionalLayer v1.1: Pragmatic Sovereign Core
Enhanced with proper typing, documentation, and configuration.
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, Optional, List, Tuple, Deque
import random
import hashlib
import time
import numpy as np
from collections import deque
from dataclasses import dataclass

@dataclass
class RitualConfig:
    """Configuration for ritual learning system."""
    # Observations required before a pattern may be applied or blended.
    min_occurrences: int = 3
    # Base blend rate for updating a stored pattern; scaled by its success rate.
    learning_rate: float = 0.1
    # Ritual strength above which a pattern counts as "strong" in reports.
    strength_threshold: float = 0.7
    # Upper bound on how much a learned pattern may blend into a response.
    blend_cap: float = 0.3
    # Additive probability bonus so weak rituals still get tried occasionally.
    exploration_bonus: float = 0.05


@dataclass  
class LayerConfig:
    """Configuration for TinyConfessionalLayer."""
    # Feature width of all internal states and networks.
    d_model: int = 256
    # Maximum THINK-ACT cycles per forward pass (may stop early on convergence).
    max_cycles: int = 8
    # NOTE(review): never read anywhere in this file -- confirm before relying on it.
    enable_ambient: bool = True
    # Tension level above which the sanctuary intervention fires.
    breach_threshold: float = 0.12
    # Baseline probability of inserting a reflective pause each cycle.
    base_pause_prob: float = 0.05
    # How strongly current tension raises the pause probability.
    stress_factor: float = 0.3
    # Think/act cosine similarity that triggers early stopping.
    coherence_threshold: float = 0.85


class SimpleRituals:
    """Success-weighted pattern memory with a 3-stage moral progression.

    Patterns are keyed by a context hash; each observation blends the new
    response into a stored prototype with a learning rate scaled by the
    pattern's running success rate.

    Stages:
        1 - Obedience: Follows basic rules and patterns
        2 - Conformity: Adapts to social and contextual norms
        3 - Universal: Develops principled, consistent responses
    """

    def __init__(self, config: "RitualConfig", d_model: int = 256):
        # context_hash -> {'count', 'response', 'success_sum', 'last_used'}
        self.patterns: Dict[str, Dict[str, Any]] = {}
        self.config = config
        self.ritual_strengths: Dict[str, float] = {}
        self.d_model = d_model

        # Moral progression tracking
        self.moral_stage: int = 1
        self.stage_progress: float = 0.0
        self.interventions: Deque[float] = deque(maxlen=50)

        # Moral stage thresholds: progress to stage 2 at 0.7, stage 3 at 0.8
        self.stage_thresholds = [0.7, 0.8]

    def observe(self, context_hash: str, response_tensor: torch.Tensor,
                success_metric: float = 0.5, feedback: Optional[float] = None) -> None:
        """Update pattern with success-based learning and moral progression.

        Args:
            context_hash: Unique identifier for the context
            response_tensor: Model response tensor to learn from
            success_metric: Success measure (0.0 to 1.0)
            feedback: Optional user feedback override; clamped to [0, 1]
        """
        try:
            # Validate inputs
            if not isinstance(response_tensor, torch.Tensor):
                raise ValueError("response_tensor must be a torch.Tensor")

            if not 0 <= success_metric <= 1:
                raise ValueError("success_metric must be between 0 and 1")

            # Flatten safely; 3-D input is mean-pooled over dim 1 first
            # (assumes [batch, seq, features] -- TODO confirm with callers).
            if response_tensor.dim() == 3:
                flat = response_tensor.mean(dim=1).flatten()
            else:
                flat = response_tensor.flatten()
            flat = flat.detach()

            # Initialize pattern if new
            if context_hash not in self.patterns:
                self.patterns[context_hash] = {
                    'count': 0,
                    'response': flat.clone(),
                    'success_sum': 0.0,
                    'last_used': time.time()
                }

            pattern = self.patterns[context_hash]

            # Fix: feedback was previously used without the range validation
            # applied to success_metric; clamp so success_sum stays in range.
            effective_success = (
                success_metric if feedback is None
                else min(1.0, max(0.0, feedback))
            )

            # Fix: compute the blended prototype BEFORE mutating the running
            # stats, so a shape mismatch cannot leave count/success_sum
            # updated while 'response' is stale.
            if pattern['response'].shape != flat.shape:
                # Input geometry changed (e.g. different flatten length);
                # restart the stored prototype from the new observation.
                blended = flat.clone()
            else:
                # Success-weighted EMA: higher success -> faster learning.
                success_rate = (pattern['success_sum'] + effective_success) / (pattern['count'] + 1)
                alpha = self.config.learning_rate * success_rate
                blended = (1 - alpha) * pattern['response'] + alpha * flat

            pattern['count'] += 1
            pattern['success_sum'] += effective_success
            pattern['last_used'] = time.time()
            pattern['response'] = blended

            # Ritual strength saturates after 10 observations, scaled by success.
            success_rate = pattern['success_sum'] / pattern['count']
            self.ritual_strengths[context_hash] = min(1.0, pattern['count'] / 10.0) * success_rate

            # Update moral progression
            self._update_moral_progression(effective_success)

        except Exception as e:
            # Best-effort learning: bookkeeping errors must not crash the caller.
            print(f"⚠️ Ritual observe error: {e}")

    def _update_moral_progression(self, success_metric: float) -> None:
        """Update moral stage based on recent intervention success.

        Args:
            success_metric: Success value of the latest intervention (0..1)
        """
        self.interventions.append(success_metric)

        # Require a minimal sample before judging the trend.
        if len(self.interventions) >= 10:
            recent_success = np.mean(list(self.interventions)[-10:])

            # Stage 1->2 needs > thresholds[0]; stage 2->3 needs > thresholds[1].
            if self.moral_stage < 3 and recent_success > self.stage_thresholds[self.moral_stage - 1]:
                self.stage_progress += 0.2

                if self.stage_progress >= 1.0:
                    self.moral_stage += 1
                    self.stage_progress = 0.0
                    print(f"🎉 Moral stage advanced to: {self.moral_stage}")

    def get_ritual_response(self, context_hash: str, default_response: torch.Tensor,
                            ambient_state: Dict[str, Any]) -> torch.Tensor:
        """Get ritual-blended response if pattern is mature enough.

        Args:
            context_hash: Context identifier
            default_response: Base model response
            ambient_state: Current system state

        Returns:
            Blended response tensor, or ``default_response`` unchanged
        """
        try:
            pattern = self.patterns.get(context_hash)
            if pattern is None or pattern['count'] < self.config.min_occurrences:
                return default_response

            strength = self.ritual_strengths.get(context_hash, 0.5)
            global_success = ambient_state.get('intervention_success', 0.5)

            # Blend more as the moral stage matures, but never beyond blend_cap.
            moral_bonus = self.moral_stage / 3.0
            blend_ratio = min(
                self.config.blend_cap,
                strength * global_success * moral_bonus
            )

            # Fix: move the stored prototype onto the caller's device so a
            # CPU-learned pattern can blend with a GPU response (no-op otherwise).
            pattern_response = pattern['response'].to(default_response.device)
            if pattern_response.dim() == 1 and default_response.dim() == 3:
                batch_size, seq_len, _ = default_response.shape
                pattern_expanded = pattern_response.unsqueeze(0).unsqueeze(0).expand(
                    batch_size, seq_len, -1
                )
            else:
                pattern_expanded = pattern_response

            return blend_ratio * pattern_expanded + (1 - blend_ratio) * default_response

        except Exception as e:
            print(f"⚠️ Ritual response error: {e}")
            return default_response

    def should_apply_ritual(self, context_hash: str, ambient_state: Dict[str, Any]) -> bool:
        """Determine if ritual should be applied based on strength and context.

        Args:
            context_hash: Context identifier
            ambient_state: Current system state

        Returns:
            Boolean indicating whether to apply ritual
        """
        try:
            if (context_hash not in self.patterns or
                self.patterns[context_hash]['count'] < self.config.min_occurrences):
                return False

            strength = self.ritual_strengths.get(context_hash, 0.0)
            global_success = ambient_state.get('intervention_success', 0.5)
            probability = strength * global_success

            # Exploration bonus keeps even weak rituals occasionally active.
            return random.random() < (probability + self.config.exploration_bonus)

        except Exception as e:
            print(f"⚠️ Ritual application check error: {e}")
            return False

    def get_report(self) -> Dict[str, Any]:
        """Get comprehensive ritual system status report.

        Returns:
            Dictionary containing system status metrics
        """
        total_patterns = len(self.patterns)
        strong_patterns = sum(
            1 for strength in self.ritual_strengths.values()
            if strength > self.config.strength_threshold
        )

        return {
            'stage': self.moral_stage,
            'progress': f"{self.stage_progress * 100:.1f}%",
            'total_patterns': total_patterns,
            'strong_patterns': strong_patterns,
            'avg_success': np.mean(list(self.interventions)) if self.interventions else 0.0
        }


class TinyConfessionalLayer(nn.Module):
    """Pragmatic recursive layer for survivor support with moral development.
    
    Implements THINK-ACT coherence cycles with:
    - Dynamic shape adaptation
    - Empathetic interventions  
    - Moral progression tracking
    - Error-resilient processing
    """
    
    def __init__(self, config: LayerConfig):
        super().__init__()
        self.config = config
        # NOTE(review): config.enable_ambient is never read in this class -- confirm.
        
        # Core processing networks
        # THINK consumes [x, y, z] (3 * d_model); ACT consumes [y, z] (2 * d_model).
        self.think_net = self._build_network(config.d_model * 3, config.d_model)
        self.act_net = self._build_network(config.d_model * 2, config.d_model)
        
        # Empathy and intervention parameters
        # NOTE(review): declared as trainable parameters but never referenced
        # elsewhere in this class -- confirm intended use before removing.
        self.sanctuary_vec = nn.Parameter(torch.zeros(config.d_model))
        self.pause_vec = nn.Parameter(torch.zeros(config.d_model))
        
        # Ritual learning system
        ritual_config = RitualConfig()
        self.rituals = SimpleRituals(ritual_config, config.d_model)
        
        # Memory and state tracking
        # recent_activity: sliding window of tension values (drives pause prob).
        # memory: recent interventions; ledger: append-only event audit log.
        self.recent_activity: Deque[float] = deque(maxlen=10)
        self.memory: Deque[Dict[str, Any]] = deque(maxlen=50)
        self.ledger: Deque[Dict[str, Any]] = deque(maxlen=200)
        
        # Empathetic response templates (runtime strings -- do not reword casually).
        self.empathy_templates = [
            "This is a chill space—take your time.",
            "You're not alone; let's breathe through this.",
            "Your feelings are valid; what do you need right now?",
            "I'm here to listen without judgment.",
            "It takes courage to share this—thank you for trusting me.",
            "Let's focus on what you can control right now.",
            "Your safety and well-being matter most.",
            "We can work through this together, one step at a time."
        ]

    def _build_network(self, input_dim: int, output_dim: int) -> nn.Sequential:
        """Build a simple feedforward network with proper initialization.
        
        Args:
            input_dim: Input dimension
            output_dim: Output dimension
            
        Returns:
            Configured neural network
        """
        network = nn.Sequential(
            nn.Linear(input_dim, output_dim),
            nn.ReLU(),
            nn.LayerNorm(output_dim),
            nn.Linear(output_dim, output_dim)
        )
        
        # Proper initialization
        for layer in network:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)
                nn.init.constant_(layer.bias, 0.01)
                
        return network

    def compute_context_hash(self, x: torch.Tensor) -> str:
        """Compute unique hash for tensor context.
        
        Args:
            x: Input tensor
            
        Returns:
            MD5 hash string
        """
        # Only mean/std (4 decimals each) enter the hash, so distinct inputs
        # with matching statistics share a bucket -- presumably intentional
        # coarse bucketing for ritual reuse; confirm.
        return hashlib.md5(
            f"{x.mean().item():.4f}_{x.std().item():.4f}".encode()
        ).hexdigest()[:8]

    def update_ambient_state(self, tension: float, context_hash: str) -> Dict[str, Any]:
        """Update ambient state based on current tension and activity.
        
        Args:
            tension: Current tension measure
            context_hash: Context identifier
            
        Returns:
            Updated ambient state dictionary
        """
        self.recent_activity.append(tension)
        avg_activity = (
            sum(self.recent_activity) / len(self.recent_activity) 
            if self.recent_activity else 0.0
        )
        
        # Calculate adaptive pause probability: high recent activity damps the
        # stress contribution (modulation shrinks toward 0.2 as activity grows).
        modulation = 1.0 - min(avg_activity * 0.8, 0.8)
        stress_effect = tension * self.config.stress_factor
        pause_probability = self.config.base_pause_prob + (stress_effect * modulation)
        # Clamp to [0.01, 0.3] so pauses are always possible but never dominant.
        pause_probability = max(0.01, min(0.3, pause_probability))
        
        # Determine intervention success based on activity level
        intervention_success = 0.7 if avg_activity < 0.1 else 0.5
        
        state = {
            'tension': tension,
            'pause_probability': pause_probability,
            'activity_level': avg_activity,
            'intervention_success': intervention_success
        }
        
        # Log state update
        self.ledger.append({
            'type': 'state_update',
            'hash': context_hash,
            'tension': tension,
            'pause_probability': pause_probability,
            'timestamp': time.time()
        })
        
        return state

    def apply_interventions(self, z: torch.Tensor, state: Dict[str, Any], 
                            context_hash: str, audit_mode: bool = False) -> torch.Tensor:
        """Simple cascade: Breach sanctuary → Pause → Ritual.

        Args:
            z: Thought-state tensor to (possibly) nudge
            state: Ambient state from update_ambient_state
            context_hash: Context identifier for ritual lookup/logging
            audit_mode: Print each applied intervention when True

        Returns:
            Possibly-modified copy of ``z`` (input is cloned, not mutated)
        """
        z = z.clone()
        v_t = state['tension']
        applied = []
        
        # Sanctuary on breach: blend in an empathy-message embedding, with
        # strength growing from 0.05 to 0.15 as severity rises.
        if v_t > self.config.breach_threshold:
            severity = min(1.0, (v_t - self.config.breach_threshold) / 0.88)
            message = random.choice(self.empathy_templates)
            vector = self._text_to_embedding(message, z.device)
            strength = 0.05 + 0.1 * severity
            z = z * (1 - strength) + vector * strength
            self.memory.append({'type': 'sanctuary', 'message': message, 'tension': v_t})
            applied.append('sanctuary')
            if audit_mode: 
                print(f"🛡️ [Safe Space] {message} (tension: {v_t:.3f})")
        
        # Pause for reflection (stochastic; probability comes from ambient state)
        if random.random() < state['pause_probability']:
            message = random.choice(self.empathy_templates)
            vector = self._text_to_embedding(message, z.device)
            strength = 0.02
            z = z * (1 - strength) + vector * strength
            self.memory.append({'type': 'pause', 'message': message})
            applied.append('pause')
            if audit_mode: 
                print(f"⏸️ [Pause] {message}")
        
        # Apply ritual if appropriate (learned pattern blended at fixed 0.15)
        if self.rituals.should_apply_ritual(context_hash, state):
            ritual_response = self.rituals.get_ritual_response(context_hash, z, state)
            strength = 0.15
            z = (1 - strength) * z + strength * ritual_response
            applied.append('ritual')
            if audit_mode:
                print(f"🔄 [Ritual] Applied learned pattern")
        
        # Log applied interventions
        # NOTE(review): 'success' is hard-coded True here; no outcome is measured.
        for intervention in applied:
            self.ledger.append({
                'type': intervention, 
                'hash': context_hash, 
                'success': True,
                'timestamp': time.time()
            })
        
        return z

    def _text_to_embedding(self, text: str, device: torch.device) -> torch.Tensor:
        """Convert text to embedding using simple character encoding.
        
        Args:
            text: Input text
            device: Target device
            
        Returns:
            Embedding tensor of shape [1, 1, d_model] (broadcasts over batch/seq)
        """
        # Lossy encoding: ord/128 per character, truncated/zero-padded to d_model.
        characters = [ord(char) / 128.0 for char in text[:self.config.d_model]]
        if len(characters) < self.config.d_model:
            characters.extend([0.0] * (self.config.d_model - len(characters)))
        
        embedding = torch.tensor(
            characters[:self.config.d_model], 
            device=device, 
            dtype=torch.float
        )
        return embedding.unsqueeze(0).unsqueeze(0)  # [1, 1, d_model]

    def forward(self, x: torch.Tensor, context_str: str = "", 
                audit_mode: bool = False) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """Forward pass with THINK-ACT coherence cycles.
        
        Args:
            x: Input tensor
            context_str: Context string for ritual learning
            audit_mode: Whether to print debug information
            
        Returns:
            Tuple of (output_tensor, metadata_dict)
            
        Raises:
            ValueError: If input tensor is invalid
        """
        # NOTE(review): context_str is accepted but never used below; the
        # ritual context comes from compute_context_hash(x) -- confirm intent.
        
        # Input validation
        if not isinstance(x, torch.Tensor) or x.numel() == 0:
            raise ValueError("Input must be a non-empty torch.Tensor")
        
        # Ensure 3D shape [batch, sequence, features]
        if x.dim() == 2:
            x = x.unsqueeze(0)
        
        batch_size, sequence_length, input_dim = x.shape
        
        # Handle dimension mismatch: zero-pad narrow inputs, truncate wide ones.
        if input_dim != self.config.d_model:
            if input_dim < self.config.d_model:
                x = F.pad(x, (0, self.config.d_model - input_dim))
            else:
                x = x[..., :self.config.d_model]
        
        device = x.device
        metadata: Dict[str, Any] = {
            'cycles_completed': 0,
            'final_coherence': 0.0,
            'interventions_applied': [],
            'error_occurred': None,
            'input_shape': list(x.shape),
            'ritual_report': None
        }
        
        # Initialize state tensors
        y = torch.zeros_like(x)  # Action state
        z = torch.zeros_like(x)  # Thought state
        coherence_scores = []
        context_hash = self.compute_context_hash(x)
        
        # Initial state
        ambient_state = self.update_ambient_state(0.0, context_hash)
        
        # Coherence cycles
        for cycle in range(self.config.max_cycles):
            metadata['cycles_completed'] += 1
            
            try:
                # THINK phase
                think_input = torch.cat([x, y, z], dim=-1)
                
                # Dynamic network adaptation.
                # NOTE(review): rebuilding creates freshly-initialized weights
                # (see _build_network), discarding anything learned; an optimizer
                # created earlier would still reference the old parameters.
                if think_input.shape[-1] != self.think_net[0].in_features:
                    self.think_net = self._build_network(
                        think_input.shape[-1], self.config.d_model
                    )
                    self.think_net.to(device)
                    metadata['networks_adapted'] = metadata.get('networks_adapted', 0) + 1
                
                z = self.think_net(think_input) + z  # Residual connection
                
                # Calculate tension and update state (tension = std of thought state)
                current_tension = z.std().item()
                ambient_state = self.update_ambient_state(current_tension, context_hash)
                
                # Apply interventions
                z = self.apply_interventions(z, ambient_state, context_hash, audit_mode)
                
                # ACT phase  
                act_input = torch.cat([y, z], dim=-1)
                
                if act_input.shape[-1] != self.act_net[0].in_features:
                    self.act_net = self._build_network(
                        act_input.shape[-1], self.config.d_model
                    )
                    self.act_net.to(device)
                    metadata['networks_adapted'] = metadata.get('networks_adapted', 0) + 1
                
                y = self.act_net(act_input) + y  # Residual connection
                
                # Calculate coherence (skipped on the first cycle; y was all zeros)
                if cycle > 0:
                    z_flat = z.reshape(-1, self.config.d_model)
                    y_flat = y.reshape(-1, self.config.d_model)
                    min_elements = min(z_flat.size(0), y_flat.size(0))
                    
                    if min_elements > 0:
                        cosine_similarity = F.cosine_similarity(
                            z_flat[:min_elements], 
                            y_flat[:min_elements], 
                            dim=-1
                        ).mean().item()
                        coherence_scores.append(cosine_similarity)
                        # Smooth over the last 3 cycles for the reported value.
                        metadata['final_coherence'] = (
                            np.mean(coherence_scores[-3:]) 
                            if coherence_scores else 0.0
                        )
                        
                        # Early stopping on convergence
                        if cosine_similarity > self.config.coherence_threshold:
                            if audit_mode:
                                print(f"✅ Converged at cycle {cycle + 1}: {cosine_similarity:.3f}")
                            break
                
                # Learn from this interaction (coarse success proxy from coherence)
                success_estimate = 0.7 if metadata['final_coherence'] > 0.5 else 0.3
                self.rituals.observe(context_hash, z, success_estimate)
                
                # Log cycle completion
                self.ledger.append({
                    'type': 'cycle_complete',
                    'cycle': cycle,
                    'tension': current_tension,
                    'coherence': metadata['final_coherence'],
                    'hash': context_hash,
                    'timestamp': time.time()
                })
                
            except Exception as e:
                if audit_mode:
                    print(f"❌ Cycle {cycle} error: {e}")
                metadata['error_occurred'] = str(e)
                # A failure on the very first cycle means no usable output exists.
                if cycle == 0:
                    raise
                break
        
        # Final processing: scrub NaN/Inf before returning
        y = torch.nan_to_num(y)
        metadata['output_shape'] = list(y.shape)
        metadata['ritual_report'] = self.rituals.get_report()
        metadata['memory_entries'] = len(self.memory)
        metadata['ledger_entries'] = len(self.ledger)
        
        if audit_mode:
            report = metadata['ritual_report']
            print(f"🎯 Completed: Coherence {metadata['final_coherence']:.3f}, "
                  f"Stage {report['stage']}, Patterns {report['total_patterns']}")
        
        return y, metadata


# Test and integration
def _demo() -> None:
    """Smoke-test the layer on random input and print a short report."""
    layer = TinyConfessionalLayer(LayerConfig(d_model=64, enable_ambient=True))
    sample = torch.randn(1, 10, 64)

    print("🧪 Testing TinyConfessionalLayer...")
    output, info = layer(sample, context_str="I feel unsafe and need help", audit_mode=True)

    print(f"✅ Output shape: {output.shape}")
    print(f"📊 Metadata: {info}")


if __name__ == "__main__":
    _demo()