Spaces:
Runtime error
Runtime error
Personaz1
committed on
Commit
·
247545d
1
Parent(s):
4e4035c
ΔΣ::TORI - Copy TORUS modules locally for Hugging Face deployment
Browse files- app.py +0 -3
- central_singularity.py +517 -0
- coherence_monitor.py +409 -0
- toroidal_topology.py +339 -0
app.py
CHANGED
|
@@ -11,9 +11,6 @@ import io
|
|
| 11 |
import base64
|
| 12 |
|
| 13 |
# Импорт архитектуры TORUS
|
| 14 |
-
import sys
|
| 15 |
-
sys.path.append('./TORUS/toroidal_diffusion_complete_website/toroidal_diffusion_project/src')
|
| 16 |
-
|
| 17 |
from central_singularity import SingularityCore, CognitiveFeedbackLoop
|
| 18 |
from coherence_monitor import CoherenceMetrics, SelfReflectionModule
|
| 19 |
from toroidal_topology import ToroidalLatentSpace
|
|
|
|
| 11 |
import base64
|
| 12 |
|
| 13 |
# Импорт архитектуры TORUS
|
|
|
|
|
|
|
|
|
|
| 14 |
from central_singularity import SingularityCore, CognitiveFeedbackLoop
|
| 15 |
from coherence_monitor import CoherenceMetrics, SelfReflectionModule
|
| 16 |
from toroidal_topology import ToroidalLatentSpace
|
central_singularity.py
ADDED
|
@@ -0,0 +1,517 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Central Singularity Module
|
| 3 |
+
|
| 4 |
+
This module implements the central singularity of the toroidal diffusion model,
|
| 5 |
+
which acts as a self-reflective node of cognition - absorbing latent intent,
|
| 6 |
+
transforming internal state, and emitting structured informational jets.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
import torch.nn.functional as F
|
| 12 |
+
import numpy as np
|
| 13 |
+
import math
|
| 14 |
+
from typing import Dict, List, Tuple, Optional
|
| 15 |
+
from einops import rearrange, reduce, repeat
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class SingularityCore(nn.Module):
    """
    The core singularity that processes all information flowing through the torus center.

    This acts as the central cognitive node that:
    1. Absorbs latent intent from the toroidal surface
    2. Transforms and integrates internal state
    3. Emits structured informational jets back to the surface
    """

    def __init__(self,
                 latent_dim: int,
                 singularity_dim: int = 256,
                 num_jets: int = 8,
                 absorption_strength: float = 0.1,
                 emission_strength: float = 0.1):
        super().__init__()
        self.latent_dim = latent_dim
        self.singularity_dim = singularity_dim
        self.num_jets = num_jets
        # NOTE(review): absorption_strength is stored but never applied inside
        # this class — confirm whether absorbed intent was meant to be scaled.
        self.absorption_strength = absorption_strength
        self.emission_strength = emission_strength

        # Absorption network - processes incoming information
        self.absorption_net = nn.Sequential(
            nn.Linear(latent_dim, singularity_dim),
            nn.LayerNorm(singularity_dim),
            nn.SiLU(),
            nn.Linear(singularity_dim, singularity_dim),
            nn.LayerNorm(singularity_dim),
            nn.SiLU()
        )

        # Internal state transformation - the cognitive core.
        # Head count must divide the embedding dim; fall back to a single head
        # when singularity_dim is small.
        num_heads = min(8, singularity_dim // 64) if singularity_dim >= 64 else 1
        self.cognitive_core = nn.ModuleList([
            nn.MultiheadAttention(singularity_dim, num_heads=num_heads, batch_first=True),
            nn.Sequential(
                nn.Linear(singularity_dim, singularity_dim * 4),
                nn.SiLU(),
                nn.Linear(singularity_dim * 4, singularity_dim)
            )
        ])

        # Emission network - generates informational jets
        self.emission_net = nn.Sequential(
            nn.Linear(singularity_dim, singularity_dim * 2),
            nn.SiLU(),
            nn.Linear(singularity_dim * 2, num_jets * latent_dim),
            nn.Tanh()
        )

        # Learnable singularity state
        self.singularity_state = nn.Parameter(torch.randn(1, singularity_dim) * 0.1)

        # Jet direction embeddings (learnable): (theta, phi) for each jet
        self.jet_directions = nn.Parameter(torch.randn(num_jets, 2) * 0.1)

    def absorb_intent(self, toroidal_features: torch.Tensor) -> torch.Tensor:
        """
        Absorb latent intent from the toroidal surface into the singularity.

        Args:
            toroidal_features: Features from the toroidal surface [B, C, H, W]

        Returns:
            absorbed_intent: Absorbed and processed intent [B, singularity_dim]
        """
        batch_size, channels, height, width = toroidal_features.shape

        # Global average pooling to extract global intent
        global_intent = F.adaptive_avg_pool2d(toroidal_features, 1).flatten(1)

        # Weighted absorption based on distance from center.
        # BUG FIX: the coordinate grids must be floating point — torch.arange
        # defaults to int64 and torch.sqrt raises
        # RuntimeError: "sqrt" not implemented for 'Long' on integer tensors.
        center_h, center_w = height // 2, width // 2
        y_coords, x_coords = torch.meshgrid(
            torch.arange(height, device=toroidal_features.device,
                         dtype=toroidal_features.dtype),
            torch.arange(width, device=toroidal_features.device,
                         dtype=toroidal_features.dtype),
            indexing='ij'
        )

        # Distance from center (inverted for absorption weight)
        dist_from_center = torch.sqrt((y_coords - center_h)**2 + (x_coords - center_w)**2)
        absorption_weight = 1.0 / (1.0 + dist_from_center)
        absorption_weight = absorption_weight / absorption_weight.sum()

        # Weighted spatial pooling
        weighted_features = toroidal_features * absorption_weight.unsqueeze(0).unsqueeze(0)
        spatial_intent = weighted_features.sum(dim=[2, 3])

        # Combine global and spatial intent
        combined_intent = global_intent + spatial_intent

        # Process through absorption network
        return self.absorption_net(combined_intent)

    def transform_state(self, absorbed_intent: torch.Tensor) -> torch.Tensor:
        """
        Transform the internal singularity state using absorbed intent.

        Args:
            absorbed_intent: Absorbed intent from toroidal surface [B, singularity_dim]

        Returns:
            transformed_state: New singularity state [B, singularity_dim]
        """
        batch_size = absorbed_intent.shape[0]

        # Expand singularity state for batch
        current_state = self.singularity_state.expand(batch_size, -1)

        # Combine current state with absorbed intent as a 2-token sequence
        combined = torch.stack([current_state, absorbed_intent], dim=1)  # [B, 2, D]

        # Self-attention for cognitive processing
        attn_layer, ffn_layer = self.cognitive_core

        # Self-attention with residual connection
        attended, _ = attn_layer(combined, combined, combined)
        attended = attended + combined

        # Feed-forward processing with residual connection
        transformed = ffn_layer(attended)
        transformed = transformed + attended

        # Take the first token — the (updated) singularity state
        transformed_state = transformed[:, 0]

        return transformed_state

    def emit_jets(self, transformed_state: torch.Tensor, target_shape: Tuple[int, int]) -> torch.Tensor:
        """
        Emit structured informational jets from the singularity to the toroidal surface.

        Args:
            transformed_state: Transformed singularity state [B, singularity_dim]
            target_shape: Target spatial shape (H, W) for the jets

        Returns:
            emitted_jets: Informational jets projected onto toroidal surface [B, C, H, W]
        """
        batch_size = transformed_state.shape[0]
        height, width = target_shape

        # Generate jet information
        jet_info = self.emission_net(transformed_state)  # [B, num_jets * latent_dim]
        jet_info = jet_info.view(batch_size, self.num_jets, self.latent_dim)

        # Create spatial grid in [-1, 1]^2
        y_coords, x_coords = torch.meshgrid(
            torch.linspace(-1, 1, height, device=transformed_state.device),
            torch.linspace(-1, 1, width, device=transformed_state.device),
            indexing='ij'
        )

        # Convert to polar coordinates
        theta = torch.atan2(y_coords, x_coords)
        radius = torch.sqrt(x_coords**2 + y_coords**2)

        # Initialize emission field
        emission_field = torch.zeros(batch_size, self.latent_dim, height, width,
                                     device=transformed_state.device)

        # Emit each jet
        for jet_idx in range(self.num_jets):
            # Jet direction. jet_directions[:, 1] (phi) is reserved for a 3-D
            # orientation and is not used by this 2-D surface projection.
            jet_theta = self.jet_directions[jet_idx, 0]

            # Angular distance to the jet axis, wrapped to [0, pi]
            angular_dist = torch.abs(theta - jet_theta)
            angular_dist = torch.min(angular_dist, 2 * math.pi - angular_dist)

            # Jet strength decreases with angular distance and radius
            jet_strength = torch.exp(-angular_dist**2 / 0.5) * torch.exp(-radius**2 / 2.0)

            # Apply jet information
            jet_contribution = jet_info[:, jet_idx].unsqueeze(-1).unsqueeze(-1) * jet_strength.unsqueeze(0)
            emission_field += jet_contribution

        return emission_field

    def forward(self, toroidal_features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Complete singularity processing cycle: absorb -> transform -> emit.

        Args:
            toroidal_features: Input features from toroidal surface [B, C, H, W]

        Returns:
            result: Dictionary containing all singularity outputs
        """
        # Absorption phase
        absorbed_intent = self.absorb_intent(toroidal_features)

        # Transformation phase
        transformed_state = self.transform_state(absorbed_intent)

        # Emission phase
        target_shape = toroidal_features.shape[2:]
        emitted_jets = self.emit_jets(transformed_state, target_shape)

        return {
            'absorbed_intent': absorbed_intent,
            'transformed_state': transformed_state,
            'emitted_jets': emitted_jets,
            'singularity_influence': emitted_jets * self.emission_strength
        }
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class SingularityToroidalCoupling(nn.Module):
    """
    Bidirectional coupling between the central singularity and the toroidal surface.

    Surface features are projected into the singularity, processed there, and the
    emitted jets are projected back and blended into the surface through an
    adaptive, learned gate combined with a fixed coupling constant.
    """

    def __init__(self,
                 latent_dim: int,
                 singularity_dim: int = 256,
                 coupling_strength: float = 0.1):
        super().__init__()
        self.latent_dim = latent_dim
        self.singularity_dim = singularity_dim
        self.coupling_strength = coupling_strength

        # Central singularity shared by both directions of the coupling.
        self.singularity = SingularityCore(latent_dim, singularity_dim)

        groups = min(8, latent_dim) if latent_dim >= 8 else 1

        def conv_block() -> nn.Sequential:
            # 3x3 conv -> group norm -> SiLU -> 1x1 projection.
            return nn.Sequential(
                nn.Conv2d(latent_dim, latent_dim, 3, padding=1),
                nn.GroupNorm(groups, latent_dim),
                nn.SiLU(),
                nn.Conv2d(latent_dim, latent_dim, 1)
            )

        # One projection per direction of information flow.
        self.surface_to_singularity = conv_block()
        self.singularity_to_surface = conv_block()

        # Adaptive, per-sample coupling gate in (0, 1), shape [B, 1, 1, 1].
        self.coupling_modulator = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(latent_dim, max(1, latent_dim // 4), 1),
            nn.SiLU(),
            nn.Conv2d(max(1, latent_dim // 4), 1, 1),
            nn.Sigmoid()
        )

    def compute_coupling_strength(self, features: torch.Tensor) -> torch.Tensor:
        """Return the adaptive coupling gate for ``features`` ([B, 1, 1, 1])."""
        return self.coupling_modulator(features)

    def forward(self, toroidal_features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Run one coupling step between surface and singularity.

        Args:
            toroidal_features: Features on the toroidal surface [B, C, H, W]

        Returns:
            Dictionary with the coupled features, the original input, the applied
            influence, the adaptive gate, and all raw singularity outputs.
        """
        # Surface -> singularity projection, then singularity processing.
        projected = self.surface_to_singularity(toroidal_features)
        core_out = self.singularity(projected)

        # Singularity -> surface projection of the emitted jets.
        jets_back = self.singularity_to_surface(core_out['emitted_jets'])

        # Gate the influence both adaptively and by the fixed constant.
        gate = self.compute_coupling_strength(toroidal_features)
        influence = jets_back * gate * self.coupling_strength

        result = {
            'coupled_features': toroidal_features + influence,
            'original_features': toroidal_features,
            'singularity_influence': influence,
            'coupling_strength': gate,
        }
        # NOTE(review): core_out also carries a 'singularity_influence' entry,
        # which overwrites the one above — this reproduces the original
        # dict-merge order; confirm the overwrite is intended.
        result.update(core_out)
        return result
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
class CognitiveFeedbackLoop(nn.Module):
    """
    Observe -> integrate -> act feedback cycle with a small rolling memory.

    Each forward pass compresses the current features into an observation,
    fuses it with the stored observation history, and applies the resulting
    action as a small residual correction to the input features.
    """

    def __init__(self, latent_dim: int, memory_size: int = 10):
        super().__init__()
        self.latent_dim = latent_dim
        self.memory_size = memory_size

        compressed = latent_dim // 4
        groups = min(8, latent_dim) if latent_dim >= 8 else 1

        # Observation network: spatial features -> compact vector.
        self.observer = nn.Sequential(
            nn.Conv2d(latent_dim, latent_dim, 3, padding=1),
            nn.GroupNorm(groups, latent_dim),
            nn.SiLU(),
            nn.Conv2d(latent_dim, latent_dim // 2, 1),
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(latent_dim // 2, latent_dim // 4)
        )

        # Integration network: memory + current observation -> cognitive state.
        self.integrator = nn.Sequential(
            nn.Linear(latent_dim // 4 * (memory_size + 1), latent_dim // 2),
            nn.SiLU(),
            nn.Linear(latent_dim // 2, latent_dim // 4)
        )

        # Action network: cognitive state -> per-channel correction vector.
        self.actor = nn.Sequential(
            nn.Linear(latent_dim // 4, latent_dim),
            nn.SiLU(),
            nn.Linear(latent_dim, latent_dim * 4),
            nn.SiLU(),
            nn.Linear(latent_dim * 4, latent_dim)
        )

        # Circular observation buffer (persistent state, not learnable).
        self.register_buffer('memory', torch.zeros(memory_size, compressed))
        self.register_buffer('memory_ptr', torch.zeros(1, dtype=torch.long))

    def observe(self, features: torch.Tensor) -> torch.Tensor:
        """Compress the current feature map into an observation vector.

        Args:
            features: Current features [B, C, H, W]

        Returns:
            observation: Compressed observation [B, latent_dim // 4]
        """
        return self.observer(features)

    def update_memory(self, observation: torch.Tensor):
        """Push a new observation into the circular buffer.

        NOTE(review): only the first batch element is stored, so the memory is
        shared across the whole batch — confirm this is intended.

        Args:
            observation: New observation to store
        """
        slot = self.memory_ptr.item()
        self.memory[slot] = observation[0]
        self.memory_ptr[0] = (slot + 1) % self.memory_size

    def integrate(self, current_observation: torch.Tensor) -> torch.Tensor:
        """Fuse the current observation with the stored memory.

        Args:
            current_observation: Current observation [B, latent_dim // 4]

        Returns:
            integrated_state: Integrated cognitive state [B, latent_dim // 4]
        """
        n = current_observation.shape[0]
        # Replicate the (shared) memory across the batch and flatten it.
        history = self.memory.unsqueeze(0).expand(n, -1, -1).flatten(1)
        return self.integrator(torch.cat([current_observation, history], dim=1))

    def act(self, integrated_state: torch.Tensor, original_shape: Tuple[int, int]) -> torch.Tensor:
        """Broadcast the action vector over the spatial grid.

        Args:
            integrated_state: Integrated cognitive state
            original_shape: Original spatial shape (H, W)

        Returns:
            action: Spatially-broadcast action [B, C, H, W]
        """
        h, w = original_shape
        action_vec = self.actor(integrated_state)
        return action_vec[:, :, None, None].expand(-1, -1, h, w)

    def forward(self, features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Run one observe/integrate/act cycle and apply the correction.

        Args:
            features: Input features [B, C, H, W]

        Returns:
            result: Dictionary containing cognitive processing results
        """
        observation = self.observe(features)
        state = self.integrate(observation)
        action = self.act(state, features.shape[2:])

        # Small, fixed action strength keeps the correction a gentle residual.
        corrected = features + 0.1 * action

        # Record the observation only after it has been consumed.
        self.update_memory(observation)

        return {
            'modified_features': corrected,
            'observation': observation,
            'integrated_state': state,
            'action': action,
            'original_features': features
        }
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
def test_central_singularity():
    """Smoke-test for the central singularity components."""
    print("Testing Central Singularity Components...")

    # Small random problem instance.
    batch_size, latent_dim, height, width = 2, 64, 32, 32
    sample = torch.randn(batch_size, latent_dim, height, width)

    # --- SingularityCore ---
    print("Testing SingularityCore...")
    core = SingularityCore(latent_dim, singularity_dim=128, num_jets=8)
    core_result = core(sample)

    print(f"Absorbed intent shape: {core_result['absorbed_intent'].shape}")
    print(f"Transformed state shape: {core_result['transformed_state'].shape}")
    print(f"Emitted jets shape: {core_result['emitted_jets'].shape}")
    print(f"Singularity influence shape: {core_result['singularity_influence'].shape}")

    # --- SingularityToroidalCoupling ---
    print("\nTesting SingularityToroidalCoupling...")
    coupling = SingularityToroidalCoupling(latent_dim, singularity_dim=128)
    coupling_result = coupling(sample)

    print(f"Coupled features shape: {coupling_result['coupled_features'].shape}")
    print(f"Coupling strength shape: {coupling_result['coupling_strength'].shape}")
    print(f"Coupling strength mean: {coupling_result['coupling_strength'].mean().item():.4f}")

    # --- CognitiveFeedbackLoop ---
    print("\nTesting CognitiveFeedbackLoop...")
    feedback_loop = CognitiveFeedbackLoop(latent_dim, memory_size=5)

    # Several passes so the rolling memory is actually exercised.
    for step in range(3):
        feedback_result = feedback_loop(sample)
        print(f"Iteration {step+1}:")
        print(f" Modified features shape: {feedback_result['modified_features'].shape}")
        print(f" Observation shape: {feedback_result['observation'].shape}")
        print(f" Action mean: {feedback_result['action'].mean().item():.4f}")

    print("\nAll central singularity tests passed!")


if __name__ == "__main__":
    test_central_singularity()
|
| 517 |
+
|
coherence_monitor.py
ADDED
|
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Coherence Monitoring and Self-Reflection Module
|
| 3 |
+
|
| 4 |
+
This module implements the coherence assessment and self-reflection mechanisms
|
| 5 |
+
that are central to the toroidal diffusion model architecture.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
import numpy as np
|
| 12 |
+
from typing import Dict, List, Tuple, Optional
|
| 13 |
+
from collections import deque
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class CoherenceMetrics:
    """Static coherence metrics for assessing generation quality."""

    @staticmethod
    def semantic_coherence(features: torch.Tensor, window_size: int = 3) -> torch.Tensor:
        """
        Semantic coherence from local feature consistency.

        Low variance inside a sliding window means the features agree locally,
        which is scored as high coherence.

        Args:
            features: Feature tensor of shape (batch, channels, height, width)
            window_size: Side length of the local averaging window

        Returns:
            coherence: Per-position coherence map (batch, 1, height, width)
        """
        _, channels, _, _ = features.shape
        pad = window_size // 2

        # Depthwise box filter: one averaging kernel per channel.
        box = torch.ones(1, 1, window_size, window_size,
                         device=features.device) / (window_size ** 2)
        box = box.repeat(channels, 1, 1, 1)

        # Windowed mean, then windowed variance around that mean.
        local_mean = F.conv2d(features, box, groups=channels, padding=pad)
        local_var = F.conv2d((features - local_mean) ** 2, box,
                             groups=channels, padding=pad)

        # Low variance -> high coherence; result is bounded in (0, 1].
        return 1.0 / (1.0 + local_var.mean(dim=1, keepdim=True))

    @staticmethod
    def structural_coherence(features: torch.Tensor) -> torch.Tensor:
        """
        Structural coherence from the smoothness of spatial gradients.

        Args:
            features: Feature tensor (batch, channels, height, width)

        Returns:
            coherence: Per-position coherence map (batch, 1, height, width)
        """
        # Finite differences with a wrap-around prepend so H x W is preserved.
        dx = torch.diff(features, dim=3, prepend=features[:, :, :, -1:])
        dy = torch.diff(features, dim=2, prepend=features[:, :, -1:, :])

        # Gradient magnitude per channel.
        magnitude = torch.sqrt(dx ** 2 + dy ** 2)

        # Smoother gradients (low cross-channel spread) -> higher coherence.
        return 1.0 / (1.0 + torch.std(magnitude, dim=1, keepdim=True))

    @staticmethod
    def temporal_coherence(features_sequence: List[torch.Tensor]) -> torch.Tensor:
        """
        Temporal coherence across a sequence of feature tensors.

        Args:
            features_sequence: Feature tensors from successive timesteps

        Returns:
            coherence: Per-position coherence map; all-ones for sequences
                shorter than two frames
        """
        if len(features_sequence) < 2:
            return torch.ones_like(features_sequence[0][:, :1])

        # Mean absolute frame-to-frame change, channel-averaged.
        diffs = [
            torch.abs(curr - prev).mean(dim=1, keepdim=True)
            for prev, curr in zip(features_sequence, features_sequence[1:])
        ]

        # Small temporal variation -> high coherence.
        return 1.0 / (1.0 + torch.stack(diffs).mean(dim=0))
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class SelfReflectionModule(nn.Module):
    """
    Self-reflection mechanism for the toroidal diffusion model.

    Analyzes the current generation state (feature maps), scores its
    coherence, and proposes corrections that downstream refinement can
    apply to improve coherence and quality.
    """

    def __init__(self, feature_dim: int, reflection_depth: int = 3):
        """
        Args:
            feature_dim: Number of feature channels.
            reflection_depth: Number of residual reflection layers.
        """
        super().__init__()
        self.feature_dim = feature_dim
        self.reflection_depth = reflection_depth

        # BUG FIX: GroupNorm requires num_channels % num_groups == 0.
        # The previous min(8, feature_dim) crashed at construction for
        # widths such as 12 that are >= 8 but not multiples of 8.
        num_groups = self._valid_group_count(feature_dim)

        # Residual reflection stack.
        self.reflection_layers = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(feature_dim, feature_dim, 3, padding=1),
                nn.GroupNorm(num_groups, feature_dim),
                nn.SiLU(),
                nn.Conv2d(feature_dim, feature_dim, 3, padding=1),
                nn.GroupNorm(num_groups, feature_dim),
                nn.SiLU()
            ) for _ in range(reflection_depth)
        ])

        # Per-pixel coherence score in (0, 1). max(1, ...) keeps the
        # hidden width valid when feature_dim == 1 (feature_dim // 2
        # previously produced a zero-channel conv).
        hidden = max(1, feature_dim // 2)
        self.coherence_head = nn.Sequential(
            nn.Conv2d(feature_dim, hidden, 1),
            nn.SiLU(),
            nn.Conv2d(hidden, 1, 1),
            nn.Sigmoid()
        )

        # Correction suggestion head.
        self.correction_head = nn.Sequential(
            nn.Conv2d(feature_dim, feature_dim, 3, padding=1),
            nn.GroupNorm(num_groups, feature_dim),
            nn.SiLU(),
            nn.Conv2d(feature_dim, feature_dim, 3, padding=1)
        )

    @staticmethod
    def _valid_group_count(channels: int, max_groups: int = 8) -> int:
        """
        Group count for GroupNorm: keeps the original policy where it
        worked (1 for small widths, 8 when divisible), otherwise falls
        back to the largest divisor of `channels` not exceeding
        `max_groups` instead of crashing.
        """
        if channels < max_groups:
            return 1  # matches the original small-width behavior
        if channels % max_groups == 0:
            return max_groups  # matches the original divisible case
        for groups in range(max_groups - 1, 0, -1):
            if channels % groups == 0:
                return groups
        return 1  # unreachable: 1 always divides channels

    def analyze_coherence(self, features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Score the coherence of the given features.

        Args:
            features: Feature tensor (B, C, H, W).

        Returns:
            dict with 'semantic_coherence', 'structural_coherence',
            'overall_coherence' (learned, in (0, 1)), and
            'mean_coherence' (their average).
        """
        semantic_coh = CoherenceMetrics.semantic_coherence(features)
        structural_coh = CoherenceMetrics.structural_coherence(features)

        # Learned per-pixel coherence estimate.
        overall_coherence = self.coherence_head(features)

        return {
            'semantic_coherence': semantic_coh,
            'structural_coherence': structural_coh,
            'overall_coherence': overall_coherence,
            'mean_coherence': (semantic_coh + structural_coh + overall_coherence) / 3
        }

    def generate_corrections(self, features: torch.Tensor, coherence_analysis: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Propose corrections, weighted by how incoherent the input is.

        Args:
            features: Feature tensor (B, C, H, W).
            coherence_analysis: Output of analyze_coherence.

        Returns:
            Correction tensor of shape (B, C, H, W); near zero where
            coherence is already high.
        """
        # Larger corrections where coherence is lacking.
        coherence_weight = 1.0 - coherence_analysis['mean_coherence']

        corrections = self.correction_head(features)

        return corrections * coherence_weight

    def reflect(self, features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Run the full reflection pipeline on `features`.

        Args:
            features: Feature tensor (B, C, H, W).

        Returns:
            dict with 'reflected_features', 'coherence_analysis',
            'corrections', and the untouched 'original_features'.
        """
        # Residual multi-layer reflection.
        reflected_features = features
        for layer in self.reflection_layers:
            reflected_features = layer(reflected_features) + reflected_features

        coherence_analysis = self.analyze_coherence(reflected_features)
        corrections = self.generate_corrections(reflected_features, coherence_analysis)

        return {
            'reflected_features': reflected_features,
            'coherence_analysis': coherence_analysis,
            'corrections': corrections,
            'original_features': features
        }

    def forward(self, features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Forward pass performing self-reflection (alias for reflect()).

        Args:
            features: Feature tensor (B, C, H, W).

        Returns:
            Self-reflection results (see reflect()).
        """
        return self.reflect(features)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
class MultiPassRefinement(nn.Module):
    """
    Iterative multi-pass refinement guided by self-reflection.

    Each pass reflects on the current features, scores their coherence,
    and applies suggested corrections; iteration stops when coherence is
    high enough, stops improving, or the pass budget is spent.
    """

    def __init__(self, feature_dim: int, max_passes: int = 3, coherence_threshold: float = 0.8):
        """
        Args:
            feature_dim: Number of feature channels.
            max_passes: Upper bound on refinement passes.
            coherence_threshold: Stop once mean coherence reaches this.
        """
        super().__init__()
        self.feature_dim = feature_dim
        self.max_passes = max_passes
        self.coherence_threshold = coherence_threshold

        # Reflection provides coherence analysis and corrections.
        self.reflection_module = SelfReflectionModule(feature_dim)

        # BUG FIX: GroupNorm requires num_channels % num_groups == 0.
        # The previous min(8, feature_dim) crashed at construction for
        # widths such as 12 that are >= 8 but not multiples of 8.
        num_groups = self._valid_group_count(feature_dim)
        self.refinement_net = nn.Sequential(
            nn.Conv2d(feature_dim * 2, feature_dim, 3, padding=1),  # features + corrections
            nn.GroupNorm(num_groups, feature_dim),
            nn.SiLU(),
            nn.Conv2d(feature_dim, feature_dim, 3, padding=1),
            nn.GroupNorm(num_groups, feature_dim),
            nn.SiLU(),
            nn.Conv2d(feature_dim, feature_dim, 3, padding=1)
        )

        # Rolling coherence scores for the current refinement session.
        self.coherence_history = deque(maxlen=max_passes)

    @staticmethod
    def _valid_group_count(channels: int, max_groups: int = 8) -> int:
        """
        Group count for GroupNorm: keeps the original policy where it
        worked (1 for small widths, 8 when divisible), otherwise falls
        back to the largest divisor of `channels` not exceeding
        `max_groups` instead of crashing.
        """
        if channels < max_groups:
            return 1  # matches the original small-width behavior
        if channels % max_groups == 0:
            return max_groups  # matches the original divisible case
        for groups in range(max_groups - 1, 0, -1):
            if channels % groups == 0:
                return groups
        return 1  # unreachable: 1 always divides channels

    def should_continue_refinement(self, coherence_score: float, pass_num: int) -> bool:
        """
        Decide whether another refinement pass is worthwhile.

        Args:
            coherence_score: Mean coherence of the current pass.
            pass_num: Zero-based index of the current pass.

        Returns:
            True if refinement should continue.
        """
        # Good enough already.
        if coherence_score >= self.coherence_threshold:
            return False

        # Pass budget exhausted.
        if pass_num >= self.max_passes:
            return False

        # Diminishing returns: require a minimal improvement per pass.
        if len(self.coherence_history) >= 2:
            recent_improvement = self.coherence_history[-1] - self.coherence_history[-2]
            if recent_improvement < 0.01:
                return False

        return True

    def refine_features(self, features: torch.Tensor, corrections: torch.Tensor) -> torch.Tensor:
        """
        Apply one refinement step using the suggested corrections.

        Args:
            features: Current features (B, C, H, W).
            corrections: Correction tensor of the same shape.

        Returns:
            Refined features (residual update on `features`).
        """
        # Stack features with their corrections for the refinement net.
        combined = torch.cat([features, corrections], dim=1)
        refinement = self.refinement_net(combined)

        # Residual connection keeps refinement a perturbation.
        return features + refinement

    def forward(self, initial_features: torch.Tensor) -> Dict[str, torch.Tensor]:
        """
        Run iterative reflect-and-refine passes on `initial_features`.

        Args:
            initial_features: Starting feature tensor (B, C, H, W).

        Returns:
            dict with 'final_features', 'initial_features',
            'refinement_history' (one entry per reflection pass),
            'total_passes' (== len(refinement_history)), and
            'final_coherence' (score of the last pass).
        """
        current_features = initial_features
        pass_num = 0
        refinement_history = []

        # Fresh session: drop scores left over from a previous call.
        self.coherence_history.clear()

        while True:
            # Reflect on the current state.
            reflection_result = self.reflection_module(current_features)

            # Scalar coherence summary used by the stop criteria.
            coherence_score = reflection_result['coherence_analysis']['mean_coherence'].mean().item()
            self.coherence_history.append(coherence_score)

            # Record this pass.
            refinement_history.append({
                'pass': pass_num,
                'features': current_features.clone(),
                'coherence_score': coherence_score,
                'reflection_result': reflection_result
            })

            if not self.should_continue_refinement(coherence_score, pass_num):
                break

            # Apply the suggested corrections.
            corrections = reflection_result['corrections']
            current_features = self.refine_features(current_features, corrections)

            pass_num += 1

        return {
            'final_features': current_features,
            'initial_features': initial_features,
            'refinement_history': refinement_history,
            'total_passes': pass_num + 1,
            'final_coherence': coherence_score
        }
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def test_coherence_monitoring():
    """Smoke-test coherence metrics, self-reflection, and refinement."""
    print("Testing Coherence Monitoring and Self-Reflection...")

    # Synthetic feature map shared by all checks.
    b, c, h, w = 2, 64, 32, 32
    feats = torch.randn(b, c, h, w)

    # Static coherence metrics.
    sem = CoherenceMetrics.semantic_coherence(feats)
    struct = CoherenceMetrics.structural_coherence(feats)
    print(f"Semantic coherence shape: {sem.shape}")
    print(f"Structural coherence shape: {struct.shape}")
    print(f"Semantic coherence mean: {sem.mean().item():.4f}")
    print(f"Structural coherence mean: {struct.mean().item():.4f}")

    # Temporal metric over a short synthetic sequence.
    seq = [torch.randn(b, c, h, w) for _ in range(5)]
    temp = CoherenceMetrics.temporal_coherence(seq)
    print(f"Temporal coherence shape: {temp.shape}")
    print(f"Temporal coherence mean: {temp.mean().item():.4f}")

    # Self-reflection round trip.
    reflector = SelfReflectionModule(c)
    reflected = reflector(feats)
    print(f"Reflected features shape: {reflected['reflected_features'].shape}")
    print(f"Corrections shape: {reflected['corrections'].shape}")
    print(f"Overall coherence mean: {reflected['coherence_analysis']['overall_coherence'].mean().item():.4f}")

    # Iterative refinement.
    refiner = MultiPassRefinement(c, max_passes=3, coherence_threshold=0.9)
    refined = refiner(feats)
    print(f"Final features shape: {refined['final_features'].shape}")
    print(f"Total passes: {refined['total_passes']}")
    print(f"Final coherence: {refined['final_coherence']:.4f}")
    print(f"Refinement history length: {len(refined['refinement_history'])}")

    print("All coherence monitoring tests passed!")
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    test_coherence_monitoring()
|
| 409 |
+
|
toroidal_topology.py
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Toroidal Topology Module for Diffusion Models
|
| 3 |
+
|
| 4 |
+
This module implements toroidal topology functions for wrapping diffusion models
|
| 5 |
+
in a toroidal latent space, enabling cyclic continuity and self-reflection.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
import torch.nn.functional as F
|
| 11 |
+
import numpy as np
|
| 12 |
+
import math
|
| 13 |
+
from typing import Tuple, Optional, Union
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ToroidalCoordinates:
    """
    Coordinate transforms between Cartesian space and the torus.

    Parameterization:
        - R: major radius (torus center to the tube's center line)
        - r: minor radius (radius of the tube)
        - (theta, phi): angles in [0, 2*pi) around the main axis and
          around the tube, respectively.
    """

    def __init__(self, major_radius: float = 1.0, minor_radius: float = 0.3):
        self.R = major_radius  # Major radius
        self.r = minor_radius  # Minor radius

    def cartesian_to_toroidal(self, x: torch.Tensor, y: torch.Tensor, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Map Cartesian points onto toroidal angles (theta, phi).

        Args:
            x, y, z: Cartesian coordinate tensors of matching shape.

        Returns:
            (theta, phi): angular coordinates, each in [0, 2*pi).
        """
        two_pi = 2 * math.pi

        # Angle around the main (z) axis; tensor % is a true modulo,
        # so negative atan2 results land in [0, 2*pi).
        theta = torch.atan2(y, x) % two_pi

        # Angle around the tube, measured from the major circle.
        axis_dist = torch.sqrt(x * x + y * y)
        phi = torch.atan2(z, axis_dist - self.R) % two_pi

        return theta, phi

    def toroidal_to_cartesian(self, theta: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Map toroidal angles back to Cartesian coordinates.

        Args:
            theta, phi: Toroidal angular coordinates.

        Returns:
            (x, y, z): Cartesian coordinate tensors.
        """
        # Distance from the z-axis to the point on the tube surface.
        ring = self.R + self.r * torch.cos(phi)

        x = ring * torch.cos(theta)
        y = ring * torch.sin(theta)
        z = self.r * torch.sin(phi)

        return x, y, z
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class ToroidalLatentSpace(nn.Module):
    """
    Toroidal latent-space operations for diffusion models.

    Wraps standard latent tensors onto a torus (each element becomes an
    angle in [0, 2*pi)), providing cyclic continuity plus distance,
    geodesic interpolation, and a discrete curvature estimate used for
    coherence assessment.
    """

    def __init__(self, latent_dim: int, major_radius: float = 1.0, minor_radius: float = 0.3):
        """
        Args:
            latent_dim: Number of latent channels.
            major_radius, minor_radius: Torus geometry forwarded to the
                coordinate converter; wrapping itself is per-channel.
        """
        super().__init__()
        self.latent_dim = latent_dim
        self.coords = ToroidalCoordinates(major_radius, minor_radius)

        # Learnable per-channel affine transform applied before wrapping.
        self.embedding_scale = nn.Parameter(torch.ones(latent_dim))
        self.embedding_offset = nn.Parameter(torch.zeros(latent_dim))

    def wrap_to_torus(self, latent: torch.Tensor) -> torch.Tensor:
        """
        Wrap latent vectors to toroidal space with periodic boundaries.

        Args:
            latent: Tensor of shape (batch, channels, height, width).

        Returns:
            Tensor of the same shape with every element in [0, 2*pi).
        """
        scale = self.embedding_scale.view(1, -1, 1, 1)
        offset = self.embedding_offset.view(1, -1, 1, 1)
        scaled = latent * scale + offset

        # BUG FIX: torch.fmod keeps the dividend's sign, so the previous
        # fmod(x + 2*pi, 2*pi) returned negative values whenever
        # x < -2*pi. torch.remainder is a true modulo and always yields
        # a result in [0, 2*pi).
        return torch.remainder(scaled, 2 * math.pi)

    def toroidal_distance(self, latent1: torch.Tensor, latent2: torch.Tensor) -> torch.Tensor:
        """
        Distance between two latents on the torus.

        Args:
            latent1, latent2: Latent tensors of shape (B, C, H, W).

        Returns:
            Tensor of shape (B, 1, H, W): Euclidean norm over channels
            of the shortest per-channel angular differences.
        """
        wrapped1 = self.wrap_to_torus(latent1)
        wrapped2 = self.wrap_to_torus(latent2)

        diff = wrapped1 - wrapped2

        # Take the shorter way around each circle.
        diff = torch.where(diff > math.pi, diff - 2 * math.pi, diff)
        diff = torch.where(diff < -math.pi, diff + 2 * math.pi, diff)

        return torch.sqrt(torch.sum(diff ** 2, dim=1, keepdim=True))

    def toroidal_interpolation(self, latent1: torch.Tensor, latent2: torch.Tensor, t: float) -> torch.Tensor:
        """
        Interpolate along the shorter geodesic between two torus points.

        Args:
            latent1, latent2: Latent tensors on the torus.
            t: Interpolation parameter in [0, 1].

        Returns:
            Interpolated latent tensor, wrapped to [0, 2*pi).
        """
        wrapped1 = self.wrap_to_torus(latent1)
        wrapped2 = self.wrap_to_torus(latent2)

        # Angular differences along the shorter path.
        diff = wrapped2 - wrapped1
        diff = torch.where(diff > math.pi, diff - 2 * math.pi, diff)
        diff = torch.where(diff < -math.pi, diff + 2 * math.pi, diff)

        # Linear step along that path; remainder (true modulo) keeps the
        # result in [0, 2*pi), consistent with wrap_to_torus.
        return torch.remainder(wrapped1 + t * diff, 2 * math.pi)

    def compute_curvature(self, latent: torch.Tensor) -> torch.Tensor:
        """
        Discrete curvature estimate of the wrapped latent field.

        Uses circular finite differences (the trailing row/column seeds
        the first difference) and approximates curvature as the
        Laplacian magnitude |u_xx + u_yy|.

        Args:
            latent: Input latent tensor (B, C, H, W).

        Returns:
            Curvature tensor of shape (B, C, H, W).
        """
        wrapped = self.wrap_to_torus(latent)

        # First derivatives with periodic boundaries.
        grad_x = torch.diff(wrapped, dim=3, prepend=wrapped[:, :, :, -1:])
        grad_y = torch.diff(wrapped, dim=2, prepend=wrapped[:, :, -1:, :])

        # Second derivatives.
        grad_xx = torch.diff(grad_x, dim=3, prepend=grad_x[:, :, :, -1:])
        grad_yy = torch.diff(grad_y, dim=2, prepend=grad_y[:, :, -1:, :])

        # Laplacian magnitude as a curvature proxy.
        return torch.abs(grad_xx + grad_yy)

    def forward(self, latent: torch.Tensor) -> dict:
        """
        Compute toroidal properties of a latent tensor.

        Args:
            latent: Input latent tensor (B, C, H, W).

        Returns:
            dict with 'wrapped_latent', 'curvature', 'original_latent'.
        """
        return {
            'wrapped_latent': self.wrap_to_torus(latent),
            'curvature': self.compute_curvature(latent),
            'original_latent': latent
        }
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class ToroidalFlow(nn.Module):
    """
    Flow dynamics on the toroidal manifold.

    Derives a vector field from spatial gradients of the latent and
    nudges the latent along it, supporting the self-stabilizing
    behavior of the toroidal diffusion model.
    """

    def __init__(self, channels: int, flow_strength: float = 0.1):
        super().__init__()
        self.channels = channels
        self.flow_strength = flow_strength

        # Learnable channel-mixing transform for the flow field.
        self.flow_weights = nn.Parameter(torch.randn(channels, channels) * 0.1)
        self.flow_bias = nn.Parameter(torch.zeros(channels))

    def compute_flow_field(self, latent: torch.Tensor) -> torch.Tensor:
        """
        Build the flow field over the toroidal surface.

        Args:
            latent: Latent tensor of shape (batch, channels, height, width).

        Returns:
            Flow field of shape (batch, channels, height, width, 2),
            with the x- and y-components stacked in the last dimension.
        """
        # Circular spatial gradients (wrap-around prepend).
        dx = torch.diff(latent, dim=3, prepend=latent[:, :, :, -1:])
        dy = torch.diff(latent, dim=2, prepend=latent[:, :, -1:, :])

        # Mix channels through the learnable transform.
        bias = self.flow_bias.view(1, -1, 1, 1)
        vx = torch.einsum('bchw,cd->bdhw', dx, self.flow_weights) + bias
        vy = torch.einsum('bchw,cd->bdhw', dy, self.flow_weights) + bias

        return torch.stack([vx, vy], dim=-1)

    def apply_flow(self, latent: torch.Tensor, flow_field: torch.Tensor, dt: float = 0.01) -> torch.Tensor:
        """
        Advance the latent one Euler step along the flow field.

        Args:
            latent: Input latent tensor.
            flow_field: Field produced by compute_flow_field.
            dt: Integration time step.

        Returns:
            Latent tensor after one flow step.
        """
        vx = flow_field[..., 0]
        vy = flow_field[..., 1]

        # Simple explicit Euler update scaled by the flow strength.
        return latent + dt * self.flow_strength * (vx + vy)

    def forward(self, latent: torch.Tensor) -> torch.Tensor:
        """
        Apply one step of toroidal flow dynamics to `latent`.

        Args:
            latent: Input latent tensor.

        Returns:
            Latent tensor after flow application.
        """
        return self.apply_flow(latent, self.compute_flow_field(latent))
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def test_toroidal_operations():
    """Smoke-test coordinate transforms, latent wrapping, and flow."""
    print("Testing Toroidal Topology Operations...")

    # Round-trip a few angles through Cartesian space.
    converter = ToroidalCoordinates()
    theta = torch.tensor([0.0, math.pi/2, math.pi, 3*math.pi/2])
    phi = torch.tensor([0.0, math.pi/4, math.pi/2, math.pi])
    x, y, z = converter.toroidal_to_cartesian(theta, phi)
    theta_back, phi_back = converter.cartesian_to_toroidal(x, y, z)
    print(f"Original theta: {theta}")
    print(f"Recovered theta: {theta_back}")
    print(f"Original phi: {phi}")
    print(f"Recovered phi: {phi_back}")

    # Wrap a random latent and inspect derived quantities.
    space = ToroidalLatentSpace(latent_dim=4)
    sample = torch.randn(2, 4, 8, 8)
    wrapped = space(sample)
    print(f"Input latent shape: {sample.shape}")
    print(f"Wrapped latent shape: {wrapped['wrapped_latent'].shape}")
    print(f"Curvature shape: {wrapped['curvature'].shape}")

    # Distance between two random latents.
    a = torch.randn(1, 4, 8, 8)
    b = torch.randn(1, 4, 8, 8)
    print(f"Toroidal distance shape: {space.toroidal_distance(a, b).shape}")

    # Flow dynamics on the same latent.
    dynamics = ToroidalFlow(channels=4)
    print(f"Flowed latent shape: {dynamics(sample).shape}")

    print("All tests passed!")
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    test_toroidal_operations()
|
| 339 |
+
|