Spaces:
Sleeping
Sleeping
| #!/usr/bin/env python3 | |
| # -*- coding: utf-8 -*- | |
| """ | |
| π Mem|8 OceanMind Visualizer π§ | |
| ================================= | |
| A visually stunning implementation of the Mem|8 wave-based memory architecture. | |
| This application creates an immersive experience to explore how memories propagate | |
| and interact like waves in an ocean of consciousness. | |
| Created by: Aye & Hue (with Trisha from Accounting keeping the numbers flowing) | |
| """ | |
| import spaces | |
| import os | |
| import gradio as gr | |
| import torch | |
| import numpy as np | |
| import matplotlib.pyplot as plt | |
| from matplotlib import cm | |
| import random | |
| import time | |
| from typing import Tuple, List, Dict, Optional, Union | |
| import json | |
| from datetime import datetime | |
| import plotly.graph_objects as go | |
| import plotly.express as px | |
| from plotly.subplots import make_subplots | |
| import colorsys | |
# Set seeds for reproducibility (but we'll allow for randomness too!)
RANDOM_SEED = 42
torch.manual_seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)

# Constants
DEFAULT_GRID_SIZE = 64   # Default NxN resolution of the memory grid
EMOTION_RANGE = (-5, 5)  # Range for emotional valence (matches the UI slider)
AROUSAL_RANGE = (0, 255)  # Range for arousal
MAX_SEED = 999999999  # Maximum seed value for art generation

# Initialize everything on CPU first
device = "cpu"  # Start on CPU, let spaces.GPU handle CUDA
# Module-level Stable Diffusion state: flipped/populated by init_stable_diffusion().
STABLE_DIFFUSION_AVAILABLE = False
pipe = None
def get_device():
    """Return the device string to compute on: "cuda" when available, else "cpu".

    Emits a console warning when falling back to the CPU.
    """
    if not torch.cuda.is_available():
        print("β οΈ Warning: CUDA not available, falling back to CPU")
        return "cpu"
    return "cuda"
def init_stable_diffusion():
    """Initialize Stable Diffusion on CPU first.

    Sets the module globals STABLE_DIFFUSION_AVAILABLE and pipe.
    If diffusers is not installed, or the model fails to load, the globals
    are left/reset so that art generation is cleanly disabled.
    """
    global STABLE_DIFFUSION_AVAILABLE, pipe
    try:
        # Import inside the function so the app still runs without diffusers.
        from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
        STABLE_DIFFUSION_AVAILABLE = True
        try:
            model_id = "stabilityai/stable-diffusion-xl-base-1.0"
            print(f"π Loading Stable Diffusion model: {model_id}")
            pipe = DiffusionPipeline.from_pretrained(
                model_id,
                torch_dtype=torch.float32,  # Use float32 consistently
                use_safetensors=True,
                # NOTE(review): SDXL publishes an "fp16" variant; full-precision
                # weights are the default files with no variant suffix, so
                # variant="fp32" may fail to resolve — confirm against the
                # pinned diffusers/model version.
                variant="fp32"  # Explicitly request fp32 variant
            )
            pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
            print("β¨ Stable Diffusion loaded on CPU")
        except Exception as e:
            # Model download/load failed: disable art generation but keep running.
            print(f"β Failed to initialize Stable Diffusion: {e}")
            STABLE_DIFFUSION_AVAILABLE = False
            pipe = None
    except ImportError:
        print("β diffusers package not available. Artistic visualization will be disabled.")
# Create a directory for memory snapshots if it doesn't exist
# (MemoryWave.save_memory_snapshot writes JSON files here).
MEMORY_DIR = "memory_snapshots"
os.makedirs(MEMORY_DIR, exist_ok=True)
class EmotionalContext:
    """Implements Mem|8's emotional context structure.

    Holds valence, arousal, context and safety as one-element tensors on a
    chosen device, and keeps a rolling history (last 100 samples) of
    valence/arousal updates for plotting.
    """

    def __init__(self, device_str="cpu"):
        self.device = device_str
        self.valence = torch.zeros(1, device=device_str)
        self.arousal = torch.zeros(1, device=device_str)
        self.context = torch.zeros(1, device=device_str)
        self.safety = torch.ones(1, device=device_str)
        # Rolling record of emotional updates for the history plot.
        self.history = {'valence': [], 'arousal': [], 'timestamps': []}

    def _to_device_tensor(self, value):
        """Coerce a float or tensor onto this context's device as a tensor."""
        if isinstance(value, torch.Tensor):
            return value if value.device == self.device else value.to(self.device)
        return torch.tensor([value], device=self.device)

    def update(self, valence: float, arousal: Optional[float] = None):
        """Update emotional context with new values.

        When arousal is omitted it is derived as |valence| * 2.
        """
        self.valence = self._to_device_tensor(valence)
        if arousal is None:
            self.arousal = torch.abs(self.valence * 2)
        else:
            self.arousal = self._to_device_tensor(arousal)
        # Record CPU-side floats so plotting never touches GPU tensors.
        self.history['valence'].append(float(self.valence.cpu().item()))
        self.history['arousal'].append(float(self.arousal.cpu().item()))
        self.history['timestamps'].append(time.time())
        # Cap the history at the most recent 100 samples.
        if len(self.history['valence']) > 100:
            for key in ('valence', 'arousal', 'timestamps'):
                self.history[key] = self.history[key][-100:]

    def get_color_mapping(self) -> Tuple[float, float, float]:
        """Maps emotional state to RGB color values."""
        val = self.valence.cpu().item()
        aro = self.arousal.cpu().item()
        # Hue follows valence across its full range; saturation follows arousal.
        hue = (val - EMOTION_RANGE[0]) / (EMOTION_RANGE[1] - EMOTION_RANGE[0])
        saturation = aro / AROUSAL_RANGE[1]
        return colorsys.hsv_to_rgb(hue, saturation, 1.0)

    def to(self, device_str):
        """Move the context to a different device (no-op if already there)."""
        if self.device == device_str:
            return self
        self.device = device_str
        for name in ('valence', 'arousal', 'context', 'safety'):
            setattr(self, name, getattr(self, name).to(device_str))
        return self

    def __str__(self) -> str:
        """String representation of emotional context."""
        return f"EmotionalContext(valence={self.valence.cpu().item():.2f}, arousal={self.arousal.cpu().item():.2f})"
class MemoryWave:
    """
    Implements the wave-based memory patterns from Mem|8 paper.
    This class creates and manipulates wave patterns that represent memories,
    allowing them to propagate, interfere, and resonate as described in the paper.

    State:
        grid:         size x size tensor (unused scratch grid)
        emotion:      EmotionalContext modulating all generated patterns
        memory_types: dict of 6 size x size tensors, one slot per memory kind
        history:      list of numpy snapshots of stored waves (last 100)
    """
    def __init__(self,
                 size: int = DEFAULT_GRID_SIZE,
                 device_str: str = "cpu"):
        """
        Initialize a memory wave system.
        Args:
            size: Size of the memory grid (NxN)
            device_str: Device to use for computations (defaults to CPU)
        """
        self.size = size
        self.device = device_str
        self.grid = torch.zeros((size, size), device=device_str)
        self.emotion = EmotionalContext(device_str)
        # Initialize coordinates for wave calculations: a [0, 2π] lattice.
        self.x = torch.linspace(0, 2*np.pi, size, device=device_str)
        self.y = torch.linspace(0, 2*np.pi, size, device=device_str)
        self.X, self.Y = torch.meshgrid(self.x, self.y, indexing='ij')
        # Memory storage for different types (slots 0-5).
        self.memory_types = {i: torch.zeros((size, size), device=device_str) for i in range(6)}
        # History of wave states for animation
        self.history = []

    def to(self, device_str):
        """Move the wave system (all tensors + emotional context) to a device."""
        if self.device == device_str:
            return self
        self.device = device_str
        self.grid = self.grid.to(device_str)
        self.emotion = self.emotion.to(device_str)
        self.x = self.x.to(device_str)
        self.y = self.y.to(device_str)
        self.X = self.X.to(device_str)
        self.Y = self.Y.to(device_str)
        self.memory_types = {k: v.to(device_str) for k, v in self.memory_types.items()}
        return self

    def create_wave(self,
                    frequency: float,
                    amplitude: float,
                    phase: float = 0.0,
                    direction: str = "radial") -> torch.Tensor:
        """Create a wave pattern as described in Mem|8 paper.

        Args:
            frequency: Spatial frequency of the wave.
            amplitude: Peak amplitude.
            phase: Phase offset in radians.
            direction: "radial", "linear_x", "linear_y" or "spiral".
        Returns:
            size x size tensor with the wave pattern.
        Raises:
            ValueError: For an unknown direction.
        """
        # Ensure scalar parameters are tensors on the right device.
        if not isinstance(frequency, torch.Tensor):
            frequency = torch.tensor(frequency, device=self.device)
        if not isinstance(amplitude, torch.Tensor):
            amplitude = torch.tensor(amplitude, device=self.device)
        if not isinstance(phase, torch.Tensor):
            phase = torch.tensor(phase, device=self.device)
        if direction == "radial":
            # Radial waves emanating from center (like dropping a stone in water)
            # NOTE(review): center is expressed in grid-index units while X/Y
            # span [0, 2π]; the distance field is therefore offset — confirm
            # this is the intended look.
            center_x, center_y = self.size/2, self.size/2
            distance = torch.sqrt((self.X - center_x)**2 + (self.Y - center_y)**2)
            wave = amplitude * torch.sin(frequency * distance + phase)
        elif direction == "linear_x":
            # Waves moving along x-axis
            wave = amplitude * torch.sin(frequency * self.X + phase)
        elif direction == "linear_y":
            # Waves moving along y-axis
            wave = amplitude * torch.sin(frequency * self.Y + phase)
        elif direction == "spiral":
            # Spiral wave pattern: radial distance plus 5x angular winding.
            center_x, center_y = self.size/2, self.size/2
            distance = torch.sqrt((self.X - center_x)**2 + (self.Y - center_y)**2)
            angle = torch.atan2(self.Y - center_y, self.X - center_x)
            wave = amplitude * torch.sin(frequency * distance + 5 * angle + phase)
        else:
            raise ValueError(f"Unknown direction: {direction}")
        return wave

    def apply_emotional_modulation(self, wave: torch.Tensor) -> torch.Tensor:
        """Apply emotional modulation to a wave pattern.

        Positive valence amplifies the wave exponentially; negative valence
        dampens it. Returns the modulated wave (wave * modulation factor).
        """
        # Ensure wave is on the right device
        if wave.device != self.device:
            wave = wave.to(self.device)
        # Emotional modulation formula from paper: M = AΒ·exp(iΟt-kx)Β·DΒ·E
        # We implement a simplified version where E is based on valence
        # NOTE(review): /128 assumes an 8-bit valence scale, but the UI slider
        # only spans -5..5, so the factor stays tiny (|factor| <= ~0.04) —
        # confirm the intended scale.
        valence_factor = self.emotion.valence / 128  # Normalize to -1 to 1 range
        # Different modulation based on valence sign
        if valence_factor > 0:
            # Positive emotions enhance wave (amplify)
            emotional_mod = torch.exp(valence_factor * wave)
        else:
            # Negative emotions suppress wave (dampen)
            emotional_mod = 1 / torch.exp(torch.abs(valence_factor) * wave)
        # Apply modulation
        modulated_wave = wave * emotional_mod
        return modulated_wave

    def create_interference(self, wave1: torch.Tensor, wave2: torch.Tensor,
                            interference_type: str = "constructive") -> torch.Tensor:
        """Create interference between two memory waves.

        "constructive" adds, "destructive" subtracts, "resonance" multiplies.
        Raises ValueError for any other type.
        """
        # Ensure waves are on the right device
        if wave1.device != self.device:
            wave1 = wave1.to(self.device)
        if wave2.device != self.device:
            wave2 = wave2.to(self.device)
        if interference_type == "constructive":
            # Simple addition for constructive interference
            return wave1 + wave2
        elif interference_type == "destructive":
            # Subtraction for destructive interference
            return wave1 - wave2
        elif interference_type == "resonance":
            # Multiplication for resonance
            return wave1 * wave2
        else:
            raise ValueError(f"Unknown interference type: {interference_type}")

    def apply_memory_blanket(self, wave: torch.Tensor, threshold: float = 0.5) -> torch.Tensor:
        """Apply the memory blanket concept.

        Zeroes out every point whose |amplitude| does not exceed `threshold`,
        keeping only the "important" parts of the wave.
        """
        # Ensure wave is on the right device
        if wave.device != self.device:
            wave = wave.to(self.device)
        if not isinstance(threshold, torch.Tensor):
            threshold = torch.tensor(threshold, device=self.device)
        # Calculate wave importance (amplitude)
        importance = torch.abs(wave)
        # Apply threshold filter (memory blanket)
        filtered_wave = wave * (importance > threshold).float()
        return filtered_wave

    def store_memory(self, wave: torch.Tensor, memory_type: int = 0) -> None:
        """Store a wave pattern in the given memory slot and append to history."""
        # Ensure wave is on the right device
        if wave.device != self.device:
            wave = wave.to(self.device)
        # Store the wave pattern
        self.memory_types[memory_type] = wave
        # Add to history for animation (move to CPU for numpy conversion)
        self.history.append(wave.cpu().numpy())
        # Keep history at a reasonable size
        if len(self.history) > 100:
            self.history = self.history[-100:]

    def generate_wave_memory(self,
                             emotion_valence: float,
                             wave_type: str = "radial",
                             frequency: float = 2.0,
                             amplitude: float = 1.0) -> Dict:
        """
        Generate a wave memory pattern with emotional context.
        Args:
            emotion_valence: Emotional valence value
            wave_type: Type of wave pattern
            frequency: Wave frequency
            amplitude: Wave amplitude
        Returns:
            Dict: Results including wave pattern and metrics
        """
        # Update emotional context
        self.emotion.update(emotion_valence)
        # Create base wave pattern
        wave = self.create_wave(frequency, amplitude, direction=wave_type)
        # Apply emotional modulation
        # NOTE(review): apply_emotional_modulation already returns wave * mod,
        # so the product below is effectively wave² * mod — confirm intended.
        emotional_mod = self.apply_emotional_modulation(wave)
        memory_state = wave * emotional_mod
        # Store in memory (slot 0 = plain wave memory)
        self.store_memory(memory_state, memory_type=0)
        # Calculate metrics
        metrics = {
            "shape": memory_state.shape,
            "emotional_modulation": emotional_mod.mean().item(),
            "memory_coherence": torch.linalg.norm(memory_state).item(),
            "max_amplitude": memory_state.max().item(),
            "min_amplitude": memory_state.min().item(),
            "mean_amplitude": memory_state.mean().item(),
        }
        return {
            "wave": memory_state.cpu().numpy(),
            "metrics": metrics,
            "emotion": {
                "valence": self.emotion.valence.item(),
                "arousal": self.emotion.arousal.item(),
            }
        }

    def generate_interference_pattern(self,
                                      emotion_valence: float,
                                      interference_type: str = "constructive",
                                      freq1: float = 2.0,
                                      freq2: float = 3.0,
                                      amp1: float = 1.0,
                                      amp2: float = 0.5) -> Dict:
        """
        Generate interference between two memory waves.
        Args:
            emotion_valence: Emotional valence value
            interference_type: Type of interference
            freq1: Frequency of first wave
            freq2: Frequency of second wave
            amp1: Amplitude of first wave
            amp2: Amplitude of second wave
        Returns:
            Dict: Results including interference pattern and metrics
        """
        # Update emotional context
        self.emotion.update(emotion_valence)
        # Create two wave patterns (one radial, one spiral)
        wave1 = self.create_wave(freq1, amp1, direction="radial")
        wave2 = self.create_wave(freq2, amp2, direction="spiral")
        # Create interference pattern
        interference = self.create_interference(wave1, wave2, interference_type)
        # Apply emotional weighting (sigmoid keeps the factor in (0, 1))
        emotional_weight = torch.sigmoid(self.emotion.valence/128) * interference
        # Store in memory (slot 1 = interference patterns)
        self.store_memory(emotional_weight, memory_type=1)
        # Calculate metrics
        metrics = {
            "pattern_strength": torch.max(emotional_weight).item(),
            "emotional_weight": self.emotion.valence.item()/128,
            "interference_type": interference_type,
            "wave1_freq": freq1,
            "wave2_freq": freq2,
        }
        return {
            "wave": emotional_weight.cpu().numpy(),
            "metrics": metrics,
            "emotion": {
                "valence": self.emotion.valence.item(),
                "arousal": self.emotion.arousal.item(),
            }
        }

    def generate_resonance_pattern(self,
                                   emotion_valence: float,
                                   base_freq: float = 2.0,
                                   resonance_strength: float = 0.5) -> Dict:
        """
        Generate emotional resonance patterns as described in the paper.
        Args:
            emotion_valence: Emotional valence value
            base_freq: Base frequency
            resonance_strength: Strength of resonance effect
        Returns:
            Dict: Results including resonance pattern and metrics
        """
        # Update emotional context
        self.emotion.update(emotion_valence)
        # Calculate resonance frequency based on emotional state (range 1-2)
        resonance_freq = 1.0 + torch.sigmoid(self.emotion.valence/128)
        # Create wave patterns
        base_wave = self.create_wave(base_freq, 1.0, direction="radial")
        resonant_wave = self.create_wave(resonance_freq.item(), 1.0, direction="spiral")
        # Create resonance (pointwise product scaled by strength)
        resonance = base_wave * resonant_wave * resonance_strength
        # Store in memory (slot 2 = resonance patterns)
        self.store_memory(resonance, memory_type=2)
        # Calculate metrics
        metrics = {
            "resonance_frequency": resonance_freq.item(),
            "pattern_energy": torch.sum(resonance**2).item(),
            "base_frequency": base_freq,
            "resonance_strength": resonance_strength,
        }
        return {
            "wave": resonance.cpu().numpy(),
            "metrics": metrics,
            "emotion": {
                "valence": self.emotion.valence.item(),
                "arousal": self.emotion.arousal.item(),
            }
        }

    def generate_memory_reconstruction(self,
                                       emotion_valence: float,
                                       corruption_level: float = 0.3) -> Dict:
        """
        Generate memory reconstruction as described in the paper.
        This simulates how Mem|8 reconstructs complete memories from partial patterns,
        similar to how digital cameras reconstruct full-color images from partial sensor data.
        Args:
            emotion_valence: Emotional valence value
            corruption_level: Level of corruption in the original memory (0-1)
        Returns:
            Dict: Results including original, corrupted and reconstructed patterns
        """
        # Update emotional context
        self.emotion.update(emotion_valence)
        # Create an original "memory" pattern
        original = self.create_wave(2.0, 1.0, direction="radial")
        # Create a corruption mask (1 = keep, 0 = corrupt)
        mask = torch.rand_like(original) > corruption_level
        # Apply corruption
        corrupted = original * mask
        # Reconstruct using a simple interpolation
        # In a real implementation, this would use more sophisticated algorithms
        reconstructed = torch.zeros_like(corrupted)
        # Simple 3x3 kernel averaging for missing values.
        # NOTE(review): the loop skips row/column 0 and size-1, so border
        # pixels stay zero even when they were NOT corrupted — this inflates
        # reconstruction_error; confirm whether borders should be copied.
        for i in range(1, self.size-1):
            for j in range(1, self.size-1):
                if not mask[i, j]:
                    # If this point is corrupted, reconstruct it from the
                    # surviving (uncorrupted) 8-neighborhood.
                    neighbors = [
                        original[i-1, j-1] if mask[i-1, j-1] else 0,
                        original[i-1, j] if mask[i-1, j] else 0,
                        original[i-1, j+1] if mask[i-1, j+1] else 0,
                        original[i, j-1] if mask[i, j-1] else 0,
                        original[i, j+1] if mask[i, j+1] else 0,
                        original[i+1, j-1] if mask[i+1, j-1] else 0,
                        original[i+1, j] if mask[i+1, j] else 0,
                        original[i+1, j+1] if mask[i+1, j+1] else 0,
                    ]
                    valid_neighbors = [n for n in neighbors if n != 0]
                    if valid_neighbors:
                        reconstructed[i, j] = sum(valid_neighbors) / len(valid_neighbors)
                else:
                    # If this point is not corrupted, keep original value
                    reconstructed[i, j] = original[i, j]
        # Apply emotional coloring to reconstruction
        emotional_factor = torch.sigmoid(self.emotion.valence/64)
        colored_reconstruction = reconstructed * emotional_factor
        # Store in memory (slot 3 = reconstructions)
        self.store_memory(colored_reconstruction, memory_type=3)
        # Calculate metrics
        reconstruction_error = torch.mean((original - reconstructed)**2).item()
        emotional_influence = emotional_factor.item()
        metrics = {
            "corruption_level": corruption_level,
            "reconstruction_error": reconstruction_error,
            "emotional_influence": emotional_influence,
            "reconstruction_fidelity": 1.0 - reconstruction_error,
        }
        return {
            "original": original.cpu().numpy(),
            "corrupted": corrupted.cpu().numpy(),
            "reconstructed": reconstructed.cpu().numpy(),
            "colored": colored_reconstruction.cpu().numpy(),
            "metrics": metrics,
            "emotion": {
                "valence": self.emotion.valence.item(),
                "arousal": self.emotion.arousal.item(),
            }
        }

    def generate_hot_tub_simulation(self,
                                    emotion_valence: float,
                                    comfort_level: float = 0.8,
                                    exploration_depth: float = 0.5) -> Dict:
        """
        Simulate the Hot Tub Mode concept from the paper.
        Hot Tub Mode provides a safe space for exploring alternate paths and difficult scenarios
        without judgment or permanent consequence.
        Args:
            emotion_valence: Emotional valence value
            comfort_level: Safety threshold (0-1)
            exploration_depth: How deep to explore alternate patterns (0-1)
        Returns:
            Dict: Results including safe exploration patterns and metrics
        """
        # Update emotional context
        self.emotion.update(emotion_valence)
        # Create base safe space wave (calm, regular pattern)
        safe_space = self.create_wave(1.0, 0.5, direction="radial")
        # Create exploration waves with increasing complexity
        exploration_waves = []
        for i in range(3):  # Three levels of exploration
            freq = 1.0 + (i + 1) * exploration_depth
            wave = self.create_wave(freq, 0.5 * (1 - i * 0.2), direction="spiral")
            exploration_waves.append(wave)
        # Combine waves based on comfort level
        combined = safe_space * comfort_level
        for i, wave in enumerate(exploration_waves):
            # Reduce influence of more complex patterns based on comfort
            influence = comfort_level * (1 - i * 0.3)
            combined += wave * influence
        # Apply emotional safety modulation (S = Ξ±C + Ξ²E + Ξ³D + Ξ΄L from paper)
        alpha = 0.4  # Comfort weight
        beta = 0.3   # Emotional weight
        gamma = 0.2  # Divergence weight
        delta = 0.1  # Lifeguard weight
        comfort_factor = torch.sigmoid(torch.tensor(comfort_level * 5))
        emotional_factor = torch.sigmoid(self.emotion.valence/128 + 0.5)
        divergence = torch.abs(combined - safe_space).mean()
        lifeguard_signal = torch.sigmoid(-divergence + comfort_level)
        safety_score = (alpha * comfort_factor +
                        beta * emotional_factor +
                        gamma * (1 - divergence) +
                        delta * lifeguard_signal)
        # Apply safety modulation
        safe_exploration = combined * safety_score
        # Store in memory only when the composite safety score clears 0.7
        # (slot 4 = hot-tub explorations).
        if safety_score > 0.7:
            self.store_memory(safe_exploration, memory_type=4)
        metrics = {
            "safety_score": safety_score.item(),
            "comfort_level": comfort_level,
            "emotional_safety": emotional_factor.item(),
            "divergence": divergence.item(),
            "lifeguard_signal": lifeguard_signal.item(),
        }
        return {
            "safe_space": safe_space.cpu().numpy(),
            "exploration": combined.cpu().numpy(),
            "safe_result": safe_exploration.cpu().numpy(),
            "metrics": metrics,
            "emotion": {
                "valence": self.emotion.valence.item(),
                "arousal": self.emotion.arousal.item(),
            }
        }

    def visualize_wave_pattern(self, wave: np.ndarray, title: str = "Wave Pattern") -> go.Figure:
        """Create an interactive 3D surface plot (Plotly) of a wave pattern."""
        fig = go.Figure(data=[
            go.Surface(
                z=wave,
                colorscale='viridis',
                showscale=True
            )
        ])
        fig.update_layout(
            title=title,
            scene=dict(
                xaxis_title="X",
                yaxis_title="Y",
                zaxis_title="Amplitude"
            ),
            width=600,
            height=600
        )
        return fig

    def visualize_emotional_history(self) -> go.Figure:
        """Create a two-row Plotly figure of valence/arousal over time.

        NOTE(review): assumes at least one update() has been recorded —
        min() on an empty timestamp list would raise.
        """
        fig = make_subplots(rows=2, cols=1,
                            subplot_titles=("Emotional Valence", "Emotional Arousal"))
        # Convert timestamps to seconds relative to the first sample.
        start_time = min(self.emotion.history['timestamps'])
        times = [(t - start_time) for t in self.emotion.history['timestamps']]
        # Plot valence
        fig.add_trace(
            go.Scatter(x=times, y=self.emotion.history['valence'],
                       mode='lines+markers',
                       name='Valence'),
            row=1, col=1
        )
        # Plot arousal
        fig.add_trace(
            go.Scatter(x=times, y=self.emotion.history['arousal'],
                       mode='lines+markers',
                       name='Arousal'),
            row=2, col=1
        )
        fig.update_layout(
            height=800,
            showlegend=True,
            title_text="Emotional History"
        )
        return fig

    def save_memory_snapshot(self, operation: str) -> str:
        """Save current memory state (emotion + all slots) as JSON in MEMORY_DIR.

        Returns the path of the written file.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"memory_{operation}_{timestamp}.json"
        filepath = os.path.join(MEMORY_DIR, filename)
        # Prepare data for saving (tensors -> nested lists for JSON)
        data = {
            'operation': operation,
            'timestamp': timestamp,
            'emotion': {
                'valence': float(self.emotion.valence.item()),
                'arousal': float(self.emotion.arousal.item())
            },
            'memory_types': {
                str(k): v.cpu().numpy().tolist()
                for k, v in self.memory_types.items()
            }
        }
        # Save to file
        with open(filepath, 'w') as f:
            json.dump(data, f)
        return filepath
def process_memory_operation(
    memory_wave: MemoryWave,
    operation: str,
    emotion_valence: float,
    grid_size: int = DEFAULT_GRID_SIZE,
    comfort_level: float = 0.8,
    exploration_depth: float = 0.5,
    generate_art: bool = True,
    seed: int = 42,
    force_cpu: bool = False
) -> Tuple[str, go.Figure, go.Figure, Optional[np.ndarray]]:
    """Run a memory operation, preferring the GPU when one is available.

    Args:
        memory_wave: The MemoryWave instance to operate on.
        operation: One of "wave_memory", "interference", "resonance",
            "reconstruction" or "hot_tub".
        emotion_valence: Emotional valence for the operation.
        grid_size: Desired grid size; the wave is re-initialized if it differs.
        comfort_level: Safety threshold (Hot Tub Mode only).
        exploration_depth: Exploration depth (Hot Tub Mode only).
        generate_art: Whether to render an artistic visualization.
        seed: Seed for art generation.
        force_cpu: Internal flag set by the out-of-memory fallback so the
            retry does not re-enter the GPU path.

    Returns:
        (metrics text, wave figure, emotion figure, optional art image),
        or (None, None, None, None) on unrecoverable error.
    """
    try:
        # Only move to CUDA when it is actually available; the previous code
        # called .to("cuda") unconditionally and crashed on CPU-only hosts.
        target_device = "cpu" if force_cpu or not torch.cuda.is_available() else "cuda"
        memory_wave.to(target_device)
        # Resize grid if needed. MemoryWave.__init__ takes `device_str`
        # (passing `device=` raised TypeError here before).
        if grid_size != memory_wave.size:
            memory_wave.__init__(size=grid_size, device_str=target_device)
        # Process based on operation type
        if operation == "wave_memory":
            result = memory_wave.generate_wave_memory(emotion_valence)
            wave_title = "Wave Memory Pattern"
            wave_data = result["wave"]
        elif operation == "interference":
            result = memory_wave.generate_interference_pattern(emotion_valence)
            wave_title = "Interference Pattern"
            wave_data = result["wave"]
        elif operation == "resonance":
            result = memory_wave.generate_resonance_pattern(emotion_valence)
            wave_title = "Resonance Pattern"
            wave_data = result["wave"]
        elif operation == "reconstruction":
            result = memory_wave.generate_memory_reconstruction(emotion_valence)
            wave_title = "Memory Reconstruction"
            wave_data = result["reconstructed"]
        elif operation == "hot_tub":
            result = memory_wave.generate_hot_tub_simulation(
                emotion_valence, comfort_level, exploration_depth
            )
            wave_title = "Hot Tub Exploration"
            wave_data = result["safe_result"]
        else:
            # Previously an unknown operation fell through and raised
            # UnboundLocalError on `result`; fail explicitly instead.
            raise ValueError(f"Unknown operation: {operation}")
        # Create visualizations
        wave_plot = memory_wave.visualize_wave_pattern(wave_data, wave_title)
        emotion_plot = memory_wave.visualize_emotional_history()
        # Generate artistic visualization if requested
        art_output = None
        if generate_art and STABLE_DIFFUSION_AVAILABLE and pipe is not None:
            prompt = generate_memory_prompt(operation, emotion_valence)
            art_output = generate_art_with_gpu(prompt, seed)
        # Format metrics for display
        metrics = result["metrics"]
        metrics_str = "π Analysis Results:\n\n"
        for key, value in metrics.items():
            if key == "shape":
                metrics_str += f"β’ {key.replace('_', ' ').title()}: {list(value)}\n"
            elif key == "interference_type":  # Handle string values
                metrics_str += f"β’ {key.replace('_', ' ').title()}: {value}\n"
            elif isinstance(value, (int, float)):  # Format numbers only
                metrics_str += f"β’ {key.replace('_', ' ').title()}: {value:.4f}\n"
            else:
                metrics_str += f"β’ {key.replace('_', ' ').title()}: {value}\n"
        metrics_str += f"\nπ Emotional Context:\n"
        metrics_str += f"β’ Valence: {result['emotion']['valence']:.2f}\n"
        metrics_str += f"β’ Arousal: {result['emotion']['arousal']:.2f}\n"
        # Save memory snapshot
        snapshot_path = memory_wave.save_memory_snapshot(operation)
        metrics_str += f"\nπΎ Memory snapshot saved: {snapshot_path}"
        # Move back to CPU so the wave is in a known state between requests.
        memory_wave.to("cpu")
        return metrics_str, wave_plot, emotion_plot, art_output
    except torch.cuda.OutOfMemoryError:
        print("β οΈ GPU out of memory - falling back to CPU")
        memory_wave.to("cpu")
        if pipe is not None:
            pipe.to("cpu")
        # force_cpu=True keeps the retry on the CPU — the previous retry
        # immediately moved back onto the GPU and could loop on OOM forever.
        return process_memory_operation(
            memory_wave, operation, emotion_valence, grid_size,
            comfort_level, exploration_depth, generate_art, seed,
            force_cpu=True
        )
    except Exception as e:
        print(f"β Error during processing: {e}")
        # Ensure we're back on CPU
        memory_wave.to("cpu")
        if pipe is not None:
            pipe.to("cpu")
        return None, None, None, None
def generate_art_with_gpu(prompt: str, seed: int = 42) -> Optional[np.ndarray]:
    """Generate art with Stable Diffusion, using the GPU when available.

    Args:
        prompt: Positive text prompt.
        seed: RNG seed for reproducible generations.

    Returns:
        The generated image, or None when the pipeline is unavailable or
        generation fails. The pipeline is always returned to the CPU.
    """
    if not STABLE_DIFFUSION_AVAILABLE or pipe is None:
        return None
    # The previous code moved the pipe to "cuda" unconditionally, which
    # crashed on CPU-only hosts.
    art_device = "cuda" if torch.cuda.is_available() else "cpu"
    try:
        pipe.to(art_device)
        if art_device == "cuda":
            # GPU memory-saving features; meaningless (or unsupported) on CPU.
            pipe.enable_model_cpu_offload()
            pipe.enable_vae_slicing()
            pipe.enable_vae_tiling()
            pipe.enable_attention_slicing(slice_size="max")
        # Seeded generator on the same device as the pipeline.
        generator = torch.Generator(art_device).manual_seed(seed)
        image = pipe(
            prompt=prompt,
            negative_prompt="text, watermark, signature, blurry, distorted",
            guidance_scale=1.5,
            num_inference_steps=8,
            width=768,
            height=768,
            generator=generator,
        ).images[0]
        return image
    except Exception as e:
        print(f"β Error generating art: {e}")
        return None
    finally:
        # Always park the pipeline back on the CPU to release GPU memory,
        # even when generation raised.
        pipe.to("cpu")
def generate_memory_prompt(operation: str, emotion_valence: float) -> str:
    """Build a Stable Diffusion prompt for a memory operation.

    Combines an operation-specific scene, a valence-dependent mood
    description, and a fixed artistic style. Unknown operations fall back
    to the "wave_memory" scene.
    """
    # Scene openers keyed by operation type.
    operation_prompts = {
        "wave_memory": "A serene ocean of consciousness with rippling waves of memory, ",
        "interference": "Multiple waves of thought intersecting and creating intricate patterns, ",
        "resonance": "Harmonious waves of memory resonating with emotional energy, ",
        "reconstruction": "Fragments of memory waves reforming into a complete pattern, ",
        "hot_tub": "A safe sanctuary of gentle memory waves with healing energy, "
    }
    # Mood tiers: first upper bound that exceeds the valence wins.
    mood_tiers = (
        (-3, "dark and turbulent, with deep indigo and violet hues, expressing profound melancholy"),
        (-1, "muted and somber, with cool blues and grays, showing gentle sadness"),
        (1, "balanced and neutral, with soft pastels, reflecting calm contemplation"),
        (3, "warm and uplifting, with golden yellows and soft oranges, radiating joy"),
    )
    for upper_bound, description in mood_tiers:
        if emotion_valence < upper_bound:
            emotion_desc = description
            break
    else:
        emotion_desc = "brilliant and ecstatic, with vibrant rainbow colors, bursting with happiness"
    # Fixed artistic style suffix.
    style = (
        "digital art in the style of a quantum visualization, "
        "highly detailed, smooth gradients, "
        "abstract yet meaningful, "
        "inspired by neural networks and consciousness"
    )
    base_prompt = operation_prompts.get(operation, operation_prompts["wave_memory"])
    return f"{base_prompt}{emotion_desc}, {style}"
def create_interface():
    """Create the Gradio interface for the Mem|8 Wave Memory Explorer.

    Builds a Blocks UI wired to process_memory_operation via a closure over
    a CPU-resident MemoryWave instance. Returns the Blocks object (caller
    is responsible for queue()/launch()).
    """
    # Initialize everything on CPU; GPU moves happen per-request.
    memory_wave = MemoryWave(device_str="cpu")
    # Create the interface
    with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue")) as demo:
        gr.Markdown("""
        # π Mem|8 Wave Memory Explorer
        Welcome to 8b.is's memory ocean demonstration! This showcase implements concepts from our Mem|8
        wave-based memory architecture paper, visualizing how memories propagate and interact like waves
        in an ocean of consciousness.
        > "Memory is not a storage unit, but a living ocean of waves" - Mem|8 Paper
        """)
        with gr.Row():
            with gr.Column(scale=1):
                # Left column: operation selection and parameters.
                operation_input = gr.Radio(
                    ["wave_memory", "interference", "resonance", "reconstruction", "hot_tub"],
                    label="Memory Operation",
                    value="wave_memory",
                    info="Select the type of memory operation to visualize"
                )
                emotion_input = gr.Slider(
                    minimum=EMOTION_RANGE[0],
                    maximum=EMOTION_RANGE[1],
                    value=0,
                    step=1,
                    label="Emotional Valence",
                    info="Emotional context from negative to positive"
                )
                grid_size = gr.Slider(
                    minimum=16,
                    maximum=128,
                    value=DEFAULT_GRID_SIZE,
                    step=16,
                    label="Memory Grid Size"
                )
                with gr.Accordion("Advanced Settings", open=False):
                    # Hot Tub Mode parameters (ignored by other operations).
                    comfort_level = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.8,
                        label="Comfort Level",
                        info="Safety threshold for Hot Tub Mode"
                    )
                    exploration_depth = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.5,
                        label="Exploration Depth",
                        info="How deep to explore in Hot Tub Mode"
                    )
                    generate_art = gr.Checkbox(
                        label="Generate Artistic Visualization",
                        value=True,
                        info="Use Stable Diffusion to create artistic representations"
                    )
                    seed = gr.Slider(
                        label="Art Generation Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=42
                    )
                run_btn = gr.Button("Generate Memory Wave", variant="primary")
            with gr.Column(scale=2):
                # Right column: outputs.
                output_text = gr.Textbox(label="Analysis Results", lines=10)
                with gr.Row():
                    wave_plot = gr.Plot(label="Wave Pattern")
                    emotion_plot = gr.Plot(label="Emotional History")
                # NOTE(review): visibility is evaluated once at build time, so
                # init_stable_diffusion() must run BEFORE create_interface()
                # for the art panel to ever show.
                art_output = gr.Image(label="Artistic Visualization", visible=STABLE_DIFFUSION_AVAILABLE)
        # Set up event handlers
        def process_with_memory_wave(*args):
            """Wrapper to ensure memory_wave is passed as first argument."""
            return process_memory_operation(memory_wave, *args)
        run_btn.click(
            process_with_memory_wave,  # Use wrapper function
            inputs=[
                operation_input,
                emotion_input,
                grid_size,
                comfort_level,
                exploration_depth,
                generate_art,
                seed
            ],
            outputs=[output_text, wave_plot, emotion_plot, art_output]
        )
        gr.Markdown("""
        ### π§ Understanding Wave Memory
        This demo visualizes key concepts from our Mem|8 paper:
        1. **Wave Memory**: Memories as propagating waves with emotional modulation
        2. **Interference**: How different memories interact and combine
        3. **Resonance**: Emotional resonance patterns in memory formation
        4. **Reconstruction**: How memories are rebuilt from partial patterns
        5. **Hot Tub Mode**: Safe exploration of memory patterns
        The visualization shows mathematical wave patterns, emotional history, and artistic
        interpretations of how memories flow through our consciousness.
        All computations are accelerated using Hugging Face's Zero GPU technology!
        """)
    return demo
if __name__ == "__main__":
    # Load Stable Diffusion BEFORE building the UI: create_interface() reads
    # STABLE_DIFFUSION_AVAILABLE to decide whether the art panel is visible,
    # so initializing afterwards (as the code previously did) meant the
    # panel was always hidden regardless of whether loading succeeded.
    init_stable_diffusion()
    # Configure Gradio for Hugging Face Spaces
    demo = create_interface()
    # Enable queuing for better resource management
    demo.queue(max_size=10)
    # Launch with Spaces-compatible settings
    demo.launch(
        server_name="0.0.0.0",  # Listen on all interfaces
        server_port=7860,  # Default Spaces port
        show_api=False,  # Hide API docs
        max_threads=10,  # Limit to 10 threads
        # NOTE(review): recent Gradio spells this parameter `ssr_mode`;
        # confirm `ssr` against the pinned Gradio version.
        ssr=False  # Disable SSR for better stability
    )