"""
Memory Compaction Scheduler Demonstration
Shows how the scheduler works without database dependencies
"""
|
|
| import asyncio |
| from datetime import datetime, timedelta |
| from dataclasses import dataclass |
| from enum import Enum |
| from typing import Dict, Any, List, Optional |
| import json |
|
|
| |
|
|
class ConsolidationType(Enum):
    """Strategies a compaction run can apply to stored memories."""

    TEMPORAL = "temporal"          # group/summarize by time period
    SEMANTIC = "semantic"          # merge conceptually related memories
    ASSOCIATIVE = "associative"    # build cross-links between memories
    HIERARCHICAL = "hierarchical"  # organize memories into layers
    COMPRESSION = "compression"    # shrink/deduplicate stored data
|
|
class CompactionTrigger(Enum):
    """Conditions that can cause a compaction schedule to fire."""

    TIME_BASED = "time_based"        # fixed interval elapsed
    THRESHOLD_BASED = "threshold"    # a metric crossed its limit
    ACTIVITY_BASED = "activity"      # system activity level changed
    IDLE_BASED = "idle"              # system went quiet
    EMERGENCY = "emergency"          # critical pressure, run now
    QUALITY_BASED = "quality"        # memory quality degraded
|
|
@dataclass
class CompactionSchedule:
    """Configuration plus runtime state for one scheduled compaction job."""

    schedule_id: str                             # unique key; also the dict key in the scheduler
    trigger: CompactionTrigger                   # condition type that fires this schedule
    interval: Optional[timedelta] = None         # period between runs (time-based triggers only)
    threshold: Optional[Dict[str, Any]] = None   # metric limits (threshold-based triggers only)
    active: bool = True                          # False disables the schedule without removing it
    last_run: Optional[datetime] = None          # timestamp of the most recent completed run
    next_run: Optional[datetime] = None          # when the next run is due (time-based triggers)
    run_count: int = 0                           # number of completed runs
|
|
class CompactionSchedulerDemo:
    """Demonstration of the Memory Compaction Scheduler.

    Simulates schedule registration, trigger evaluation, compaction runs,
    and metrics tracking without any database dependencies.  All "work" is
    simulated with fixed numbers; the printed narration is the product.
    """

    def __init__(self):
        # schedule_id -> CompactionSchedule for every registered job
        self.schedules: Dict[str, CompactionSchedule] = {}
        # append-only history of simulated compaction runs
        self.compaction_log = []
        # aggregate counters updated by _run_compaction()
        self.metrics = {
            "total_compactions": 0,
            "memories_processed": 0,
            "space_recovered": 0,   # bytes
            "last_compaction": None
        }
        self._initialize_default_schedules()

    def _initialize_default_schedules(self):
        """Register the built-in daily, hourly, and threshold schedules."""
        # Daily temporal consolidation pass.
        self.schedules["daily_consolidation"] = CompactionSchedule(
            schedule_id="daily_consolidation",
            trigger=CompactionTrigger.TIME_BASED,
            interval=timedelta(days=1),
            next_run=datetime.now() + timedelta(days=1)
        )

        # Hourly compression pass.
        self.schedules["hourly_compression"] = CompactionSchedule(
            schedule_id="hourly_compression",
            trigger=CompactionTrigger.TIME_BASED,
            interval=timedelta(hours=1),
            next_run=datetime.now() + timedelta(hours=1)
        )

        # Fires when the (simulated) memory count exceeds the limit.
        self.schedules["memory_threshold"] = CompactionSchedule(
            schedule_id="memory_threshold",
            trigger=CompactionTrigger.THRESHOLD_BASED,
            threshold={"memory_count": 10000}
        )

        print("📅 Initialized default schedules:")
        for schedule_id, schedule in self.schedules.items():
            print(f"  • {schedule_id}: {schedule.trigger.value}")

    def demonstrate_compaction_cycle(self):
        """Demonstrate a complete compaction cycle."""
        print("\n🔄 Demonstrating Compaction Cycle")
        print("=" * 60)

        # 1. Time-based schedule: show the real check, then force a run.
        daily = self.schedules["daily_consolidation"]
        print("\n1️⃣ Daily Consolidation Check:")
        print(f"  Next run: {daily.next_run.strftime('%Y-%m-%d %H:%M:%S')}")
        print(f"  Would trigger: {datetime.now() >= daily.next_run}")

        # The demo always forces the trigger (original code wrapped this in
        # a dead `if True:` guard, removed here).
        print("  ✅ Triggering daily consolidation...")
        self._run_compaction("daily_consolidation", ConsolidationType.TEMPORAL)
        daily.last_run = datetime.now()
        daily.next_run = datetime.now() + daily.interval
        daily.run_count += 1

        # 2. Threshold-based schedule with simulated memory pressure.
        threshold = self.schedules["memory_threshold"]
        print("\n2️⃣ Memory Threshold Check:")
        print(f"  Threshold: {threshold.threshold['memory_count']} memories")
        print("  Current count: 12,345 (simulated)")
        print("  Would trigger: True")

        print("  🚨 Triggering emergency compaction...")
        self._run_compaction("memory_threshold", ConsolidationType.COMPRESSION, emergency=True)

        # 3. Hourly compression schedule (informational only here).
        hourly = self.schedules["hourly_compression"]
        print("\n3️⃣ Hourly Compression Check:")
        print(f"  Next run: {hourly.next_run.strftime('%Y-%m-%d %H:%M:%S')}")
        print("  Compresses memories older than 7 days")

        self._show_metrics()

    def _run_compaction(self, schedule_id: str, compaction_type: ConsolidationType, emergency: bool = False):
        """Simulate running a compaction, then update metrics and the log.

        Args:
            schedule_id: Identifier of the schedule that triggered the run.
            compaction_type: Strategy being simulated (drives fake numbers).
            emergency: When True, reports maximum-compression mode
                (compression runs only).
        """
        start_time = datetime.now()

        # Defaults for strategies without a dedicated branch below
        # (ASSOCIATIVE and HIERARCHICAL fall through to these).
        memories_processed = 1000
        space_recovered = 1024 * 1024 * 5   # 5 MB

        if compaction_type == ConsolidationType.TEMPORAL:
            memories_processed = 5000
            space_recovered = 1024 * 1024 * 10   # 10 MB
            print("  • Grouped memories by time periods")
            print("  • Created daily summaries")
            print("  • Consolidated 5,000 memories")

        elif compaction_type == ConsolidationType.COMPRESSION:
            memories_processed = 2000
            space_recovered = 1024 * 1024 * 50   # 50 MB
            print("  • Compressed old memories")
            print("  • Removed redundant data")
            print("  • Freed 50MB of space")

            if emergency:
                print("  • 🚨 EMERGENCY MODE: Maximum compression applied")

        elif compaction_type == ConsolidationType.SEMANTIC:
            memories_processed = 3000
            space_recovered = 1024 * 1024 * 20   # 20 MB
            print("  • Identified semantic patterns")
            print("  • Merged related concepts")
            print("  • Consolidated 3,000 memories")

        # Aggregate counters.
        self.metrics["total_compactions"] += 1
        self.metrics["memories_processed"] += memories_processed
        self.metrics["space_recovered"] += space_recovered
        self.metrics["last_compaction"] = datetime.now()

        # Structured per-run record for later inspection.
        self.compaction_log.append({
            "timestamp": start_time,
            "schedule_id": schedule_id,
            "type": compaction_type.value,
            "memories_processed": memories_processed,
            "space_recovered": space_recovered,
            "duration": (datetime.now() - start_time).total_seconds()
        })

    def demonstrate_adaptive_strategies(self):
        """Demonstrate adaptive compaction strategies."""
        print("\n🎯 Demonstrating Adaptive Strategies")
        print("=" * 60)

        # Sleep-cycle-inspired phased consolidation.
        print("\n🌙 Sleep Cycle Compaction:")
        print("  Mimics human sleep cycles for optimal consolidation")

        phases = [
            ("REM-like", "Light consolidation", ConsolidationType.TEMPORAL, 5),
            ("Deep Sleep", "Semantic integration", ConsolidationType.SEMANTIC, 10),
            ("Sleep Spindles", "Associative linking", ConsolidationType.ASSOCIATIVE, 5),
            ("Cleanup", "Compression and optimization", ConsolidationType.COMPRESSION, 5)
        ]

        for phase_name, description, comp_type, duration in phases:
            print(f"\n  Phase: {phase_name} ({duration} minutes)")
            print(f"  • {description}")
            print(f"  • Type: {comp_type.value}")

        # Strategy selection keyed to current system activity level.
        print("\n📊 Activity-Based Adaptation:")

        activity_levels = [
            (0.2, "Low", "Aggressive compression"),
            (0.5, "Medium", "Balanced consolidation"),
            (0.8, "High", "Minimal interference")
        ]

        for level, name, strategy in activity_levels:
            print(f"\n  Activity Level: {level} ({name})")
            print(f"  • Strategy: {strategy}")
            if level < 0.3:
                print("  • Actions: Full compression, memory cleanup")
            elif level < 0.7:
                print("  • Actions: Hierarchical organization, moderate compression")
            else:
                print("  • Actions: Quick temporal consolidation only")

    def demonstrate_manual_control(self):
        """Demonstrate manual compaction control."""
        print("\n🎮 Demonstrating Manual Control")
        print("=" * 60)

        # 1. Operator-defined schedule added at runtime.
        print("\n1. Adding Custom Schedule:")
        custom_schedule = CompactionSchedule(
            schedule_id="weekend_deep_clean",
            trigger=CompactionTrigger.TIME_BASED,
            interval=timedelta(days=7),
            next_run=datetime.now() + timedelta(days=6)
        )
        self.schedules["weekend_deep_clean"] = custom_schedule
        print("  ✅ Added 'weekend_deep_clean' schedule")
        print("  • Runs weekly on weekends")
        print("  • Deep semantic consolidation")

        # 2. Immediate one-off run outside any schedule.
        print("\n2. Manual Trigger:")
        print("  Triggering immediate semantic compaction...")
        self._run_compaction("manual", ConsolidationType.SEMANTIC)
        print("  ✅ Manual compaction completed")

        # 3. Emergency protocol under simulated memory pressure.
        print("\n3. Emergency Response:")
        print("  Memory pressure detected: 95%")
        print("  🚨 Initiating emergency protocol...")
        print("  • Stopping non-essential schedules")
        print("  • Maximum compression mode")
        print("  • Priority: 1.0 (highest)")
        self._run_compaction("emergency", ConsolidationType.COMPRESSION, emergency=True)

    def _show_metrics(self):
        """Display current aggregate compaction metrics."""
        print("\n📊 Compaction Metrics:")
        print(f"  Total compactions: {self.metrics['total_compactions']}")
        print(f"  Memories processed: {self.metrics['memories_processed']:,}")
        print(f"  Space recovered: {self.metrics['space_recovered'] / (1024*1024):.1f} MB")
        if self.metrics['last_compaction']:
            print(f"  Last compaction: {self.metrics['last_compaction'].strftime('%Y-%m-%d %H:%M:%S')}")

    def show_schedule_status(self):
        """Show status of all registered schedules."""
        print("\n📅 Schedule Status")
        print("=" * 60)

        for schedule_id, schedule in self.schedules.items():
            print(f"\n{schedule_id}:")
            print(f"  • Trigger: {schedule.trigger.value}")
            print(f"  • Active: {'✅' if schedule.active else '❌'}")
            print(f"  • Run count: {schedule.run_count}")

            if schedule.last_run:
                print(f"  • Last run: {schedule.last_run.strftime('%Y-%m-%d %H:%M:%S')}")

            if schedule.next_run:
                time_until = schedule.next_run - datetime.now()
                hours = time_until.total_seconds() / 3600
                print(f"  • Next run: {schedule.next_run.strftime('%Y-%m-%d %H:%M:%S')} ({hours:.1f} hours)")

            if schedule.threshold:
                print(f"  • Threshold: {schedule.threshold}")

    def show_architecture(self):
        """Display the compaction architecture diagram (ASCII art)."""
        architecture = """
┌─────────────────────────────────────────────────────────────┐
│                 Memory Compaction Scheduler                 │
├─────────────────────────────────────────────────────────────┤
│                                                             │
│  ┌─────────────┐  ┌──────────────┐  ┌─────────────────┐     │
│  │  Scheduler  │  │   Triggers   │  │     Workers     │     │
│  │    Loop     │  │              │  │                 │     │
│  │             │  │ • Time-based │  │ • Worker 0      │     │
│  │ • Check     │  │ • Threshold  │  │ • Worker 1      │     │
│  │   schedules │  │ • Activity   │  │ • Worker 2      │     │
│  │ • Create    │  │ • Idle       │  │                 │     │
│  │   tasks     │  │ • Emergency  │  │   Concurrent    │     │
│  │ • Queue     │  │ • Quality    │  │   processing    │     │
│  │   tasks     │  │              │  │                 │     │
│  └─────────────┘  └──────────────┘  └─────────────────┘     │
│                                                             │
│  ┌─────────────────────────────────────────────────────┐    │
│  │               Compaction Strategies                 │    │
│  ├─────────────────────────────────────────────────────┤    │
│  │ • Temporal Consolidation  • Semantic Compression    │    │
│  │ • Hierarchical Ordering   • Associative Linking     │    │
│  │ • Quality-based Decay     • Emergency Compression   │    │
│  └─────────────────────────────────────────────────────┘    │
│                                                             │
│  ┌─────────────────────────────────────────────────────┐    │
│  │               Memory Layers (11-20)                 │    │
│  ├─────────────────────────────────────────────────────┤    │
│  │ • Consolidation Hub       • Decay Management        │    │
│  │ • Compression Layer       • Priority Optimization   │    │
│  │ • Integration Layer       • Index Maintenance       │    │
│  └─────────────────────────────────────────────────────┘    │
└─────────────────────────────────────────────────────────────┘
"""
        print("\n🏗️ Memory Compaction Architecture")
        print("=" * 60)
        print(architecture)
|
|
|
|
def main():
    """Run the full scheduler demonstration end to end."""
    intro = [
        "🚀 Memory Compaction Scheduler Demonstration",
        "=" * 60,
        "This demonstration shows how the memory compaction scheduler",
        "manages automated memory maintenance in the Nova system.",
        "",
    ]
    for line in intro:
        print(line)

    scheduler_demo = CompactionSchedulerDemo()

    # Walk through every demonstration scenario in order.
    scheduler_demo.show_architecture()
    scheduler_demo.demonstrate_compaction_cycle()
    scheduler_demo.demonstrate_adaptive_strategies()
    scheduler_demo.demonstrate_manual_control()
    scheduler_demo.show_schedule_status()

    # Closing summary.
    print("\n" + "=" * 60)
    print("✅ Demonstration Complete!")
    print("\nKey Takeaways:")
    takeaways = (
        "• Automatic scheduling reduces manual maintenance",
        "• Multiple trigger types handle different scenarios",
        "• Adaptive strategies optimize based on system state",
        "• Emergency handling ensures system stability",
        "• Comprehensive metrics track effectiveness",
    )
    for point in takeaways:
        print(point)
    print("\nThe Memory Compaction Scheduler ensures optimal memory")
    print("performance through intelligent, automated maintenance.")
|
|
|
|
# Run the demonstration only when executed as a script, not on import.
if __name__ == "__main__":
    main()