File size: 4,476 Bytes
77bcbf1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
"""
CASCADE Color Logging Example
Shows how to integrate beautiful colored logs throughout your system.
"""

from .kleene_logger import get_kleene_logger, LogLevel
from .interpretive_logger import get_interpretive_logger, ImpactLevel

def example_data_processing():
    """Demonstrate colored logging across a data-processing workflow."""
    state_log = get_kleene_logger("DataProcessor")
    impact_log = get_interpretive_logger("Data Pipeline")

    # Announce the start of dataset loading on both loggers.
    state_log.log(
        LogLevel.INFO,
        "load_dataset_start",
        state_before={"dataset": "smollm3-blueprint.pdf"},
    )
    impact_log.log(
        ImpactLevel.LOW,
        "DataLoader",
        "Loading dataset",
        context="Reading PDF file for analysis",
        consequence="Will extract text and metadata",
        metrics={"file_size": "1.0MB", "type": "PDF"},
    )

    # Intermediate step: text extraction with before/after state snapshots.
    state_log.log(
        LogLevel.DEBUG,
        "extract_text",
        state_before={"page": 1},
        state_after={"pages_processed": 15},
    )

    # Completion: report that a fixed point was reached after three passes.
    state_log.log(
        LogLevel.INFO,
        "processing_complete",
        state_after={"records": 500, "clean": True},
        fixed_point=True,
        iterations=3,
    )
    impact_log.log(
        ImpactLevel.MEDIUM,
        "DataProcessor",
        "Processing complete",
        context="Successfully extracted and cleaned data",
        consequence="Ready for forensics analysis",
        metrics={"records": 500, "pages": 15, "errors": 0},
    )

def example_model_observation():
    """Demonstrate colored logging for a model-observation workflow."""
    state_log = get_kleene_logger("ModelObserver")
    impact_log = get_interpretive_logger("Model Observatory")

    # Announce model loading on both loggers.
    state_log.log(
        LogLevel.INFO,
        "model_load_start",
        state_before={"model": "mistralai/Mixtral-8x22B-Instruct-v0.1"},
    )
    impact_log.log(
        ImpactLevel.MEDIUM,
        "ModelLoader",
        "Loading Mixtral",
        context="Loading 8x22B MoE model for inference",
        consequence="Will consume significant VRAM",
        metrics={"params": "141B", "active": "39B", "device": "cuda"},
    )

    # Begin the observation pass from the initial state.
    state_log.log(
        LogLevel.INFO,
        "observation_start",
        state_before={"layers": 0, "hash": "initial"},
    )

    # Observation converged: fixed point after all 64 layers.
    state_log.log(
        LogLevel.INFO,
        "observation_fixed_point",
        state_after={"layers": 64, "merkle": "abc123..."},
        fixed_point=True,
        iterations=64,
    )
    impact_log.log(
        ImpactLevel.LOW,
        "CASCADE",
        "Model observed",
        context="Cryptographic proof generated for model execution",
        consequence="Merkle root provides verifiable audit trail",
        metrics={"model": "Mixtral", "layers": 64, "merkle": "abc123..."},
    )

def example_error_handling():
    """Demonstrate colored logging for an error-and-recovery scenario."""
    state_log = get_kleene_logger("ErrorHandler")
    impact_log = get_interpretive_logger("System Monitor")

    # Report the memory-exhaustion failure on both loggers.
    state_log.log(
        LogLevel.ERROR,
        "memory_exhaustion",
        state_before={"memory": "15.8/16GB", "operation": "inference"},
        fixed_point=False,
    )
    impact_log.log(
        ImpactLevel.HIGH,
        "MemoryManager",
        "Out of memory",
        context="GPU memory exhausted during model inference",
        consequence="Inference failed, system degraded",
        metrics={"used": "15.8GB", "total": "16GB", "available": "200MB"},
        recommendation="Enable gradient checkpointing or use smaller batch size",
    )

    # Recovery path: fall back to CPU with a minimal batch size.
    state_log.log(
        LogLevel.WARNING,
        "fallback_activated",
        state_after={"mode": "cpu_fallback", "batch_size": 1},
    )
    impact_log.log(
        ImpactLevel.MEDIUM,
        "FallbackHandler",
        "CPU fallback activated",
        context="Switched to CPU inference due to memory constraints",
        consequence="Performance degraded but functionality preserved",
        metrics={"device": "cpu", "batch_size": 1, "slowdown": "10x"},
    )

# Script entry point: run every example in sequence.
# NOTE(review): this module uses relative imports (`from .kleene_logger ...`),
# so it must be run as a package module (python -m pkg.module), not as a
# bare file path — confirm the intended invocation.
if __name__ == "__main__":
    print("\n🎨 CASCADE Color Logging Examples\n")
    print("=" * 60)

    # Each demo is followed by the same visual separator line.
    for demo in (
        example_data_processing,
        example_model_observation,
        example_error_handling,
    ):
        demo()
        print("\n" + "=" * 60)

    print("\n✨ Beautiful logs are ready for production!")