File size: 6,501 Bytes
cacd4d0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
"""
Clean Logger for GEPA + LLEGO Optimization
Provides simple, visual logging similar to diagram format.

Uses the centralized logging infrastructure with a custom handler
for clean, user-friendly console output.
"""

import logging
import sys
from typing import List, Optional

# Create dedicated logger for clean output
_clean_output_logger = logging.getLogger("gepa_optimizer.clean_output")


def _setup_clean_logger():
    """Attach a message-only stdout handler to the clean-output logger.

    Idempotent: returns immediately when a handler is already attached,
    so repeated imports never produce duplicate console output.
    """
    if _clean_output_logger.handlers:
        return
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    # Message-only formatter: no timestamps, level names, or logger names.
    stream_handler.setFormatter(logging.Formatter("%(message)s"))
    _clean_output_logger.addHandler(stream_handler)
    _clean_output_logger.setLevel(logging.INFO)
    # Keep records from also reaching the root logger's handlers,
    # which would duplicate every line.
    _clean_output_logger.propagate = False


# Initialize on module load
_setup_clean_logger()


class CleanLogger:
    """
    Simple, visual logging for optimization workflow.

    Tracks per-iteration counts of candidates produced by each operator
    (GEPA reflection, LLEGO crossover, LLEGO mutation) and emits clean,
    user-friendly console output through a dedicated message-only logger.
    """

    def __init__(self):
        # Iteration currently being logged (set by log_iteration_start).
        self.current_iteration = 0
        # Per-iteration candidate counters; reset at each iteration start.
        self.gepa_reflection_count = 0
        self.llego_crossover_count = 0
        self.llego_mutation_count = 0
        self._logger = _clean_output_logger

    def log_iteration_start(self, iteration: int, seed_prompt: Optional[str] = None):
        """Log start of new iteration and reset the candidate counters.

        Args:
            iteration: Iteration number; the first iteration is treated
                as the seed-prompt evaluation.
            seed_prompt: Seed prompt text, printed only on the first
                iteration.
        """
        self.current_iteration = iteration
        self.gepa_reflection_count = 0
        self.llego_crossover_count = 0
        self.llego_mutation_count = 0

        self._logger.info("")
        self._logger.info("═" * 80)
        # More accurate description - we evaluate first, then generate
        if iteration == 1:
            self._logger.info(f"  ITERATION {iteration}: EVALUATING SEED PROMPT")
        else:
            self._logger.info(f"  ITERATION {iteration}: EVALUATING & GENERATING CANDIDATES")
        self._logger.info("═" * 80)

        # BUG FIX: the original tested `iteration == 0` here, while the
        # header branch above treats iteration 1 as the seed iteration —
        # under 1-based numbering the seed prompt was never shown.
        # `<= 1` accepts both conventions, staying backward-compatible.
        if seed_prompt and iteration <= 1:
            self._logger.info("")
            self._logger.info("SEED PROMPT:")
            self._logger.info("─" * 80)
            self._logger.info(seed_prompt)
            self._logger.info("─" * 80)

    def log_candidate_generation_summary(self):
        """Log summary of candidates generated this iteration."""
        total = self.gepa_reflection_count + self.llego_crossover_count + self.llego_mutation_count

        self._logger.info("")
        self._logger.info("CANDIDATES GENERATED THIS ITERATION:")
        self._logger.info(f"   GEPA Reflection:  {self.gepa_reflection_count}")
        self._logger.info(f"   LLEGO Crossover:  {self.llego_crossover_count}")
        self._logger.info(f"   LLEGO Mutation:   {self.llego_mutation_count}")
        self._logger.info(f"   TOTAL:            {total}")

    def _log_candidate(self, label: str, candidate_num: int, prompt: str):
        """Shared formatter for one generated candidate prompt.

        Args:
            label: Operator name shown in the heading (e.g. "GEPA Reflection").
            candidate_num: Candidate number shown in the heading.
            prompt: Candidate prompt text; a warning is logged when empty.
        """
        self._logger.info("")
        self._logger.info(f"{label} Candidate #{candidate_num}:")
        self._logger.info("─" * 80)
        if prompt and prompt.strip():
            self._logger.info(prompt)  # Show full prompt at INFO level
        else:
            self._logger.warning("⚠️  Empty candidate prompt!")
        self._logger.info("─" * 80)

    def log_gepa_reflection_candidate(self, candidate_num: int, prompt: str):
        """Log a GEPA reflection candidate."""
        self.gepa_reflection_count += 1
        self._log_candidate("GEPA Reflection", candidate_num, prompt)

    def log_llego_crossover_candidate(self, candidate_num: int, prompt: str):
        """Log a LLEGO crossover candidate."""
        self.llego_crossover_count += 1
        self._log_candidate("LLEGO Crossover", candidate_num, prompt)

    def log_llego_mutation_candidate(self, candidate_num: int, prompt: str):
        """Log a LLEGO mutation candidate."""
        self.llego_mutation_count += 1
        self._log_candidate("LLEGO Mutation", candidate_num, prompt)

    def log_evaluation_results(self, candidate_prompts: List[str], scores: List[float]):
        """Log evaluation results for all candidates.

        Args:
            candidate_prompts: Prompt text per candidate (zipped with scores).
            scores: Score per candidate.
        """
        self._logger.info("")
        self._logger.info("═" * 80)
        self._logger.info("  EVALUATION RESULTS")
        self._logger.info("═" * 80)

        for i, (prompt, score) in enumerate(zip(candidate_prompts, scores), 1):
            self._logger.info("")
            self._logger.info(f"Candidate #{i}:")
            self._logger.info(f"   Score: {score:.4f}")
            # Only append an ellipsis when the prompt was actually truncated.
            preview = prompt[:100] + ("..." if len(prompt) > 100 else "")
            self._logger.info(f"   Prompt Preview: {preview}")

    def log_pareto_front_update(self, pareto_size: int, best_score: float):
        """Log Pareto front update."""
        self._logger.info("")
        self._logger.info("═" * 80)
        self._logger.info("  PARETO FRONT UPDATE")
        self._logger.info("═" * 80)
        self._logger.info(f"   Front Size: {pareto_size} candidates")
        self._logger.info(f"   Best Score: {best_score:.4f}")

    def log_iteration_summary(self, iteration: int, total_candidates: int, best_score: float):
        """Log iteration summary."""
        self._logger.info("")
        self._logger.info("═" * 80)
        self._logger.info(f"  ITERATION {iteration} SUMMARY")
        self._logger.info("═" * 80)
        self._logger.info(f"   Candidates Evaluated: {total_candidates}")
        self._logger.info(f"   Best Score: {best_score:.4f}")
        self._logger.info(f"   GEPA Reflection: {self.gepa_reflection_count}")
        self._logger.info(f"   LLEGO Crossover: {self.llego_crossover_count}")
        self._logger.info(f"   LLEGO Mutation: {self.llego_mutation_count}")


# Module-wide shared instance
_clean_logger_instance = CleanLogger()


def get_clean_logger() -> CleanLogger:
    """Return the module-wide CleanLogger singleton instance."""
    return _clean_logger_instance