# Exported from the Hugging Face Spaces web UI; original commit:
# "Deploy Universal Prompt Optimizer to HF Spaces (clean)" (cacd4d0, by Suhasdev)
"""
Clean Logger for GEPA + LLEGO Optimization
Provides simple, visual logging similar to diagram format.
Uses the centralized logging infrastructure with a custom handler
for clean, user-friendly console output.
"""
import logging
import sys
from typing import List, Optional
# Dedicated logger for clean, user-facing output.
_clean_output_logger = logging.getLogger("gepa_optimizer.clean_output")


def _setup_clean_logger():
    """Attach a message-only stdout handler to the clean-output logger.

    Idempotent: returns immediately if the logger is already configured.
    """
    if _clean_output_logger.handlers:
        return
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    # Emit the raw message only — no timestamps, level names, or logger names.
    stream_handler.setFormatter(logging.Formatter("%(message)s"))
    _clean_output_logger.addHandler(stream_handler)
    _clean_output_logger.setLevel(logging.INFO)
    # Keep records out of the root logger so output is not duplicated.
    _clean_output_logger.propagate = False


# Configure the logger as soon as the module is imported.
_setup_clean_logger()
class CleanLogger:
    """
    Simple, visual logging for the optimization workflow.

    Tracks how many candidates each operator (GEPA reflection, LLEGO
    crossover, LLEGO mutation) produced in the current iteration and
    emits clean, user-friendly console output through the dedicated
    message-only logger.
    """

    def __init__(self):
        # 1-based iteration number; updated by log_iteration_start().
        self.current_iteration = 0
        # Per-operator candidate counters, reset at each iteration start.
        self.gepa_reflection_count = 0
        self.llego_crossover_count = 0
        self.llego_mutation_count = 0
        self._logger = _clean_output_logger

    def log_iteration_start(self, iteration: int, seed_prompt: Optional[str] = None):
        """Log the start of a new iteration and reset per-operator counters.

        Args:
            iteration: 1-based iteration number (iteration 1 evaluates the seed).
            seed_prompt: Optional seed prompt, printed on the first iteration.
        """
        self.current_iteration = iteration
        self.gepa_reflection_count = 0
        self.llego_crossover_count = 0
        self.llego_mutation_count = 0
        self._logger.info("")
        self._logger.info("═" * 80)
        # Iteration 1 only evaluates the seed; later iterations also generate.
        if iteration == 1:
            self._logger.info(f" ITERATION {iteration}: EVALUATING SEED PROMPT")
        else:
            self._logger.info(f" ITERATION {iteration}: EVALUATING & GENERATING CANDIDATES")
        self._logger.info("═" * 80)
        # BUG FIX: previously tested `iteration == 0`, which the 1-based
        # numbering above never produces, so the seed prompt was never shown.
        if seed_prompt and iteration == 1:
            self._logger.info("")
            self._logger.info("SEED PROMPT:")
            self._logger.info("─" * 80)
            self._logger.info(seed_prompt)
            self._logger.info("─" * 80)

    def _log_candidate(self, label: str, candidate_num: int, prompt: str):
        """Emit one labeled candidate prompt between horizontal rules.

        Shared helper for the three public candidate-logging methods; warns
        instead of printing when the prompt is empty or whitespace-only.
        """
        self._logger.info("")
        self._logger.info(f"{label} Candidate #{candidate_num}:")
        self._logger.info("─" * 80)
        if prompt and prompt.strip():
            self._logger.info(prompt)  # Show full prompt at INFO level
        else:
            self._logger.warning("⚠️ Empty candidate prompt!")
        self._logger.info("─" * 80)

    def log_gepa_reflection_candidate(self, candidate_num: int, prompt: str):
        """Log a GEPA reflection candidate."""
        self.gepa_reflection_count += 1
        self._log_candidate("GEPA Reflection", candidate_num, prompt)

    def log_llego_crossover_candidate(self, candidate_num: int, prompt: str):
        """Log a LLEGO crossover candidate."""
        self.llego_crossover_count += 1
        self._log_candidate("LLEGO Crossover", candidate_num, prompt)

    def log_llego_mutation_candidate(self, candidate_num: int, prompt: str):
        """Log a LLEGO mutation candidate."""
        self.llego_mutation_count += 1
        self._log_candidate("LLEGO Mutation", candidate_num, prompt)

    def log_candidate_generation_summary(self):
        """Log per-operator and total candidate counts for this iteration."""
        total = self.gepa_reflection_count + self.llego_crossover_count + self.llego_mutation_count
        self._logger.info("")
        self._logger.info("CANDIDATES GENERATED THIS ITERATION:")
        self._logger.info(f" GEPA Reflection: {self.gepa_reflection_count}")
        self._logger.info(f" LLEGO Crossover: {self.llego_crossover_count}")
        self._logger.info(f" LLEGO Mutation: {self.llego_mutation_count}")
        self._logger.info(f" TOTAL: {total}")

    def log_evaluation_results(self, candidate_prompts: List[str], scores: List[float]):
        """Log the score and a 100-character preview for each candidate.

        Args:
            candidate_prompts: Prompts in the same order as their scores.
            scores: One score per candidate prompt.
        """
        self._logger.info("")
        self._logger.info("═" * 80)
        self._logger.info(" EVALUATION RESULTS")
        self._logger.info("═" * 80)
        for i, (prompt, score) in enumerate(zip(candidate_prompts, scores), 1):
            self._logger.info("")  # was a pointless f-string literal
            self._logger.info(f"Candidate #{i}:")
            self._logger.info(f" Score: {score:.4f}")
            self._logger.info(f" Prompt Preview: {prompt[:100]}...")

    def log_pareto_front_update(self, pareto_size: int, best_score: float):
        """Log the current Pareto front size and its best score."""
        self._logger.info("")
        self._logger.info("═" * 80)
        self._logger.info(" PARETO FRONT UPDATE")
        self._logger.info("═" * 80)
        self._logger.info(f" Front Size: {pareto_size} candidates")
        self._logger.info(f" Best Score: {best_score:.4f}")

    def log_iteration_summary(self, iteration: int, total_candidates: int, best_score: float):
        """Log the end-of-iteration summary: totals, best score, per-operator counts."""
        self._logger.info("")
        self._logger.info("═" * 80)
        self._logger.info(f" ITERATION {iteration} SUMMARY")
        self._logger.info("═" * 80)
        self._logger.info(f" Candidates Evaluated: {total_candidates}")
        self._logger.info(f" Best Score: {best_score:.4f}")
        self._logger.info(f" GEPA Reflection: {self.gepa_reflection_count}")
        self._logger.info(f" LLEGO Crossover: {self.llego_crossover_count}")
        self._logger.info(f" LLEGO Mutation: {self.llego_mutation_count}")
# Module-level singleton shared by every caller of get_clean_logger().
_clean_logger_instance = CleanLogger()


def get_clean_logger() -> CleanLogger:
    """Return the shared module-level CleanLogger instance."""
    return _clean_logger_instance