|
|
""" |
|
|
Evolution trace logging for OpenEvolve |
|
|
|
|
|
This module provides functionality to log detailed traces of program evolution, |
|
|
capturing state-action-reward transitions for RL training and analysis. |
|
|
""" |
|
|
|
|
|
import json |
|
|
import logging |
|
|
import time |
|
|
from dataclasses import asdict, dataclass |
|
|
from pathlib import Path |
|
|
from typing import Any, Dict, List, Optional, Union |
|
|
|
|
|
from openevolve.utils.trace_export_utils import ( |
|
|
append_trace_jsonl, |
|
|
export_traces, |
|
|
export_traces_json, |
|
|
) |
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
|
|
|
@dataclass
class EvolutionTrace:
    """A single state-action-reward transition recorded during evolution.

    Captures one parent -> child program step, with optional code, prompt,
    and artifact payloads attached after construction.
    """

    iteration: int
    timestamp: float
    parent_id: str
    child_id: str
    parent_metrics: Dict[str, Any]
    child_metrics: Dict[str, Any]
    parent_code: Optional[str] = None
    child_code: Optional[str] = None
    code_diff: Optional[str] = None
    prompt: Optional[Dict[str, str]] = None
    llm_response: Optional[str] = None
    improvement_delta: Optional[Dict[str, float]] = None
    island_id: Optional[int] = None
    generation: Optional[int] = None
    artifacts: Optional[Dict[str, Any]] = None
    metadata: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Return the trace as a dict, omitting fields that are None."""
        result: Dict[str, Any] = {}
        for field_name, value in asdict(self).items():
            if value is not None:
                result[field_name] = value
        return result

    def calculate_improvement(self) -> Dict[str, float]:
        """Compute child-minus-parent deltas for metrics present (and numeric) on both sides."""
        return {
            key: self.child_metrics[key] - self.parent_metrics[key]
            for key in self.child_metrics
            if key in self.parent_metrics
            and isinstance(self.parent_metrics[key], (int, float))
            and isinstance(self.child_metrics[key], (int, float))
        }
|
|
|
|
|
|
|
|
class EvolutionTracer: |
|
|
"""Manages evolution trace logging with support for multiple formats""" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
output_path: Optional[str] = None, |
|
|
format: str = "jsonl", |
|
|
include_code: bool = False, |
|
|
include_prompts: bool = True, |
|
|
enabled: bool = True, |
|
|
buffer_size: int = 10, |
|
|
compress: bool = False, |
|
|
): |
|
|
""" |
|
|
Initialize the evolution tracer |
|
|
|
|
|
Args: |
|
|
output_path: Path to save trace data |
|
|
format: Output format ('jsonl', 'json', 'hdf5') |
|
|
include_code: Whether to include full code in traces |
|
|
include_prompts: Whether to include prompts and LLM responses |
|
|
enabled: Whether tracing is enabled |
|
|
buffer_size: Number of traces to buffer before writing |
|
|
compress: Whether to compress output files |
|
|
""" |
|
|
self.enabled = enabled |
|
|
self.format = format |
|
|
self.include_code = include_code |
|
|
self.include_prompts = include_prompts |
|
|
self.compress = compress |
|
|
self.buffer_size = buffer_size |
|
|
|
|
|
|
|
|
self.stats = { |
|
|
"total_traces": 0, |
|
|
"improvement_count": 0, |
|
|
"total_improvement": {}, |
|
|
"best_improvement": {}, |
|
|
"worst_decline": {}, |
|
|
} |
|
|
|
|
|
if not self.enabled: |
|
|
logger.info("Evolution tracing is disabled") |
|
|
return |
|
|
|
|
|
|
|
|
if output_path: |
|
|
self.output_path = Path(output_path) |
|
|
else: |
|
|
self.output_path = Path(f"evolution_trace.{format}") |
|
|
|
|
|
|
|
|
if self.compress and format == "jsonl": |
|
|
self.output_path = self.output_path.with_suffix(".jsonl.gz") |
|
|
|
|
|
|
|
|
self.output_path.parent.mkdir(parents=True, exist_ok=True) |
|
|
|
|
|
|
|
|
self.buffer: List[EvolutionTrace] = [] |
|
|
|
|
|
|
|
|
if format == "json": |
|
|
self.json_traces = [] |
|
|
|
|
|
logger.info(f"Evolution tracer initialized: {self.output_path}") |
|
|
|
|
|
def log_trace( |
|
|
self, |
|
|
iteration: int, |
|
|
parent_program: Any, |
|
|
child_program: Any, |
|
|
prompt: Optional[Dict[str, str]] = None, |
|
|
llm_response: Optional[str] = None, |
|
|
artifacts: Optional[Dict[str, Any]] = None, |
|
|
island_id: Optional[int] = None, |
|
|
metadata: Optional[Dict[str, Any]] = None, |
|
|
) -> None: |
|
|
""" |
|
|
Log an evolution trace entry |
|
|
|
|
|
Args: |
|
|
iteration: Current iteration number |
|
|
parent_program: Parent program object |
|
|
child_program: Child program object |
|
|
prompt: Prompt used for evolution |
|
|
llm_response: LLM response |
|
|
artifacts: Any artifacts from evaluation |
|
|
island_id: Island ID if using island-based evolution |
|
|
metadata: Additional metadata |
|
|
""" |
|
|
if not self.enabled: |
|
|
return |
|
|
|
|
|
try: |
|
|
|
|
|
trace = EvolutionTrace( |
|
|
iteration=iteration, |
|
|
timestamp=time.time(), |
|
|
parent_id=parent_program.id, |
|
|
child_id=child_program.id, |
|
|
parent_metrics=parent_program.metrics, |
|
|
child_metrics=child_program.metrics, |
|
|
island_id=island_id, |
|
|
generation=child_program.generation, |
|
|
artifacts=artifacts, |
|
|
metadata=metadata, |
|
|
) |
|
|
|
|
|
|
|
|
if self.include_code: |
|
|
trace.parent_code = parent_program.code |
|
|
trace.child_code = child_program.code |
|
|
|
|
|
|
|
|
if self.include_prompts: |
|
|
trace.prompt = prompt |
|
|
trace.llm_response = llm_response |
|
|
|
|
|
|
|
|
trace.improvement_delta = trace.calculate_improvement() |
|
|
|
|
|
|
|
|
self._update_stats(trace) |
|
|
|
|
|
|
|
|
self.buffer.append(trace) |
|
|
|
|
|
|
|
|
if self.format == "json": |
|
|
self.json_traces.append(trace) |
|
|
|
|
|
|
|
|
if len(self.buffer) >= self.buffer_size: |
|
|
self.flush() |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Error logging evolution trace: {e}") |
|
|
|
|
|
def _update_stats(self, trace: EvolutionTrace): |
|
|
"""Update running statistics""" |
|
|
self.stats["total_traces"] += 1 |
|
|
|
|
|
if trace.improvement_delta: |
|
|
|
|
|
if "combined_score" in trace.improvement_delta: |
|
|
delta = trace.improvement_delta["combined_score"] |
|
|
if delta > 0: |
|
|
self.stats["improvement_count"] += 1 |
|
|
|
|
|
|
|
|
for metric, delta in trace.improvement_delta.items(): |
|
|
if metric not in self.stats["total_improvement"]: |
|
|
self.stats["total_improvement"][metric] = 0 |
|
|
self.stats["best_improvement"][metric] = delta |
|
|
self.stats["worst_decline"][metric] = delta |
|
|
|
|
|
self.stats["total_improvement"][metric] += delta |
|
|
|
|
|
if delta > self.stats["best_improvement"][metric]: |
|
|
self.stats["best_improvement"][metric] = delta |
|
|
if delta < self.stats["worst_decline"][metric]: |
|
|
self.stats["worst_decline"][metric] = delta |
|
|
|
|
|
def flush(self): |
|
|
"""Write buffered traces to file""" |
|
|
if not self.enabled or not self.buffer: |
|
|
return |
|
|
|
|
|
try: |
|
|
if self.format == "jsonl": |
|
|
|
|
|
for trace in self.buffer: |
|
|
append_trace_jsonl(trace, self.output_path, compress=self.compress) |
|
|
elif self.format == "json": |
|
|
|
|
|
pass |
|
|
elif self.format == "hdf5": |
|
|
|
|
|
|
|
|
pass |
|
|
|
|
|
|
|
|
if self.format == "jsonl": |
|
|
self.buffer.clear() |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Error flushing traces to file: {e}") |
|
|
|
|
|
def get_statistics(self) -> Dict[str, Any]: |
|
|
"""Get current tracing statistics""" |
|
|
return { |
|
|
**self.stats, |
|
|
"improvement_rate": ( |
|
|
self.stats["improvement_count"] / self.stats["total_traces"] |
|
|
if self.stats["total_traces"] > 0 |
|
|
else 0 |
|
|
), |
|
|
} |
|
|
|
|
|
def close(self): |
|
|
"""Close the tracer and flush remaining data""" |
|
|
if not self.enabled: |
|
|
return |
|
|
|
|
|
|
|
|
self.flush() |
|
|
|
|
|
|
|
|
if self.format == "json" and hasattr(self, "json_traces"): |
|
|
metadata = { |
|
|
"created_at": time.time(), |
|
|
"include_code": self.include_code, |
|
|
"include_prompts": self.include_prompts, |
|
|
"total_traces": len(self.json_traces), |
|
|
} |
|
|
export_traces_json(self.json_traces, self.output_path, metadata=metadata) |
|
|
elif self.format == "hdf5": |
|
|
|
|
|
all_traces = getattr(self, "json_traces", self.buffer) |
|
|
if all_traces: |
|
|
metadata = { |
|
|
"created_at": time.time(), |
|
|
"include_code": self.include_code, |
|
|
"include_prompts": self.include_prompts, |
|
|
} |
|
|
export_traces(all_traces, self.output_path, format="hdf5", metadata=metadata) |
|
|
|
|
|
|
|
|
stats = self.get_statistics() |
|
|
logger.info(f"Evolution tracing complete. Total traces: {stats['total_traces']}") |
|
|
logger.info(f"Improvement rate: {stats['improvement_rate']:.2%}") |
|
|
|
|
|
if stats["best_improvement"]: |
|
|
logger.info(f"Best improvements: {stats['best_improvement']}") |
|
|
if stats["worst_decline"]: |
|
|
logger.info(f"Worst declines: {stats['worst_decline']}") |
|
|
|
|
|
def __enter__(self): |
|
|
"""Context manager entry""" |
|
|
return self |
|
|
|
|
|
def __exit__(self, exc_type, exc_val, exc_tb): |
|
|
"""Context manager exit""" |
|
|
self.close() |
|
|
|
|
|
|
|
|
def extract_evolution_trace_from_checkpoint(
    checkpoint_dir: Union[str, Path],
    output_path: Optional[str] = None,
    format: str = "jsonl",
    include_code: bool = True,
    include_prompts: bool = True,
) -> List[EvolutionTrace]:
    """
    Extract evolution traces from an existing checkpoint directory

    Args:
        checkpoint_dir: Path to checkpoint directory
        output_path: Optional path to save extracted traces
        format: Output format ('jsonl', 'json')
        include_code: Whether to include code in traces
        include_prompts: Whether to include prompts in traces

    Returns:
        List of EvolutionTrace objects
    """
    checkpoint_path = Path(checkpoint_dir)
    if not checkpoint_path.exists():
        raise FileNotFoundError(f"Checkpoint directory {checkpoint_dir} not found")

    programs_dir = checkpoint_path / "programs"
    if not programs_dir.exists():
        raise FileNotFoundError(f"Programs directory not found in {checkpoint_dir}")

    logger.info(f"Extracting evolution traces from {checkpoint_dir}")

    # Load every program record, keyed by program id; skip unreadable files
    programs: Dict[str, Dict[str, Any]] = {}
    for prog_file in programs_dir.glob("*.json"):
        try:
            with open(prog_file, "r") as f:
                record = json.load(f)
            programs[record["id"]] = record
        except (json.JSONDecodeError, KeyError) as e:
            logger.warning(f"Error loading program from {prog_file}: {e}")
            continue

    logger.info(f"Loaded {len(programs)} programs from checkpoint")

    # One trace per program that has a resolvable parent in this checkpoint
    traces = []
    for child_id, child in programs.items():
        parent_id = child.get("parent_id")
        if not parent_id or parent_id not in programs:
            # Root programs (or orphans whose parent wasn't saved) yield no trace
            continue
        parent = programs[parent_id]

        trace = EvolutionTrace(
            iteration=child.get("iteration_found", 0),
            timestamp=child.get("timestamp", 0),
            parent_id=parent_id,
            child_id=child_id,
            parent_metrics=parent.get("metrics", {}),
            child_metrics=child.get("metrics", {}),
            generation=child.get("generation", 0),
            island_id=child.get("metadata", {}).get("island"),
            metadata=child.get("metadata", {}),
        )

        if include_code:
            trace.parent_code = parent.get("code", "")
            trace.child_code = child.get("code", "")

        if include_prompts and "prompts" in child:
            trace.prompt = child["prompts"].get("prompt")
            trace.llm_response = child["prompts"].get("response")

        trace.improvement_delta = trace.calculate_improvement()
        traces.append(trace)

    # Chronological order: by iteration, then timestamp
    traces.sort(key=lambda t: (t.iteration, t.timestamp))

    logger.info(f"Extracted {len(traces)} evolution traces")

    if output_path:
        export_traces(
            traces,
            output_path,
            format=format,
            metadata={
                "total_traces": len(traces),
                "extracted_at": time.time(),
                "source": "checkpoint",
            },
        )
        logger.info(f"Saved {len(traces)} traces to {output_path}")

    return traces
|
|
|
|
|
|
|
|
def extract_full_lineage_traces(
    checkpoint_dir: Union[str, Path], output_path: Optional[str] = None, format: str = "json"
) -> List[Dict[str, Any]]:
    """
    Extract complete evolution traces with full lineage chains and prompts/actions

    This function builds the complete evolution history for each program,
    tracing back through all ancestors to create full lineage chains.

    Args:
        checkpoint_dir: Path to checkpoint directory
        output_path: Optional path to save extracted traces
        format: Output format ('json' or 'jsonl')

    Returns:
        List of lineage trace dictionaries, sorted deepest-lineage first

    Raises:
        FileNotFoundError: If the checkpoint or its programs directory is missing
        ValueError: If an unsupported output format is requested with output_path
    """
    checkpoint_path = Path(checkpoint_dir)
    if not checkpoint_path.exists():
        raise FileNotFoundError(f"Checkpoint directory {checkpoint_dir} not found")

    programs_dir = checkpoint_path / "programs"
    if not programs_dir.exists():
        raise FileNotFoundError(f"Programs directory not found in {checkpoint_dir}")

    logger.info(f"Extracting full lineage traces from {checkpoint_dir}")

    # Load every program record, keyed by program id; unreadable files are skipped
    programs: Dict[str, Dict[str, Any]] = {}
    program_files = list(programs_dir.glob("*.json"))

    for prog_file in program_files:
        try:
            with open(prog_file, "r") as f:
                prog_data = json.load(f)
                programs[prog_data["id"]] = prog_data
        except (json.JSONDecodeError, KeyError) as e:
            logger.warning(f"Error loading program from {prog_file}: {e}")
            continue

    logger.info(f"Loaded {len(programs)} programs from checkpoint")

    traces: List[Dict[str, Any]] = []

    for program_id, program in programs.items():
        # Walk the parent chain from this program back to its root ancestor.
        # NOTE(review): lineages are built per program, so shared ancestors
        # appear in multiple traces — presumably intentional; confirm if
        # output size becomes an issue.
        lineage = []
        current = program

        while current:
            lineage.append(current)
            parent_id = current.get("parent_id")
            current = programs.get(parent_id) if parent_id else None

        # Put the oldest ancestor first so steps run root -> final program
        lineage.reverse()

        # Build one improvement record per parent -> child step in the chain
        improvements = []
        for i in range(len(lineage) - 1):
            parent = lineage[i]
            child = lineage[i + 1]

            # Recover the prompt/response ("action") that produced the child;
            # only the first prompt template found is recorded (see break below)
            prompts = child.get("prompts", {})
            action = None

            for template_key, prompt_data in prompts.items():
                action = {
                    "template": template_key,
                    "system_prompt": prompt_data.get("system", ""),
                    "user_prompt": prompt_data.get("user", ""),
                    "llm_response": (
                        prompt_data.get("responses", [""])[0]
                        if prompt_data.get("responses")
                        else ""
                    ),
                }
                break

            # Per-metric deltas, computed only where both sides are numeric
            improvement_deltas = {}
            if child.get("metrics") and parent.get("metrics"):
                for metric in child["metrics"].keys():
                    if metric in parent["metrics"]:
                        parent_val = parent["metrics"][metric]
                        child_val = child["metrics"][metric]
                        if isinstance(parent_val, (int, float)) and isinstance(
                            child_val, (int, float)
                        ):
                            improvement_deltas[metric] = child_val - parent_val

            improvement = {
                "step": i,
                "parent_id": parent["id"],
                "child_id": child["id"],
                "parent_metrics": parent.get("metrics", {}),
                "child_metrics": child.get("metrics", {}),
                "improvement": improvement_deltas,
                "generation": child.get("generation", 0),
                "iteration_found": child.get("iteration_found", 0),
                "changes_description": child.get("metadata", {}).get("changes", ""),
                "island_id": child.get("metadata", {}).get("island"),
                "action": action,
            }
            improvements.append(improvement)

        # Only programs that actually evolved (at least one step) produce a trace
        if improvements:
            trace = {
                "final_program_id": program_id,
                "final_metrics": program.get("metrics", {}),
                "generation_depth": len(lineage),
                "total_iterations": program.get("iteration_found", 0),
                "improvement_steps": improvements,
                "metadata": {
                    "language": program.get("language", ""),
                    "timestamp": program.get("timestamp", 0),
                },
            }
            traces.append(trace)

    # Deepest lineages first
    traces.sort(key=lambda x: x["generation_depth"], reverse=True)

    logger.info(f"Extracted {len(traces)} lineage traces")

    if output_path:
        if format == "json":
            metadata = {
                "total_traces": len(traces),
                "extracted_at": time.time(),
                "source": "checkpoint",
                "type": "full_lineage",
            }
            export_traces_json(traces, output_path, metadata=metadata)
        elif format == "jsonl":
            # Plain-dict traces are written directly, one JSON object per line
            output_path = Path(output_path)
            output_path.parent.mkdir(parents=True, exist_ok=True)
            with open(output_path, "w") as f:
                for trace in traces:
                    json.dump(trace, f)
                    f.write("\n")
        else:
            raise ValueError(f"Unsupported format: {format}. Use 'json' or 'jsonl'")

        logger.info(f"Saved {len(traces)} lineage traces to {output_path}")

    return traces
|
|
|