# openevolve/config.py
"""
Configuration handling for OpenEvolve
"""
import os
import re
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
import dacite
import yaml
if TYPE_CHECKING:
from openevolve.llm.base import LLMInterface
_ENV_VAR_PATTERN = re.compile(r"^\$\{([^}]+)\}$") # ${VAR}
def _resolve_env_var(value: Optional[str]) -> Optional[str]:
"""
Resolve ${VAR} environment variable reference in a string value.
    In the current implementation, the pattern must match the entire string
    (e.g., "${OPENAI_API_KEY}"); references embedded within other text are not expanded.
Args:
value: The string value that may contain ${VAR} syntax
Returns:
The resolved value with environment variable expanded, or original value if no match
Raises:
ValueError: If the environment variable is referenced but not set
"""
if value is None:
return None
match = _ENV_VAR_PATTERN.match(value)
if not match:
return value
var_name = match.group(1)
env_value = os.environ.get(var_name)
if env_value is None:
raise ValueError(f"Environment variable {var_name} is not set")
return env_value
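
# Usage sketch for _resolve_env_var (illustrative values; assumes the variable is
# set in the calling environment):
#
#   os.environ["OPENAI_API_KEY"] = "sk-example"      # hypothetical value
#   _resolve_env_var("${OPENAI_API_KEY}")            # -> "sk-example"
#   _resolve_env_var("plain-value")                  # -> "plain-value" (no ${VAR} wrapper)
#   _resolve_env_var("${UNSET_VAR}")                 # raises ValueError when UNSET_VAR is not set
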
@dataclass
class LLMModelConfig:
"""Configuration for a single LLM model"""
# API configuration
    api_base: Optional[str] = None
    api_key: Optional[str] = None
    name: Optional[str] = None
# Custom LLM client
init_client: Optional[Callable] = None
# Weight for model in ensemble
weight: float = 1.0
# Generation parameters
system_message: Optional[str] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_tokens: Optional[int] = None
# Request parameters
    timeout: Optional[int] = None
    retries: Optional[int] = None
    retry_delay: Optional[int] = None
# Reproducibility
random_seed: Optional[int] = None
# Reasoning parameters
reasoning_effort: Optional[str] = None
def __post_init__(self):
"""Post-initialization to resolve ${VAR} env var references in api_key"""
self.api_key = _resolve_env_var(self.api_key)
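
# Construction sketch (the model name is illustrative, and OPENAI_API_KEY is assumed
# to be set): an api_key written as "${VAR}" is resolved in __post_init__.
#
#   model = LLMModelConfig(name="gpt-4o-mini", api_key="${OPENAI_API_KEY}", weight=1.0)
#   assert model.api_key == os.environ["OPENAI_API_KEY"]
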
@dataclass
class LLMConfig(LLMModelConfig):
"""Configuration for LLM models"""
# API configuration
api_base: str = "https://api.openai.com/v1"
# Generation parameters
system_message: Optional[str] = "system_message"
temperature: float = 0.7
top_p: float = 0.95
max_tokens: int = 4096
# Request parameters
timeout: int = 60
retries: int = 3
retry_delay: int = 5
# n-model configuration for evolution LLM ensemble
models: List[LLMModelConfig] = field(default_factory=list)
# n-model configuration for evaluator LLM ensemble
    evaluator_models: List[LLMModelConfig] = field(default_factory=list)
    # Backwards compatibility with primary_model(_weight) options
    primary_model: Optional[str] = None
    primary_model_weight: Optional[float] = None
    secondary_model: Optional[str] = None
    secondary_model_weight: Optional[float] = None
# Reasoning parameters (inherited from LLMModelConfig but can be overridden)
reasoning_effort: Optional[str] = None
def __post_init__(self):
"""Post-initialization to set up model configurations"""
super().__post_init__() # Resolve ${VAR} in api_key at LLMConfig level
# Handle backward compatibility for primary_model(_weight) and secondary_model(_weight).
if self.primary_model:
# Create primary model
primary_model = LLMModelConfig(
name=self.primary_model, weight=self.primary_model_weight or 1.0
)
self.models.append(primary_model)
if self.secondary_model:
# Create secondary model (only if weight > 0)
if self.secondary_model_weight is None or self.secondary_model_weight > 0:
secondary_model = LLMModelConfig(
name=self.secondary_model,
weight=(
self.secondary_model_weight
if self.secondary_model_weight is not None
else 0.2
),
)
self.models.append(secondary_model)
# Only validate if this looks like a user config (has some model info)
# Don't validate during internal/default initialization
if (
self.primary_model
or self.secondary_model
or self.primary_model_weight
or self.secondary_model_weight
) and not self.models:
raise ValueError(
"No LLM models configured. Please specify 'models' array or "
"'primary_model' in your configuration."
)
# If no evaluator models are defined, use the same models as for evolution
if not self.evaluator_models:
self.evaluator_models = self.models.copy()
# Update models with shared configuration values
shared_config = {
"api_base": self.api_base,
"api_key": self.api_key,
"temperature": self.temperature,
"top_p": self.top_p,
"max_tokens": self.max_tokens,
"timeout": self.timeout,
"retries": self.retries,
"retry_delay": self.retry_delay,
"random_seed": self.random_seed,
"reasoning_effort": self.reasoning_effort,
}
self.update_model_params(shared_config)
def update_model_params(self, args: Dict[str, Any], overwrite: bool = False) -> None:
"""Update model parameters for all models"""
for model in self.models + self.evaluator_models:
for key, value in args.items():
if overwrite or getattr(model, key, None) is None:
setattr(model, key, value)
def rebuild_models(self) -> None:
"""Rebuild the models list after primary_model/secondary_model field changes"""
# Clear existing models lists
self.models = []
self.evaluator_models = []
# Re-run model generation logic from __post_init__
if self.primary_model:
# Create primary model
primary_model = LLMModelConfig(
name=self.primary_model, weight=self.primary_model_weight or 1.0
)
self.models.append(primary_model)
if self.secondary_model:
# Create secondary model (only if weight > 0)
if self.secondary_model_weight is None or self.secondary_model_weight > 0:
secondary_model = LLMModelConfig(
name=self.secondary_model,
weight=(
self.secondary_model_weight
if self.secondary_model_weight is not None
else 0.2
),
)
self.models.append(secondary_model)
# If no evaluator models are defined, use the same models as for evolution
if not self.evaluator_models:
self.evaluator_models = self.models.copy()
# Update models with shared configuration values
shared_config = {
"api_base": self.api_base,
"api_key": self.api_key,
"temperature": self.temperature,
"top_p": self.top_p,
"max_tokens": self.max_tokens,
"timeout": self.timeout,
"retries": self.retries,
"retry_delay": self.retry_delay,
"random_seed": self.random_seed,
"reasoning_effort": self.reasoning_effort,
}
self.update_model_params(shared_config)
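
# Backward-compatibility sketch (model names are illustrative): the legacy
# primary_model/secondary_model fields are expanded into the models list with the
# default weights implemented above (1.0 and 0.2).
#
#   cfg = LLMConfig(primary_model="gpt-4", secondary_model="gpt-3.5-turbo")
#   [(m.name, m.weight) for m in cfg.models]
#   # -> [("gpt-4", 1.0), ("gpt-3.5-turbo", 0.2)]
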
@dataclass
class PromptConfig:
"""Configuration for prompt generation"""
template_dir: Optional[str] = None
system_message: str = "system_message"
evaluator_system_message: str = "evaluator_system_message"
# Number of examples to include in the prompt
num_top_programs: int = 3
num_diverse_programs: int = 2
# Template stochasticity
use_template_stochasticity: bool = True
template_variations: Dict[str, List[str]] = field(default_factory=dict)
# Meta-prompting
# Note: meta-prompting features not implemented
use_meta_prompting: bool = False
meta_prompt_weight: float = 0.1
# Artifact rendering
include_artifacts: bool = True
max_artifact_bytes: int = 20 * 1024 # 20KB in prompt
artifact_security_filter: bool = True
# Feature extraction and program labeling
suggest_simplification_after_chars: Optional[int] = (
500 # Suggest simplifying if program exceeds this many characters
)
include_changes_under_chars: Optional[int] = (
100 # Include change descriptions in features if under this length
)
concise_implementation_max_lines: Optional[int] = (
10 # Label as "concise" if program has this many lines or fewer
)
comprehensive_implementation_min_lines: Optional[int] = (
50 # Label as "comprehensive" if program has this many lines or more
)
# Backward compatibility - deprecated
code_length_threshold: Optional[int] = (
None # Deprecated: use suggest_simplification_after_chars
)
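
# Sketch of template_variations (the key and phrasings are hypothetical; valid keys
# depend on the prompt templates in use). Each key maps to alternative phrasings that
# are sampled when use_template_stochasticity is enabled.
#
#   prompt_cfg = PromptConfig(
#       use_template_stochasticity=True,
#       template_variations={
#           "improvement_suggestion": [
#               "Try to improve the following program.",
#               "Refine the program below.",
#           ],
#       },
#   )
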
@dataclass
class DatabaseConfig:
"""Configuration for the program database"""
# General settings
db_path: Optional[str] = None # Path to store database on disk
in_memory: bool = True
# Prompt and response logging to programs/<id>.json
log_prompts: bool = True
# Evolutionary parameters
population_size: int = 1000
archive_size: int = 100
num_islands: int = 5
# Selection parameters
elite_selection_ratio: float = 0.1
exploration_ratio: float = 0.2
exploitation_ratio: float = 0.7
    # Note: diversity_metric is currently fixed to "edit_distance"; "feature_based" is not yet supported
    diversity_metric: str = "edit_distance"
# Feature map dimensions for MAP-Elites
# Default to complexity and diversity for better exploration
# CRITICAL: For custom dimensions, evaluators must return RAW VALUES, not bin indices
# Built-in: "complexity", "diversity", "score" (always available)
# Custom: Any metric from your evaluator (must be continuous values)
feature_dimensions: List[str] = field(
default_factory=lambda: ["complexity", "diversity"],
metadata={
"help": "List of feature dimensions for MAP-Elites grid. "
"Built-in dimensions: 'complexity', 'diversity', 'score'. "
"Custom dimensions: Must match metric names from evaluator. "
"IMPORTANT: Evaluators must return raw continuous values for custom dimensions, "
"NOT pre-computed bin indices. OpenEvolve handles all scaling and binning internally."
},
)
feature_bins: Union[int, Dict[str, int]] = 10 # Can be int (all dims) or dict (per-dim)
diversity_reference_size: int = 20 # Size of reference set for diversity calculation
# Migration parameters for island-based evolution
migration_interval: int = 50 # Migrate every N generations
migration_rate: float = 0.1 # Fraction of population to migrate
# Random seed for reproducible sampling
random_seed: Optional[int] = 42
# Artifact storage
artifacts_base_path: Optional[str] = None # Defaults to db_path/artifacts
artifact_size_threshold: int = 32 * 1024 # 32KB threshold
cleanup_old_artifacts: bool = True
artifact_retention_days: int = 30
novelty_llm: Optional["LLMInterface"] = None
embedding_model: Optional[str] = None
similarity_threshold: float = 0.99
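
# Binning sketch: feature_bins accepts a single int applied to every dimension, or a
# dict keyed by dimension name ("complexity" and "score" are built-in dimensions; the
# bin counts below are illustrative).
#
#   db_cfg = DatabaseConfig(
#       feature_dimensions=["complexity", "score"],
#       feature_bins={"complexity": 10, "score": 20},
#   )
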
@dataclass
class EvaluatorConfig:
"""Configuration for program evaluation"""
# General settings
timeout: int = 300 # Maximum evaluation time in seconds
max_retries: int = 3
# Resource limits for evaluation
# Note: resource limits not implemented
memory_limit_mb: Optional[int] = None
cpu_limit: Optional[float] = None
# Evaluation strategies
cascade_evaluation: bool = True
cascade_thresholds: List[float] = field(default_factory=lambda: [0.5, 0.75, 0.9])
# Parallel evaluation
parallel_evaluations: int = 1
# Note: distributed evaluation not implemented
distributed: bool = False
# LLM-based feedback
use_llm_feedback: bool = False
llm_feedback_weight: float = 0.1
# Artifact handling
enable_artifacts: bool = True
max_artifact_storage: int = 100 * 1024 * 1024 # 100MB per program
@dataclass
class EvolutionTraceConfig:
"""Configuration for evolution trace logging"""
enabled: bool = False
format: str = "jsonl" # Options: "jsonl", "json", "hdf5"
include_code: bool = False
include_prompts: bool = True
output_path: Optional[str] = None
buffer_size: int = 10
compress: bool = False
@dataclass
class Config:
"""Master configuration for OpenEvolve"""
# General settings
max_iterations: int = 10000
checkpoint_interval: int = 100
log_level: str = "INFO"
log_dir: Optional[str] = None
random_seed: Optional[int] = 42
    language: Optional[str] = None
file_suffix: str = ".py"
# Component configurations
llm: LLMConfig = field(default_factory=LLMConfig)
prompt: PromptConfig = field(default_factory=PromptConfig)
database: DatabaseConfig = field(default_factory=DatabaseConfig)
evaluator: EvaluatorConfig = field(default_factory=EvaluatorConfig)
evolution_trace: EvolutionTraceConfig = field(default_factory=EvolutionTraceConfig)
# Evolution settings
diff_based_evolution: bool = True
max_code_length: int = 10000
diff_pattern: str = r"<<<<<<< SEARCH\n(.*?)=======\n(.*?)>>>>>>> REPLACE"
# Early stopping settings
early_stopping_patience: Optional[int] = None
convergence_threshold: float = 0.001
early_stopping_metric: str = "combined_score"
# Parallel controller settings
max_tasks_per_child: Optional[int] = None
@classmethod
def from_yaml(cls, path: Union[str, Path]) -> "Config":
"""Load configuration from a YAML file"""
with open(path, "r") as f:
config_dict = yaml.safe_load(f)
return cls.from_dict(config_dict)
@classmethod
    def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":
        """Load configuration from a dictionary"""
        # Validate diff_pattern early so a malformed regex fails fast
        if "diff_pattern" in config_dict:
try:
re.compile(config_dict["diff_pattern"])
except re.error as e:
raise ValueError(f"Invalid regex pattern in diff_pattern: {e}")
config: Config = dacite.from_dict(
data_class=cls,
data=config_dict,
config=dacite.Config(cast=[List, Union], forward_references={"LLMInterface": Any}),
)
if config.database.random_seed is None and config.random_seed is not None:
config.database.random_seed = config.random_seed
return config
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
def to_yaml(self, path: Union[str, Path]) -> None:
"""Save configuration to a YAML file"""
with open(path, "w") as f:
yaml.dump(self.to_dict(), f, default_flow_style=False)
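
# Round-trip sketch (file paths are illustrative): load a YAML config, adjust a
# field, and write the result back out.
#
#   cfg = Config.from_yaml("configs/example.yaml")
#   cfg.max_iterations = 500
#   cfg.to_yaml("configs/example_tuned.yaml")
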
def load_config(config_path: Optional[Union[str, Path]] = None) -> Config:
"""Load configuration from a YAML file or use defaults"""
if config_path and os.path.exists(config_path):
config = Config.from_yaml(config_path)
else:
config = Config()
# Use environment variables if available
api_key = os.environ.get("OPENAI_API_KEY")
api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
config.llm.update_model_params({"api_key": api_key, "api_base": api_base})
    # Make the system message available to the individual models in case it is not
    # provided by the prompt sampler
config.llm.update_model_params({"system_message": config.prompt.system_message})
return config
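
# Typical entry-point usage (sketch): falls back to defaults when no path is given,
# with OPENAI_API_KEY / OPENAI_API_BASE picked up from the environment.
#
#   config = load_config("config.yaml")  # or load_config() for pure defaults
#   for model in config.llm.models:
#       print(model.name, model.weight)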