| """ |
| Configuration handling for OpenEvolve |
| """ |
|
|
| import os |
| import re |
| from dataclasses import asdict, dataclass, field |
| from pathlib import Path |
| from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union |
|
|
| import dacite |
| import yaml |
|
|
| if TYPE_CHECKING: |
| from openevolve.llm.base import LLMInterface |
|
|
|
|
| _ENV_VAR_PATTERN = re.compile(r"^\$\{([^}]+)\}$") |
|
|
|
|
| def _resolve_env_var(value: Optional[str]) -> Optional[str]: |
| """ |
| Resolve ${VAR} environment variable reference in a string value. |
| In current implementation pattern must match the entire string (e.g., "${OPENAI_API_KEY}"), |
| not embedded within other text. |
| |
| Args: |
| value: The string value that may contain ${VAR} syntax |
| |
| Returns: |
| The resolved value with environment variable expanded, or original value if no match |
| |
| Raises: |
| ValueError: If the environment variable is referenced but not set |
| """ |
| if value is None: |
| return None |
|
|
| match = _ENV_VAR_PATTERN.match(value) |
| if not match: |
| return value |
|
|
| var_name = match.group(1) |
| env_value = os.environ.get(var_name) |
| if env_value is None: |
| raise ValueError(f"Environment variable {var_name} is not set") |
| return env_value |
|
|
|
|
@dataclass
class LLMModelConfig:
    """Configuration for a single LLM model"""

    # API endpoint and credentials; api_key supports "${VAR}" env references
    # (resolved in __post_init__ via _resolve_env_var)
    api_base: Optional[str] = None
    api_key: Optional[str] = None
    name: Optional[str] = None

    # Optional factory callable for constructing the API client
    init_client: Optional[Callable] = None

    # Relative weight when several models are configured together
    weight: float = 1.0

    # Generation parameters; None means "inherit the shared default"
    # (filled in by LLMConfig.update_model_params)
    system_message: Optional[str] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_tokens: Optional[int] = None

    # Request behavior; None means "inherit the shared default"
    timeout: Optional[int] = None
    retries: Optional[int] = None
    retry_delay: Optional[int] = None

    # Seed forwarded to the model for reproducible sampling
    random_seed: Optional[int] = None

    # Reasoning-effort hint for models that accept one
    reasoning_effort: Optional[str] = None

    def __post_init__(self):
        """Post-initialization to resolve ${VAR} env var references in api_key"""
        self.api_key = _resolve_env_var(self.api_key)
|
|
|
@dataclass
class LLMConfig(LLMModelConfig):
    """Configuration for LLM models.

    Shared generation/request defaults declared here are propagated to every
    entry in ``models`` and ``evaluator_models`` whose corresponding field is
    still None.
    """

    # Default API endpoint shared by all models
    api_base: str = "https://api.openai.com/v1"

    # Shared generation defaults (placeholder system message is replaced
    # later from PromptConfig by load_config)
    system_message: Optional[str] = "system_message"
    temperature: float = 0.7
    top_p: float = 0.95
    max_tokens: int = 4096

    # Shared request behavior defaults
    timeout: int = 60
    retries: int = 3
    retry_delay: int = 5

    # Explicit model ensemble used for evolution
    models: List[LLMModelConfig] = field(default_factory=list)

    # Ensemble used for evaluation; defaults to a copy of `models` when empty
    evaluator_models: List[LLMModelConfig] = field(default_factory=list)

    # Backwards-compatible shorthand for a one/two-model ensemble
    primary_model: Optional[str] = None
    primary_model_weight: Optional[float] = None
    secondary_model: Optional[str] = None
    secondary_model_weight: Optional[float] = None

    # Reasoning-effort hint propagated to all models
    reasoning_effort: Optional[str] = None

    def __post_init__(self):
        """Post-initialization to set up model configurations"""
        super().__post_init__()

        self._append_shorthand_models()

        # Reject configurations that set shorthand fields yet still produce
        # an empty model list (e.g. only a weight was given, no model name)
        if (
            self.primary_model
            or self.secondary_model
            or self.primary_model_weight
            or self.secondary_model_weight
        ) and not self.models:
            raise ValueError(
                "No LLM models configured. Please specify 'models' array or "
                "'primary_model' in your configuration."
            )

        self._finalize_model_lists()

    def _append_shorthand_models(self) -> None:
        """Translate primary/secondary shorthand fields into model entries."""
        if self.primary_model:
            self.models.append(
                LLMModelConfig(
                    name=self.primary_model, weight=self.primary_model_weight or 1.0
                )
            )

        if self.secondary_model:
            # An explicit non-positive weight disables the secondary model
            if self.secondary_model_weight is None or self.secondary_model_weight > 0:
                self.models.append(
                    LLMModelConfig(
                        name=self.secondary_model,
                        weight=(
                            self.secondary_model_weight
                            if self.secondary_model_weight is not None
                            else 0.2
                        ),
                    )
                )

    def _finalize_model_lists(self) -> None:
        """Default evaluator models and fill unset per-model parameters."""
        if not self.evaluator_models:
            self.evaluator_models = self.models.copy()

        # Push shared defaults into any model field that is still None
        shared_config = {
            "api_base": self.api_base,
            "api_key": self.api_key,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "max_tokens": self.max_tokens,
            "timeout": self.timeout,
            "retries": self.retries,
            "retry_delay": self.retry_delay,
            "random_seed": self.random_seed,
            "reasoning_effort": self.reasoning_effort,
        }
        self.update_model_params(shared_config)

    def update_model_params(self, args: Dict[str, Any], overwrite: bool = False) -> None:
        """Update model parameters for all models.

        Args:
            args: Mapping of parameter name to value.
            overwrite: When True, replace existing values; otherwise only
                fill parameters that are currently None.
        """
        for model in self.models + self.evaluator_models:
            for key, value in args.items():
                if overwrite or getattr(model, key, None) is None:
                    setattr(model, key, value)

    def rebuild_models(self) -> None:
        """Rebuild the models list after primary_model/secondary_model field changes"""
        self.models = []
        self.evaluator_models = []
        self._append_shorthand_models()
        self._finalize_model_lists()
|
|
|
|
@dataclass
class PromptConfig:
    """Configuration for prompt generation"""

    # Template sources and system messages
    template_dir: Optional[str] = None
    system_message: str = "system_message"
    evaluator_system_message: str = "evaluator_system_message"

    # How many prior programs of each kind to show in the prompt
    num_top_programs: int = 3
    num_diverse_programs: int = 2

    # Random template variation
    use_template_stochasticity: bool = True
    template_variations: Dict[str, List[str]] = field(default_factory=dict)

    # Meta-prompting
    use_meta_prompting: bool = False
    meta_prompt_weight: float = 0.1

    # Artifact inclusion in prompts
    include_artifacts: bool = True
    max_artifact_bytes: int = 20 * 1024  # 20 KiB
    artifact_security_filter: bool = True

    # Size thresholds (in characters/lines) steering prompt guidance
    suggest_simplification_after_chars: Optional[int] = 500
    include_changes_under_chars: Optional[int] = 100
    concise_implementation_max_lines: Optional[int] = 10
    comprehensive_implementation_min_lines: Optional[int] = 50

    # Optional code-length threshold; consumed outside this module
    code_length_threshold: Optional[int] = None
|
|
|
|
@dataclass
class DatabaseConfig:
    """Configuration for the program database"""

    # Storage: on-disk path (when set) and/or in-memory operation
    db_path: Optional[str] = None
    in_memory: bool = True

    # Whether to store the prompts used to produce each program
    log_prompts: bool = True

    # Population structure
    population_size: int = 1000
    archive_size: int = 100
    num_islands: int = 5

    # Selection ratios (interpretation defined by the database implementation)
    elite_selection_ratio: float = 0.1
    exploration_ratio: float = 0.2
    exploitation_ratio: float = 0.7

    # Metric used to measure program diversity
    diversity_metric: str = "edit_distance"

    # MAP-Elites feature grid (see metadata help text below)
    feature_dimensions: List[str] = field(
        default_factory=lambda: ["complexity", "diversity"],
        metadata={
            "help": "List of feature dimensions for MAP-Elites grid. "
            "Built-in dimensions: 'complexity', 'diversity', 'score'. "
            "Custom dimensions: Must match metric names from evaluator. "
            "IMPORTANT: Evaluators must return raw continuous values for custom dimensions, "
            "NOT pre-computed bin indices. OpenEvolve handles all scaling and binning internally."
        },
    )
    # Bins per dimension: a single int for all dimensions, or a per-name mapping
    feature_bins: Union[int, Dict[str, int]] = 10
    diversity_reference_size: int = 20

    # Island migration schedule
    migration_interval: int = 50
    migration_rate: float = 0.1

    # Seed for reproducible behavior; Config.from_dict copies the top-level
    # seed here when this one is left unset
    random_seed: Optional[int] = 42

    # Artifact storage and retention
    artifacts_base_path: Optional[str] = None
    artifact_size_threshold: int = 32 * 1024  # 32 KiB
    cleanup_old_artifacts: bool = True
    artifact_retention_days: int = 30

    # Optional LLM/embedding-based novelty checking — consumed elsewhere;
    # exact semantics not visible in this module
    novelty_llm: Optional["LLMInterface"] = None
    embedding_model: Optional[str] = None
    similarity_threshold: float = 0.99
|
|
|
|
@dataclass
class EvaluatorConfig:
    """Configuration for program evaluation"""

    # Per-evaluation execution limits
    timeout: int = 300  # seconds — TODO confirm units against the evaluator
    max_retries: int = 3

    # Optional resource caps (None = no explicit limit)
    memory_limit_mb: Optional[int] = None
    cpu_limit: Optional[float] = None

    # Staged (cascade) evaluation; thresholds are consumed by the evaluator
    cascade_evaluation: bool = True
    cascade_thresholds: List[float] = field(default_factory=lambda: [0.5, 0.75, 0.9])

    # Parallelism
    parallel_evaluations: int = 1
    distributed: bool = False

    # LLM feedback blended into evaluation
    use_llm_feedback: bool = False
    llm_feedback_weight: float = 0.1

    # Artifact capture
    enable_artifacts: bool = True
    max_artifact_storage: int = 100 * 1024 * 1024  # 100 MiB
|
|
|
|
@dataclass
class EvolutionTraceConfig:
    """Configuration for evolution trace logging.

    Field semantics are enforced by the trace writer outside this module;
    names below describe the intended meaning.
    """

    enabled: bool = False  # master switch for trace logging
    format: str = "jsonl"  # output serialization format
    include_code: bool = False  # include program code in trace entries
    include_prompts: bool = True  # include LLM prompts in trace entries
    output_path: Optional[str] = None  # destination; None = consumer's default
    buffer_size: int = 10  # entries buffered before a flush
    compress: bool = False  # whether to compress the trace output
|
|
|
|
@dataclass
class Config:
    """Master configuration for OpenEvolve"""

    # General run settings
    max_iterations: int = 10000
    checkpoint_interval: int = 100
    log_level: str = "INFO"
    log_dir: Optional[str] = None
    random_seed: Optional[int] = 42
    language: Optional[str] = None  # target language; consumed elsewhere
    file_suffix: str = ".py"

    # Component configurations
    llm: LLMConfig = field(default_factory=LLMConfig)
    prompt: PromptConfig = field(default_factory=PromptConfig)
    database: DatabaseConfig = field(default_factory=DatabaseConfig)
    evaluator: EvaluatorConfig = field(default_factory=EvaluatorConfig)
    evolution_trace: EvolutionTraceConfig = field(default_factory=EvolutionTraceConfig)

    # Evolution settings
    diff_based_evolution: bool = True
    max_code_length: int = 10000
    diff_pattern: str = r"<<<<<<< SEARCH\n(.*?)=======\n(.*?)>>>>>>> REPLACE"

    # Early stopping
    early_stopping_patience: Optional[int] = None
    convergence_threshold: float = 0.001
    early_stopping_metric: str = "combined_score"

    # Per-worker task limit; consumed by the process-pool setup elsewhere
    max_tasks_per_child: Optional[int] = None

    @classmethod
    def from_yaml(cls, path: Union[str, Path]) -> "Config":
        """Load configuration from a YAML file.

        Args:
            path: Path to the YAML configuration file.

        Returns:
            The parsed Config instance.
        """
        with open(path, "r") as f:
            config_dict = yaml.safe_load(f)
        return cls.from_dict(config_dict)

    @classmethod
    def from_dict(cls, config_dict: Dict[str, Any]) -> "Config":
        """Build a Config from a plain (nested) dictionary.

        Args:
            config_dict: Nested mapping, typically parsed from YAML.

        Returns:
            The validated Config instance.

        Raises:
            ValueError: If 'diff_pattern' is not a valid regular expression.
        """
        # Validate the diff regex up front so a bad pattern fails with a
        # clear message instead of surfacing later at match time
        if "diff_pattern" in config_dict:
            try:
                re.compile(config_dict["diff_pattern"])
            except re.error as e:
                raise ValueError(f"Invalid regex pattern in diff_pattern: {e}") from e

        config: Config = dacite.from_dict(
            data_class=cls,
            data=config_dict,
            config=dacite.Config(cast=[List, Union], forward_references={"LLMInterface": Any}),
        )

        # Propagate the top-level seed to the database if it did not set one
        if config.database.random_seed is None and config.random_seed is not None:
            config.database.random_seed = config.random_seed

        return config

    def to_dict(self) -> Dict[str, Any]:
        """Return the configuration as a plain (nested) dictionary."""
        return asdict(self)

    def to_yaml(self, path: Union[str, Path]) -> None:
        """Save configuration to a YAML file"""
        with open(path, "w") as f:
            yaml.dump(self.to_dict(), f, default_flow_style=False)
|
|
|
|
def load_config(config_path: Optional[Union[str, Path]] = None) -> Config:
    """Load configuration from a YAML file, falling back to built-in defaults."""
    config = (
        Config.from_yaml(config_path)
        if config_path and os.path.exists(config_path)
        else Config()
    )

    # Fill per-model API settings that are still unset from the environment
    env_defaults = {
        "api_key": os.environ.get("OPENAI_API_KEY"),
        "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
    }
    config.llm.update_model_params(env_defaults)

    # Models without an explicit system message inherit the prompt's
    config.llm.update_model_params({"system_message": config.prompt.system_message})

    return config
|
|