---
# Top-level evolution settings
max_iterations: 100
checkpoint_interval: 10
log_level: "INFO"
diff_based_evolution: false
max_code_length: 10000
language: "text"

# LLM configuration
llm:
  api_base: "https://openrouter.ai/api/v1"
  models:
    - name: "qwen/qwen3-8b"
      weight: 1.0

  temperature: 0.8
  max_tokens: 4096
  timeout: 60
  retries: 3

# Prompt configuration
prompt:
  template_dir: "templates"
  num_top_programs: 5
  num_diverse_programs: 3
  include_artifacts: true

  system_message: |
    You are an expert at creating effective prompts for language models.
    Your goal is to evolve prompts that maximize accuracy on the given task.

    When creating new prompts:
    1. Build on successful patterns from the examples
    2. Be creative but maintain clarity
    3. Consider different reasoning strategies (direct, step-by-step, few-shot)
    4. Optimize for the specific task requirements

# Program database settings
database:
  population_size: 50
  archive_size: 500
  num_islands: 4

  feature_dimensions: ["prompt_length", "reasoning_strategy"]
  feature_bins: 10

  elite_selection_ratio: 0.4
  exploration_ratio: 0.3
  exploitation_ratio: 0.3

  migration_interval: 20
  migration_rate: 0.1

# Evaluator settings
evaluator:
  timeout: 1800
  max_retries: 3
  parallel_evaluations: 4
  cascade_evaluation: true
  cascade_thresholds: [0.9]

  use_llm_feedback: true
  llm_feedback_weight: 0.2