"""
High-level API for using OpenEvolve as a library
"""

import asyncio
import tempfile
import os
import shutil
import uuid
import inspect
from typing import Union, Callable, Optional, List, Dict, Any, Tuple
from dataclasses import dataclass
from pathlib import Path

from openevolve.controller import OpenEvolve
from openevolve.config import Config, load_config, LLMModelConfig
from openevolve.database import Program


@dataclass
class EvolutionResult:
    """Result of an evolution run"""

    best_program: Optional[Program]
    best_score: float
    best_code: str
    metrics: Dict[str, Any]
    output_dir: Optional[str]

    def __repr__(self):
        return f"EvolutionResult(best_score={self.best_score:.4f})"


def run_evolution(
    initial_program: Union[str, Path, List[str]],
    evaluator: Union[str, Path, Callable],
    config: Union[str, Path, Config, None] = None,
    iterations: Optional[int] = None,
    output_dir: Optional[str] = None,
    cleanup: bool = True,
) -> EvolutionResult:
    """
    Run evolution with flexible inputs - the main library API

    Args:
        initial_program: Can be:
            - Path to a program file (str or Path)
            - Program code as a string
            - List of code lines
        evaluator: Can be:
            - Path to an evaluator file (str or Path)
            - Callable that takes a program path and returns a metrics dict
        config: Can be:
            - Path to config YAML file (str or Path)
            - Config object
            - None for defaults
        iterations: Number of iterations (overrides config)
        output_dir: Output directory (None for temp directory)
        cleanup: If True, remove temporary files (and any temporary output directory) after evolution

    Returns:
        EvolutionResult with best program and metrics

    Examples:
        # Using file paths (original way)
        result = run_evolution(
            'program.py',
            'evaluator.py'
        )

        # Using code strings (code strings are written to a file verbatim,
        # so they should not carry extra indentation)
        result = run_evolution(
            initial_program='''
# EVOLVE-BLOCK-START
def solve(x):
    return x * 2
# EVOLVE-BLOCK-END
''',
            evaluator=lambda path: {"score": evaluate_program(path)},
            iterations=100
        )

        # Using a custom evaluator function
        def my_evaluator(program_path):
            # Run tests, benchmarks, etc.
            return {"score": 0.95, "runtime": 1.2}

        result = run_evolution(
            initial_program=generate_initial_code(),
            evaluator=my_evaluator
        )
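
        # Supplying an explicit LLM configuration (model name and API key are illustrative)
        from openevolve.config import Config, LLMModelConfig
        config = Config()
        config.llm.models = [LLMModelConfig(name='gpt-4', api_key='your-key')]
        result = run_evolution('program.py', 'evaluator.py', config=config)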
    """
    return asyncio.run(
        _run_evolution_async(initial_program, evaluator, config, iterations, output_dir, cleanup)
    )


async def _run_evolution_async(
    initial_program: Union[str, Path, List[str]],
    evaluator: Union[str, Path, Callable],
    config: Union[str, Path, Config, None],
    iterations: Optional[int],
    output_dir: Optional[str],
    cleanup: bool,
) -> EvolutionResult:
    """Async implementation of run_evolution"""

    temp_dir = None
    temp_files = []

    try:
        # Handle configuration
        if config is None:
            config_obj = Config()
        elif isinstance(config, Config):
            config_obj = config
        else:
            config_obj = load_config(str(config))

        # Validate that LLM models are configured
        if not config_obj.llm.models:
            raise ValueError(
                "No LLM models configured. Please provide a config with LLM models, or set up "
                "your configuration with models. For example:\n\n"
                "from openevolve.config import Config, LLMModelConfig\n"
                "config = Config()\n"
                "config.llm.models = [LLMModelConfig(name='gpt-4', api_key='your-key')]\n"
                "result = run_evolution(program, evaluator, config=config)"
            )

        # Set up output directory
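        # A temporary directory is used only when the caller did not request a persistent
        # output directory and cleanup is enabled; otherwise results go to the requested
        # (or default "openevolve_output") directory.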
        if output_dir is None and cleanup:
            temp_dir = tempfile.mkdtemp(prefix="openevolve_")
            actual_output_dir = temp_dir
        else:
            actual_output_dir = output_dir or "openevolve_output"
            os.makedirs(actual_output_dir, exist_ok=True)

        # Process initial program
        program_path = _prepare_program(initial_program, temp_dir, temp_files)

        # Process evaluator
        evaluator_path = _prepare_evaluator(evaluator, temp_dir, temp_files)

        # Create and run controller
        controller = OpenEvolve(
            initial_program_path=program_path,
            evaluation_file=evaluator_path,
            config=config_obj,
            output_dir=actual_output_dir,
        )

        best_program = await controller.run(iterations=iterations)

        # Prepare result
        best_score = 0.0
        metrics = {}
        best_code = ""

        if best_program:
            best_code = best_program.code
            metrics = best_program.metrics or {}

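            # Prefer an explicit combined_score; otherwise fall back to the mean of the
            # numeric metric values as a rough scalar summary.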
            if "combined_score" in metrics:
                best_score = metrics["combined_score"]
            elif metrics:
                numeric_metrics = [v for v in metrics.values() if isinstance(v, (int, float))]
                if numeric_metrics:
                    best_score = sum(numeric_metrics) / len(numeric_metrics)

        return EvolutionResult(
            best_program=best_program,
            best_score=best_score,
            best_code=best_code,
            metrics=metrics,
            # Only hide the output directory when it was a temporary one that gets removed
            output_dir=None if (cleanup and temp_dir is not None) else actual_output_dir,
        )

    finally:
        # Clean up temporary files and directories if requested
        if cleanup:
            for temp_file in temp_files:
                try:
                    os.unlink(temp_file)
                except OSError:
                    pass
            if temp_dir and os.path.exists(temp_dir):
                try:
                    shutil.rmtree(temp_dir)
                except OSError:
                    pass


def _prepare_program(
    initial_program: Union[str, Path, List[str]], temp_dir: Optional[str], temp_files: List[str]
) -> str:
    """Convert program input to a file path"""

    # If already a file path, use it directly
    if isinstance(initial_program, (str, Path)):
        if os.path.exists(str(initial_program)):
            return str(initial_program)

    # Otherwise, treat as code and write to temp file
    if isinstance(initial_program, list):
        code = "\n".join(initial_program)
    else:
        code = str(initial_program)

    # Ensure code has evolution markers if it doesn't already
    if "EVOLVE-BLOCK-START" not in code:
        # Wrap entire code in evolution block
        code = f"""# EVOLVE-BLOCK-START
{code}
# EVOLVE-BLOCK-END"""

    # Write to temp file
    if temp_dir is None:
        temp_dir = tempfile.gettempdir()

    program_file = os.path.join(temp_dir, f"program_{uuid.uuid4().hex[:8]}.py")
    with open(program_file, "w") as f:
        f.write(code)
    temp_files.append(program_file)

    return program_file


def _prepare_evaluator(
    evaluator: Union[str, Path, Callable], temp_dir: Optional[str], temp_files: List[str]
) -> str:
    """Convert evaluator input to a file path"""

    # If already a file path, use it directly
    if isinstance(evaluator, (str, Path)):
        if os.path.exists(str(evaluator)):
            return str(evaluator)

    # If it's a callable, create a wrapper module
    if callable(evaluator):
        # Create a unique global name for this evaluator
        evaluator_id = f"_openevolve_evaluator_{uuid.uuid4().hex[:8]}"

        # Store in globals so the wrapper can find it
        globals()[evaluator_id] = evaluator
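        # The generated wrapper module re-imports this module and looks the callable up by
        # this unique name, so a function evaluator is only reachable when evaluation runs
        # in the same interpreter process.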

        evaluator_code = f"""
# Wrapper for user-provided evaluator function
import {__name__} as api_module

def evaluate(program_path):
    '''Wrapper for user-provided evaluator function'''
    user_evaluator = getattr(api_module, '{evaluator_id}')
    return user_evaluator(program_path)
"""
    else:
        # Treat as code string
        evaluator_code = str(evaluator)

        # Ensure it has an evaluate function
        if "def evaluate" not in evaluator_code:
            raise ValueError("Evaluator code must contain an 'evaluate(program_path)' function")
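        # A minimal code-string evaluator looks like (illustrative):
        #   def evaluate(program_path):
        #       return {"score": 1.0}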

    # Write to temp file
    if temp_dir is None:
        temp_dir = tempfile.gettempdir()

    eval_file = os.path.join(temp_dir, f"evaluator_{uuid.uuid4().hex[:8]}.py")
    with open(eval_file, "w") as f:
        f.write(evaluator_code)
    temp_files.append(eval_file)

    return eval_file


# Additional helper functions for common use cases


def evolve_function(
    func: Callable, test_cases: List[Tuple[Any, Any]], iterations: int = 100, **kwargs
) -> EvolutionResult:
    """
    Evolve a Python function based on test cases

    Args:
        func: Initial function to evolve
        test_cases: List of (input, expected_output) tuples
        iterations: Number of evolution iterations
        **kwargs: Additional arguments for run_evolution

    Returns:
        EvolutionResult with optimized function

    Example:
        def initial_sort(arr):
            # Slow bubble sort
            for i in range(len(arr)):
                for j in range(len(arr)-1):
                    if arr[j] > arr[j+1]:
                        arr[j], arr[j+1] = arr[j+1], arr[j]
            return arr

        result = evolve_function(
            initial_sort,
            test_cases=[
                ([3, 1, 2], [1, 2, 3]),
                ([5, 2, 8, 1], [1, 2, 5, 8]),
            ],
            iterations=50
        )
        print(f"Optimized function score: {result.best_score}")
    """

    # Get function source code
    func_source = inspect.getsource(func)
    func_name = func.__name__

    # Ensure the function source has evolution markers
    if "EVOLVE-BLOCK-START" not in func_source:
        # Try to add markers around the function body
        lines = func_source.split("\n")
        func_def_line = next(i for i, line in enumerate(lines) if line.strip().startswith("def "))

        # Find the end of the function (simplified approach)
        indent = len(lines[func_def_line]) - len(lines[func_def_line].lstrip())
        func_end = len(lines)
        for i in range(func_def_line + 1, len(lines)):
            if lines[i].strip() and (len(lines[i]) - len(lines[i].lstrip())) <= indent:
                func_end = i
                break

        # Insert evolution markers
        lines.insert(func_def_line + 1, " " * (indent + 4) + "# EVOLVE-BLOCK-START")
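        # The first insert shifts later indices down by one, so func_end + 1 still points
        # just past the original end of the function body.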
        lines.insert(func_end + 1, " " * (indent + 4) + "# EVOLVE-BLOCK-END")
        func_source = "\n".join(lines)

    # Create evaluator that tests the function
    def evaluator(program_path):
        import importlib.util

        # Load the evolved program
        spec = importlib.util.spec_from_file_location("evolved", program_path)
        if spec is None or spec.loader is None:
            return {"score": 0.0, "error": "Failed to load program"}

        module = importlib.util.module_from_spec(spec)

        try:
            spec.loader.exec_module(module)
        except Exception as e:
            return {"score": 0.0, "error": f"Failed to execute program: {str(e)}"}

        if not hasattr(module, func_name):
            return {"score": 0.0, "error": f"Function '{func_name}' not found"}

        evolved_func = getattr(module, func_name)
        correct = 0
        total = len(test_cases)
        errors = []

        for input_val, expected in test_cases:
            try:
                # Copy mutable inputs (e.g. lists) so the stored test case is not modified in place
                if isinstance(input_val, list):
                    test_input = input_val.copy()
                else:
                    test_input = input_val

                result = evolved_func(test_input)
                if result == expected:
                    correct += 1
                else:
                    errors.append(f"Input {input_val}: expected {expected}, got {result}")
            except Exception as e:
                errors.append(f"Input {input_val}: {str(e)}")

        return {
            "score": correct / total,
            "test_pass_rate": correct / total,
            "tests_passed": correct,
            "total_tests": total,
            "errors": errors[:3],  # Limit error details
        }

    return run_evolution(
        initial_program=func_source, evaluator=evaluator, iterations=iterations, **kwargs
    )


def evolve_algorithm(
    algorithm_class: type, benchmark: Callable, iterations: int = 100, **kwargs
) -> EvolutionResult:
    """
    Evolve an algorithm class based on a benchmark

    Args:
        algorithm_class: Initial algorithm class to evolve
        benchmark: Function that takes an instance of the class and returns a metrics dict (or a single numeric score)
        iterations: Number of evolution iterations
        **kwargs: Additional arguments for run_evolution

    Returns:
        EvolutionResult with optimized algorithm

    Example:
        class SortAlgorithm:
            def sort(self, arr):
                # Placeholder implementation (to be evolved)
                return sorted(arr)

        def benchmark_sort(instance):
            import time
            test_data = [list(range(100, 0, -1))]  # Reverse sorted

            start = time.time()
            for data in test_data:
                result = instance.sort(data.copy())
                if result != sorted(data):
                    return {"score": 0.0}

            duration = time.time() - start
            return {
                "score": 1.0,
                "runtime": duration,
                "performance": 1.0 / (duration + 0.001)
            }

        result = evolve_algorithm(SortAlgorithm, benchmark_sort, iterations=50)
    """

    # Get class source code
    class_source = inspect.getsource(algorithm_class)

    # Ensure the class has evolution markers
    if "EVOLVE-BLOCK-START" not in class_source:
        lines = class_source.split("\n")
        # Find class definition
        class_def_line = next(
            i for i, line in enumerate(lines) if line.strip().startswith("class ")
        )

        # Add evolution markers around the class body
        indent = len(lines[class_def_line]) - len(lines[class_def_line].lstrip())
        lines.insert(class_def_line + 1, " " * (indent + 4) + "# EVOLVE-BLOCK-START")
        lines.append(" " * (indent + 4) + "# EVOLVE-BLOCK-END")
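        # inspect.getsource() returns only the class definition, so appending places the
        # end marker after the last line of the class body.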
        class_source = "\n".join(lines)

    # Create evaluator
    def evaluator(program_path):
        import importlib.util

        # Load the evolved program
        spec = importlib.util.spec_from_file_location("evolved", program_path)
        if spec is None or spec.loader is None:
            return {"score": 0.0, "error": "Failed to load program"}

        module = importlib.util.module_from_spec(spec)

        try:
            spec.loader.exec_module(module)
        except Exception as e:
            return {"score": 0.0, "error": f"Failed to execute program: {str(e)}"}

        if not hasattr(module, algorithm_class.__name__):
            return {"score": 0.0, "error": f"Class '{algorithm_class.__name__}' not found"}

        AlgorithmClass = getattr(module, algorithm_class.__name__)

        try:
            instance = AlgorithmClass()
            metrics = benchmark(instance)
            return metrics if isinstance(metrics, dict) else {"score": metrics}
        except Exception as e:
            return {"score": 0.0, "error": str(e)}

    return run_evolution(
        initial_program=class_source, evaluator=evaluator, iterations=iterations, **kwargs
    )


def evolve_code(
    initial_code: str, evaluator: Callable[[str], Dict[str, Any]], iterations: int = 100, **kwargs
) -> EvolutionResult:
    """
    Evolve arbitrary code with a custom evaluator

    Args:
        initial_code: Initial code to evolve
        evaluator: Function that takes a program path and returns a metrics dict
        iterations: Number of evolution iterations
        **kwargs: Additional arguments for run_evolution

    Returns:
        EvolutionResult with optimized code

    Example:
        initial_code = '''
        def fibonacci(n):
            if n <= 1:
                return n
            return fibonacci(n-1) + fibonacci(n-2)
        '''

        def eval_fib(program_path):
            # Evaluate fibonacci implementation
            import importlib.util
            import time

            spec = importlib.util.spec_from_file_location("fib", program_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            try:
                start = time.time()
                result = module.fibonacci(20)
                duration = time.time() - start

                correct = result == 6765
                return {
                    "score": 1.0 if correct else 0.0,
                    "runtime": duration,
                    "correctness": correct
                }
            except Exception:
                return {"score": 0.0}

        result = evolve_code(initial_code, eval_fib, iterations=50)
    """
    return run_evolution(
        initial_program=initial_code, evaluator=evaluator, iterations=iterations, **kwargs
    )