| """ |
| Tests for iteration counting and checkpoint behavior |
| """ |
|
|
| import asyncio |
| import os |
| import tempfile |
| import unittest |
| from unittest.mock import Mock, patch, MagicMock |
|
|
| |
# Set a placeholder API key before importing openevolve so that config
# validation does not require real credentials
os.environ["OPENAI_API_KEY"] = "test"

from openevolve.config import Config
from openevolve.controller import OpenEvolve
from openevolve.database import Program, ProgramDatabase


class TestIterationCounting(unittest.TestCase):
    """Tests for correct iteration counting behavior"""

    def setUp(self):
        """Set up test environment"""
        self.test_dir = tempfile.mkdtemp()

        # Minimal evolvable program with an EVOLVE block
        self.program_content = """# EVOLVE-BLOCK-START
def compute(x):
    return x * 2
# EVOLVE-BLOCK-END
"""
        self.program_file = os.path.join(self.test_dir, "test_program.py")
        with open(self.program_file, "w") as f:
            f.write(self.program_content)

        # Evaluator stub that returns fixed metrics
        self.eval_content = """
def evaluate(program_path):
    return {"score": 0.5, "performance": 0.6}
"""
        self.eval_file = os.path.join(self.test_dir, "evaluator.py")
        with open(self.eval_file, "w") as f:
            f.write(self.eval_content)

    def tearDown(self):
        """Clean up test environment"""
        import shutil

        shutil.rmtree(self.test_dir, ignore_errors=True)

    def test_fresh_start_iteration_counting(self):
        """Test that fresh start correctly handles iteration 0 as special"""
        config = Config()
        config.max_iterations = 20
        config.checkpoint_interval = 10

        # Fresh start: iteration 0 is reserved for the initial program
        start_iteration = 0
        should_add_initial = True

        evolution_start = start_iteration
        evolution_iterations = config.max_iterations

        if should_add_initial and start_iteration == 0:
            evolution_start = 1

        self.assertEqual(evolution_start, 1, "Evolution should start at iteration 1")
        self.assertEqual(evolution_iterations, 20, "Should run 20 evolution iterations")

        total_iterations = evolution_start + evolution_iterations
        self.assertEqual(total_iterations, 21, "Total range should be 21 (1 through 20)")

        # Iterations that should trigger a checkpoint
        expected_checkpoints = []
        for i in range(evolution_start, total_iterations):
            if i > 0 and i % config.checkpoint_interval == 0:
                expected_checkpoints.append(i)

        self.assertEqual(expected_checkpoints, [10, 20], "Checkpoints should be at 10 and 20")
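
        # Worked example: range(1, 21) covers iterations 1..20, and of those
        # only 10 and 20 are divisible by checkpoint_interval=10, which is the
        # schedule asserted above.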
    def test_resume_iteration_counting(self):
        """Test that resume correctly continues from checkpoint"""
        config = Config()
        config.max_iterations = 10
        config.checkpoint_interval = 10

        # Resuming from a checkpoint taken at iteration 10
        start_iteration = 11
        should_add_initial = False

        evolution_start = start_iteration
        evolution_iterations = config.max_iterations

        if should_add_initial and start_iteration == 0:
            evolution_start = 1

        self.assertEqual(evolution_start, 11, "Evolution should continue from iteration 11")
        self.assertEqual(evolution_iterations, 10, "Should run 10 more iterations")

        total_iterations = evolution_start + evolution_iterations
        self.assertEqual(total_iterations, 21, "Should run through iteration 20")

        # Iterations that should trigger a checkpoint
        expected_checkpoints = []
        for i in range(evolution_start, total_iterations):
            if i > 0 and i % config.checkpoint_interval == 0:
                expected_checkpoints.append(i)

        self.assertEqual(expected_checkpoints, [20], "Should checkpoint at 20")
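
        # Worked example: range(11, 21) covers iterations 11..20, of which only
        # 20 is divisible by checkpoint_interval=10, so exactly one more
        # checkpoint is written on the resumed run.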
    def test_checkpoint_boundary_conditions(self):
        """Test checkpoint behavior at various boundaries"""
        # (start_iteration, max_iterations, checkpoint_interval, expected_checkpoints)
        test_cases = [
            (1, 100, 10, list(range(10, 101, 10))),
            (1, 99, 10, list(range(10, 100, 10))),
            (1, 101, 10, list(range(10, 101, 10))),
            (0, 20, 5, [5, 10, 15, 20]),
        ]

        for start, max_iter, interval, expected in test_cases:
            # Iteration 0 is reserved for the initial program, so a fresh
            # start begins evolving at iteration 1
            evolution_start = start
            if start == 0:
                evolution_start = 1

            total = evolution_start + max_iter

            checkpoints = []
            for i in range(evolution_start, total):
                if i > 0 and i % interval == 0:
                    checkpoints.append(i)

            self.assertEqual(
                checkpoints,
                expected,
                f"Failed for start={start}, max={max_iter}, interval={interval}",
            )
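
        # Worked example for the (1, 99, 10) case: total = 1 + 99 = 100, so the
        # loop covers range(1, 100) == [1, ..., 99] and the last checkpoint
        # lands at iteration 90 even though 99 iterations are run.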
    def test_controller_iteration_behavior(self):
        """Test actual controller behavior with iteration counting - requires optillm server"""
        # Skip unless a local optillm server is reachable
        try:
            import requests

            response = requests.get("http://localhost:8000/health", timeout=2)
            if response.status_code != 200:
                self.skipTest("optillm server not available at localhost:8000")
        except Exception:
            self.skipTest("optillm server not available at localhost:8000")

        async def async_test():
            from openevolve.config import LLMModelConfig

            config = Config()
            config.max_iterations = 8
            config.checkpoint_interval = 4
            config.database.in_memory = True
            config.evaluator.parallel_evaluations = 1
            config.evaluator.timeout = 30

            # Point the LLM at the local optillm endpoint
            config.llm.api_base = "http://localhost:8000/v1"
            config.llm.models = [
                LLMModelConfig(
                    name="google/gemma-3-270m-it",
                    api_key="optillm",
                    api_base="http://localhost:8000/v1",
                    weight=1.0,
                )
            ]
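
            # NOTE: the endpoint and model name above assume a locally running
            # optillm proxy serving google/gemma-3-270m-it (the same server
            # probed by the health check at the top of this test); adjust them
            # to match your local setup.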
            controller = OpenEvolve(
                initial_program_path=self.program_file,
                evaluation_file=self.eval_file,
                config=config,
                output_dir=self.test_dir,
            )

            # Record every checkpoint call while still delegating to the
            # original implementation
            checkpoint_calls = []
            original_save = controller._save_checkpoint
            controller._save_checkpoint = lambda i: checkpoint_calls.append(i) or original_save(i)
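            # list.append returns None, so the `or` above always falls through
            # to original_save(i): every call is recorded and the checkpoint is
            # still written.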
            await controller.run(iterations=8)

            print(f"Checkpoint calls: {checkpoint_calls}")
            print(f"Total programs: {len(controller.database.programs)}")

            self.assertGreaterEqual(
                len(controller.database.programs),
                1,
                "Should have at least the initial program",
            )

            # Only inspect checkpoint behavior if evolution actually produced
            # new programs
            if len(controller.database.programs) > 1:
                print("Evolution succeeded - verifying checkpoint behavior")
                expected_checkpoints = [4, 8]
                successful_checkpoints = [cp for cp in expected_checkpoints if cp in checkpoint_calls]
                print(f"Checkpoints hit: {successful_checkpoints}")
                if 8 in checkpoint_calls:
                    print("Final checkpoint found as expected")

        asyncio.run(async_test())


if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestIterationCounting)
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(suite)

    # Also run the controller test on its own with explicit output.  It is a
    # synchronous test method (it drives its own event loop internally), so it
    # must not be awaited or wrapped in another asyncio.run() call.
    def run_controller_test():
        test = TestIterationCounting()
        test.setUp()
        try:
            test.test_controller_iteration_behavior()
            print("✓ test_controller_iteration_behavior passed")
        except unittest.SkipTest as e:
            print(f"- test_controller_iteration_behavior skipped: {e}")
        except Exception as e:
            print(f"✗ test_controller_iteration_behavior failed: {e}")
        finally:
            test.tearDown()

    run_controller_test()