"""
Integration tests for artifacts functionality
"""
import asyncio
import os
import tempfile
import unittest
from unittest.mock import Mock, patch
from openevolve.config import Config, DatabaseConfig, EvaluatorConfig, PromptConfig
from openevolve.database import Program, ProgramDatabase
from openevolve.evaluation_result import EvaluationResult
from openevolve.evaluator import Evaluator
from openevolve.prompt.sampler import PromptSampler


class TestArtifactsIntegration(unittest.TestCase):
    """Test full integration of artifacts feature"""

    def setUp(self):
        # Set up event loop for async operations in tests
        try:
            self.loop = asyncio.get_event_loop()
        except RuntimeError:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)

        # Create temporary directory for database
        self.temp_dir = tempfile.mkdtemp()

        # Create evaluation file that can return EvaluationResult
        self.eval_file = tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False)
        self.eval_file.write(
            """
import traceback
from openevolve.evaluation_result import EvaluationResult


def evaluate(program_path):
    try:
        # Try to compile the program
        with open(program_path, 'r') as f:
            code = f.read()
        compile(code, program_path, 'exec')

        # If compilation succeeds, return good metrics
        return EvaluationResult(
            metrics={"compile_ok": 1.0, "score": 0.8},
            artifacts={"stdout": "Compilation successful"}
        )
    except Exception as e:
        # If compilation fails, capture the error
        return EvaluationResult(
            metrics={"compile_ok": 0.0, "score": 0.0},
            artifacts={
                "stderr": str(e),
                "traceback": traceback.format_exc(),
                "failure_stage": "compilation"
            }
        )


def evaluate_stage1(program_path):
    # Basic compilation check
    try:
        with open(program_path, 'r') as f:
            code = f.read()
        compile(code, program_path, 'exec')
        return {"stage1_passed": 1.0, "compile_ok": 1.0}
    except Exception as e:
        return EvaluationResult(
            metrics={"stage1_passed": 0.0, "compile_ok": 0.0},
            artifacts={
                "stderr": str(e),
                "failure_stage": "stage1_compilation"
            }
        )
"""
        )
        self.eval_file.close()

        # Set up config
        self.config = Config()
        self.config.database.db_path = self.temp_dir
        self.config.evaluator.cascade_evaluation = True
        self.config.prompt.include_artifacts = True

        # Initialize components
        self.database = ProgramDatabase(self.config.database)
        self.evaluator = Evaluator(self.config.evaluator, self.eval_file.name)
        self.prompt_sampler = PromptSampler(self.config.prompt)

    def tearDown(self):
        os.unlink(self.eval_file.name)

        # Clean up event loop if we created one
        if hasattr(self, "loop") and self.loop and not self.loop.is_closed():
            # Cancel any pending tasks
            pending = asyncio.all_tasks(self.loop)
            for task in pending:
                task.cancel()
            # Run the loop briefly to let cancellations process
            if pending:
                self.loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
    def test_compile_failure_artifact_capture(self):
        """Test that compilation failures are captured as artifacts"""

        async def run_test():
            # Program with syntax error
            bad_code = "print('hello'\n # Missing closing parenthesis"
            program_id = "bad_program_1"

            # Evaluate the program
            metrics = await self.evaluator.evaluate_program(bad_code, program_id)

            # Should get failure metrics
            self.assertEqual(metrics.get("compile_ok"), 0.0)

            # Should have pending artifacts
            artifacts = self.evaluator.get_pending_artifacts(program_id)
            self.assertIsNotNone(artifacts)
            self.assertIn("stderr", artifacts)
            # Note: stage1 evaluation doesn't include traceback, only stderr and failure_stage
            self.assertIn("failure_stage", artifacts)

            return artifacts

        artifacts = asyncio.run(run_test())

        # Verify artifact content - should have captured the compilation error
        self.assertIn("stderr", artifacts)
        self.assertTrue(len(artifacts["stderr"]) > 0, "stderr should not be empty")
        self.assertIn("failure_stage", artifacts)
        self.assertEqual(artifacts["failure_stage"], "stage1_compilation")
    def test_end_to_end_artifact_flow(self):
        """Test full flow: eval failure -> artifact -> prompt -> next gen"""

        async def run_test():
            # 1. Create a program with compilation error
            bad_code = "def broken_function(\n return 'incomplete'"
            program_id = "flow_test_1"

            # 2. Evaluate and get artifacts
            metrics = await self.evaluator.evaluate_program(bad_code, program_id)
            artifacts = self.evaluator.get_pending_artifacts(program_id)

            # 3. Create program and store in database
            program = Program(id=program_id, code=bad_code, language="python", metrics=metrics)
            self.database.add(program)

            # 4. Store artifacts
            if artifacts:
                self.database.store_artifacts(program_id, artifacts)

            # 5. Retrieve artifacts and build prompt
            stored_artifacts = self.database.get_artifacts(program_id)
            prompt = self.prompt_sampler.build_prompt(
                current_program=bad_code,
                parent_program=bad_code,
                program_metrics=metrics,
                previous_programs=[],
                top_programs=[],
                program_artifacts=stored_artifacts,
            )

            return prompt, stored_artifacts

        prompt, artifacts = asyncio.run(run_test())

        # Verify artifacts appear in prompt
        self.assertIn("stderr", prompt["user"].lower())
        self.assertIn("Last Execution Output", prompt["user"])

        # Verify artifacts were stored and retrieved correctly
        self.assertIn("stderr", artifacts)
        self.assertTrue(len(artifacts["stderr"]) > 0, "stderr should not be empty")
    def test_cascade_evaluation_with_artifacts(self):
        """Test cascade evaluation captures artifacts at each stage"""

        async def run_test():
            # Program that would fail at stage 1 (illustrative only; the executor
            # call is mocked below, so this code is never actually evaluated)
            invalid_code = "invalid syntax here"
            program_id = "cascade_test_1"

            # Run cascade evaluation on a placeholder path
            result = await self.evaluator._cascade_evaluate("/tmp/test_program.py")

            # Should be an EvaluationResult with artifacts
            if isinstance(result, EvaluationResult):
                return result
            else:
                # If it returns a dict, wrap it
                return EvaluationResult.from_dict(result)

        # Mock the actual file operations since we're testing the cascade logic
        with patch("openevolve.evaluator.run_in_executor") as mock_executor:
            # Mock stage1 to return an error with artifacts
            mock_executor.return_value = EvaluationResult(
                metrics={"stage1_passed": 0.0}, artifacts={"stderr": "Stage 1 compilation error"}
            )

            result = asyncio.run(run_test())

            # Should have failure metrics and artifacts
            self.assertEqual(result.metrics.get("stage1_passed"), 0.0)
            self.assertIn("stderr", result.artifacts)
    def test_artifacts_disabled_integration(self):
        """Test that the full system works with artifacts disabled"""
        with patch.dict(os.environ, {"ENABLE_ARTIFACTS": "false"}):

            async def run_test():
                # Program with error
                bad_code = "invalid syntax"
                program_id = "disabled_test_1"

                # Evaluate
                metrics = await self.evaluator.evaluate_program(bad_code, program_id)

                # Should not have pending artifacts when disabled
                artifacts = self.evaluator.get_pending_artifacts(program_id)

                return metrics, artifacts

            metrics, artifacts = asyncio.run(run_test())

            # Should still get metrics but no artifacts
            self.assertIsInstance(metrics, dict)
            self.assertIsNone(artifacts)

    def test_successful_evaluation_with_artifacts(self):
        """Test that successful evaluations can also have artifacts"""

        async def run_test():
            # Valid Python code
            good_code = "print('Hello, world!')"
            program_id = "success_test_1"

            # Evaluate
            metrics = await self.evaluator.evaluate_program(good_code, program_id)
            artifacts = self.evaluator.get_pending_artifacts(program_id)

            return metrics, artifacts

        metrics, artifacts = asyncio.run(run_test())

        # Should get successful metrics
        self.assertEqual(metrics.get("compile_ok"), 1.0)

        # Should have artifacts from successful compilation
        if artifacts:
            self.assertIn("stdout", artifacts)
            self.assertIn("successful", artifacts["stdout"].lower())


class TestArtifactsPersistence(unittest.TestCase):
    """Test that artifacts persist correctly across save/load cycles"""

    def setUp(self):
        # Set up event loop for async operations in tests
        try:
            self.loop = asyncio.get_event_loop()
        except RuntimeError:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)

        self.temp_dir = tempfile.mkdtemp()
        config = DatabaseConfig(db_path=self.temp_dir)
        self.database = ProgramDatabase(config)

    def tearDown(self):
        # Clean up event loop if we created one
        if hasattr(self, "loop") and self.loop and not self.loop.is_closed():
            # Cancel any pending tasks
            pending = asyncio.all_tasks(self.loop)
            for task in pending:
                task.cancel()
            # Run the loop briefly to let cancellations process
            if pending:
                self.loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))

    def test_save_load_artifacts(self):
        """Test that artifacts survive database save/load cycle"""
        # Create program with artifacts
        program = Program(id="persist_test_1", code="print('test')", metrics={"score": 0.8})
        artifacts = {
            "stderr": "error message",
            "stdout": "output message",
            "large_log": "x" * (50 * 1024),  # Large artifact
        }

        # Add program and artifacts
        self.database.add(program)
        self.database.store_artifacts(program.id, artifacts)

        # Save database
        self.database.save()

        # Create new database instance and load
        new_database = ProgramDatabase(DatabaseConfig(db_path=self.temp_dir))
        new_database.load(self.temp_dir)

        # Check that artifacts are preserved
        loaded_artifacts = new_database.get_artifacts(program.id)
        self.assertEqual(loaded_artifacts["stderr"], artifacts["stderr"])
        self.assertEqual(loaded_artifacts["stdout"], artifacts["stdout"])
        self.assertEqual(loaded_artifacts["large_log"], artifacts["large_log"])
if __name__ == "__main__":
unittest.main()
|