File size: 12,436 Bytes
5e4510c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
"""
Tests for cascade evaluation validation functionality in openevolve.evaluator
"""

import os
import shutil
import tempfile
import unittest
from unittest.mock import MagicMock, patch

from openevolve.config import Config
from openevolve.evaluation_result import EvaluationResult
from openevolve.evaluator import Evaluator


class TestCascadeValidation(unittest.IsolatedAsyncioTestCase):
    """Tests for cascade evaluation configuration validation.

    Each test writes a small evaluator module into a temporary directory,
    points the evaluator config at it, and then checks either the warnings
    emitted by the ``Evaluator`` constructor or the results produced by
    ``_direct_evaluate`` / ``_process_evaluation_result``.
    """

    def setUp(self):
        """Create a fresh config and a temporary directory for evaluator files."""
        self.config = Config()
        self.temp_dir = tempfile.mkdtemp()

    def tearDown(self):
        """Remove the temporary directory and everything inside it."""
        # ignore_errors: individual files may already have been cleaned up
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def _create_evaluator_file(self, filename: str, content: str) -> str:
        """Write ``content`` to ``filename`` under the temp dir and return its path."""
        file_path = os.path.join(self.temp_dir, filename)
        with open(file_path, "w") as f:
            f.write(content)
        return file_path

    def _configure(self, evaluator_path: str, cascade: bool) -> None:
        """Point the evaluator config at ``evaluator_path`` with cascade on/off."""
        self.config.evaluator.cascade_evaluation = cascade
        self.config.evaluator.evaluation_file = evaluator_path

    def test_cascade_validation_with_valid_evaluator(self):
        """Test cascade validation with evaluator that has cascade functions"""
        evaluator_content = """
def evaluate_stage1(program_path):
    return {"stage1_score": 0.5}

def evaluate_stage2(program_path):
    return {"stage2_score": 0.7}

def evaluate_stage3(program_path):
    return {"stage3_score": 0.9}

def evaluate(program_path):
    return {"final_score": 1.0}
"""
        evaluator_path = self._create_evaluator_file("valid_cascade.py", evaluator_content)
        self._configure(evaluator_path, cascade=True)

        # A fully-staged cascade evaluator should construct without warnings.
        with patch("openevolve.evaluator.logger") as mock_logger:
            Evaluator(self.config.evaluator, evaluator_path)

            mock_logger.warning.assert_not_called()

    def test_cascade_validation_warning_for_missing_functions(self):
        """Test cascade validation warns when cascade functions are missing"""
        evaluator_content = """
def evaluate(program_path):
    return {"score": 0.5}
"""
        evaluator_path = self._create_evaluator_file("no_cascade.py", evaluator_content)
        self._configure(evaluator_path, cascade=True)

        # Cascade is enabled but no evaluate_stage* functions exist,
        # so the constructor must emit a warning naming the problem.
        with patch("openevolve.evaluator.logger") as mock_logger:
            Evaluator(self.config.evaluator, evaluator_path)

            mock_logger.warning.assert_called()
            warning_call = mock_logger.warning.call_args[0][0]
            self.assertIn("cascade_evaluation: true", warning_call)
            self.assertIn("evaluate_stage1", warning_call)

    def test_cascade_validation_partial_functions(self):
        """Test cascade validation with only some cascade functions"""
        evaluator_content = """
def evaluate_stage1(program_path):
    return {"stage1_score": 0.5}

def evaluate(program_path):
    return {"score": 0.5}
"""
        evaluator_path = self._create_evaluator_file("partial_cascade.py", evaluator_content)
        self._configure(evaluator_path, cascade=True)

        # Only stage1 exists: exactly one warning about the missing later stages.
        with patch("openevolve.evaluator.logger") as mock_logger:
            Evaluator(self.config.evaluator, evaluator_path)

            mock_logger.warning.assert_called_once()
            warning_call = mock_logger.warning.call_args[0][0]
            self.assertIn(
                "defines 'evaluate_stage1' but no additional cascade stages", warning_call
            )

    def test_no_cascade_validation_when_disabled(self):
        """Test no validation when cascade evaluation is disabled"""
        evaluator_content = """
def evaluate(program_path):
    return {"score": 0.5}
"""
        evaluator_path = self._create_evaluator_file("no_cascade.py", evaluator_content)
        self._configure(evaluator_path, cascade=False)

        # With cascade disabled, missing stage functions must not be flagged.
        with patch("openevolve.evaluator.logger") as mock_logger:
            Evaluator(self.config.evaluator, evaluator_path)

            mock_logger.warning.assert_not_called()

    async def test_direct_evaluate_supports_evaluation_result(self):
        """Test that _direct_evaluate supports EvaluationResult returns"""
        evaluator_content = """
from openevolve.evaluation_result import EvaluationResult

def evaluate(program_path):
    return EvaluationResult(
        metrics={"score": 0.8, "accuracy": 0.9},
        artifacts={"debug_info": "test data"}
    )
"""
        evaluator_path = self._create_evaluator_file("result_evaluator.py", evaluator_content)
        self._configure(evaluator_path, cascade=False)
        self.config.evaluator.timeout = 10

        evaluator = Evaluator(self.config.evaluator, evaluator_path)

        # A dummy program file for the evaluation function to receive.
        program_path = self._create_evaluator_file("test_program.py", "def test(): pass")

        # Replace the loaded evaluation function so the test controls the result.
        def mock_evaluate(path):
            return EvaluationResult(
                metrics={"score": 0.8, "accuracy": 0.9}, artifacts={"debug_info": "test data"}
            )

        evaluator.evaluate_function = mock_evaluate

        result = await evaluator._direct_evaluate(program_path)

        # The EvaluationResult must be passed through unchanged.
        self.assertIsInstance(result, EvaluationResult)
        self.assertEqual(result.metrics["score"], 0.8)
        self.assertEqual(result.artifacts["debug_info"], "test data")

    async def test_direct_evaluate_supports_dict_result(self):
        """Test that _direct_evaluate still supports dict returns"""
        evaluator_content = """
def evaluate(program_path):
    return {"score": 0.7, "performance": 0.85}
"""
        evaluator_path = self._create_evaluator_file("dict_evaluator.py", evaluator_content)
        self._configure(evaluator_path, cascade=False)
        self.config.evaluator.timeout = 10

        evaluator = Evaluator(self.config.evaluator, evaluator_path)

        # A dummy program file for the evaluation function to receive.
        program_path = self._create_evaluator_file("test_program.py", "def test(): pass")

        # Replace the loaded evaluation function so the test controls the result.
        def mock_evaluate(path):
            return {"score": 0.7, "performance": 0.85}

        evaluator.evaluate_function = mock_evaluate

        result = await evaluator._direct_evaluate(program_path)

        # Plain-dict results must be passed through unchanged.
        self.assertIsInstance(result, dict)
        self.assertEqual(result["score"], 0.7)
        self.assertEqual(result["performance"], 0.85)

    def test_cascade_validation_with_class_based_evaluator(self):
        """Test cascade validation with class-based evaluator"""
        evaluator_content = """
class Evaluator:
    def evaluate_stage1(self, program_path):
        return {"stage1_score": 0.5}
    
    def evaluate_stage2(self, program_path):
        return {"stage2_score": 0.7}
    
    def evaluate(self, program_path):
        return {"score": 0.5}

# Module-level functions (what validation looks for)
def evaluate_stage1(program_path):
    evaluator = Evaluator()
    return evaluator.evaluate_stage1(program_path)

def evaluate_stage2(program_path):
    evaluator = Evaluator()
    return evaluator.evaluate_stage2(program_path)

def evaluate(program_path):
    evaluator = Evaluator()
    return evaluator.evaluate(program_path)
"""
        evaluator_path = self._create_evaluator_file("class_cascade.py", evaluator_content)
        self._configure(evaluator_path, cascade=True)

        # Validation inspects module-level functions; since wrappers exist,
        # no warning should be emitted.
        with patch("openevolve.evaluator.logger") as mock_logger:
            Evaluator(self.config.evaluator, evaluator_path)

            mock_logger.warning.assert_not_called()

    def test_cascade_validation_with_syntax_error(self):
        """Test cascade validation handles syntax errors gracefully"""
        evaluator_content = """
def evaluate_stage1(program_path)  # Missing colon
    return {"stage1_score": 0.5}
"""
        evaluator_path = self._create_evaluator_file("syntax_error.py", evaluator_content)
        self._configure(evaluator_path, cascade=True)

        # Loading a syntactically-broken evaluator must raise
        # (could be SyntaxError or another import-time error).
        with self.assertRaises(Exception):
            Evaluator(self.config.evaluator, evaluator_path)

    def test_cascade_validation_nonexistent_file(self):
        """Test cascade validation with nonexistent evaluator file"""
        nonexistent_path = "/nonexistent/path.py"
        self._configure(nonexistent_path, cascade=True)

        # A missing evaluator file must raise ValueError mentioning "not found".
        with self.assertRaises(ValueError) as context:
            Evaluator(self.config.evaluator, nonexistent_path)

        self.assertIn("not found", str(context.exception))

    def test_process_evaluation_result_with_artifacts(self):
        """Test that _process_evaluation_result handles artifacts correctly"""
        evaluator_content = """
def evaluate(program_path):
    return {"score": 0.5}
"""
        evaluator_path = self._create_evaluator_file("dummy.py", evaluator_content)

        self._configure(evaluator_path, cascade=False)  # Disable cascade to avoid warnings
        evaluator = Evaluator(self.config.evaluator, evaluator_path)

        # An EvaluationResult with artifacts must keep both metrics and artifacts.
        eval_result = EvaluationResult(
            metrics={"score": 0.9}, artifacts={"log": "test log", "data": [1, 2, 3]}
        )

        result = evaluator._process_evaluation_result(eval_result)

        self.assertEqual(result.metrics, {"score": 0.9})
        self.assertEqual(result.artifacts, {"log": "test log", "data": [1, 2, 3]})

    def test_process_evaluation_result_with_dict(self):
        """Test that _process_evaluation_result handles dict results correctly"""
        evaluator_content = """
def evaluate(program_path):
    return {"score": 0.5}
"""
        evaluator_path = self._create_evaluator_file("dummy.py", evaluator_content)

        self._configure(evaluator_path, cascade=False)  # Disable cascade to avoid warnings
        evaluator = Evaluator(self.config.evaluator, evaluator_path)

        # A plain dict is normalized: dict becomes metrics, artifacts are empty.
        dict_result = {"score": 0.7, "accuracy": 0.8}

        result = evaluator._process_evaluation_result(dict_result)

        self.assertEqual(result.metrics, {"score": 0.7, "accuracy": 0.8})
        self.assertEqual(result.artifacts, {})


def _main() -> None:
    """Run this module's test suite via the standard unittest CLI."""
    unittest.main()


if __name__ == "__main__":
    _main()