File size: 9,373 Bytes
5e4510c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
"""
Evaluator for K-Module Pipeline Configuration Problem

This evaluator scores pipeline configurations based on how many modules
match the target configuration. The key property is that there's NO
gradient information - you only know the count of correct modules,
not WHICH ones are correct.

This creates a challenging landscape for iterative refinement but
allows evolutionary crossover to combine good "building blocks"
from different individuals.

Set RICH_FEEDBACK=1 to enable rich feedback mode, which tells you
exactly which modules are correct/incorrect. This demonstrates that
iterative refinement works well when feedback is attributable.
"""

import os
import sys
import time
import traceback
import importlib.util

# Rich feedback mode - when enabled, reveals which modules are correct
# (read once at import time; set RICH_FEEDBACK=1 in the environment).
RICH_FEEDBACK = os.environ.get("RICH_FEEDBACK", "0") == "1"

# The correct solution (hidden from the optimizer)
# This represents the "optimal" pipeline configuration discovered through
# extensive testing/domain expertise
CORRECT_CONFIG = {
    'loader': 'csv_reader',
    'preprocess': 'normalize',
    'algorithm': 'quicksort',
    'formatter': 'json',
}

# Valid options for each module.
# NOTE: keys must mirror CORRECT_CONFIG exactly - validate_config() iterates
# CORRECT_CONFIG and indexes into this dict by the same names.
VALID_OPTIONS = {
    'loader': ['csv_reader', 'json_reader', 'xml_reader', 'parquet_reader', 'sql_reader'],
    'preprocess': ['normalize', 'standardize', 'minmax', 'scale', 'none'],
    'algorithm': ['quicksort', 'mergesort', 'heapsort', 'bubblesort', 'insertion'],
    'formatter': ['json', 'xml', 'csv', 'yaml', 'protobuf'],
}

# Number of modules a candidate must configure (derived, not hard-coded).
NUM_MODULES = len(CORRECT_CONFIG)


def evaluate(program_path: str) -> dict:
    """
    Evaluate a pipeline configuration program.

    Loads the Python file at *program_path* as a module, calls its
    run_pipeline() (preferred) or configure_pipeline() entry point,
    validates the returned configuration, and scores it against the
    hidden target configuration.

    Args:
        program_path: Path to the Python file containing run_pipeline()
            or configure_pipeline()

    Returns:
        On success, a dict with the metrics at the TOP level
        ("correct_modules", "total_modules", "accuracy", "combined_score",
        "eval_time") plus an "artifacts" dict, matching what OpenEvolve
        expects. On failure, a zero-score result from one of the
        _*_result() helpers.
    """
    start_time = time.time()

    try:
        # Load and execute the candidate program as a throwaway module.
        spec = importlib.util.spec_from_file_location("program", program_path)
        # spec (or its loader) is None when the path is not importable;
        # report that cleanly instead of raising AttributeError below.
        if spec is None or spec.loader is None:
            return _error_result(f"Could not load program from '{program_path}'")
        module = importlib.util.module_from_spec(spec)
        sys.modules["program"] = module
        spec.loader.exec_module(module)

        # Get the configuration from whichever entry point exists.
        if hasattr(module, 'run_pipeline'):
            config = module.run_pipeline()
        elif hasattr(module, 'configure_pipeline'):
            config = module.configure_pipeline()
        else:
            return _error_result("Program must define run_pipeline() or configure_pipeline()")

        # Validate the configuration (structure and allowed values).
        validation_errors = validate_config(config)
        if validation_errors:
            return _validation_error_result(validation_errors)

        # Score the configuration against the hidden target.
        correct_count, module_results = score_config(config)

        # Calculate metrics
        accuracy = correct_count / NUM_MODULES

        # The combined score rewards finding more correct modules
        # but gives NO information about which modules are correct
        combined_score = accuracy

        eval_time = time.time() - start_time

        # Build artifacts - provide feedback that helps evolution
        # but doesn't reveal which specific modules are wrong
        artifacts = build_artifacts(config, correct_count, module_results, eval_time)

        # Return metrics at top level for OpenEvolve compatibility
        return {
            "correct_modules": correct_count,
            "total_modules": NUM_MODULES,
            "accuracy": accuracy,
            "combined_score": combined_score,
            "eval_time": eval_time,
            "artifacts": artifacts,
        }

    except Exception as e:
        # Any failure inside the candidate program is reported, not raised.
        return _exception_result(e)


def validate_config(config: dict) -> list:
    """Check that *config* is a dict supplying every required module with
    an allowed value.

    Returns:
        A list of human-readable error strings; empty when the
        configuration is valid.
    """
    # A non-dict can't be inspected further - report and stop.
    if not isinstance(config, dict):
        return [f"Configuration must be a dict, got {type(config).__name__}"]

    problems = []
    for name in CORRECT_CONFIG:
        if name not in config:
            problems.append(f"Missing required module: '{name}'")
            continue
        value = config[name]
        if value not in VALID_OPTIONS[name]:
            problems.append(
                f"Invalid value for '{name}': '{value}'. "
                f"Valid options: {VALID_OPTIONS[name]}"
            )

    return problems


def score_config(config: dict) -> tuple:
    """
    Score the configuration against the target.

    Returns:
        tuple: (correct_count, module_results dict) where module_results
        maps each module name to True/False for whether it matches the
        hidden target value.
    """
    # Per-module correctness flags; missing keys compare as incorrect.
    module_results = {
        name: config.get(name) == expected
        for name, expected in CORRECT_CONFIG.items()
    }
    return sum(module_results.values()), module_results


def build_artifacts(config: dict, correct_count: int, module_results: dict, eval_time: float) -> dict:
    """
    Build artifacts that provide useful feedback.

    In normal mode: Only reveals how many modules are correct, not which ones.
    In rich feedback mode (RICH_FEEDBACK=1): Reveals exactly which modules are correct/incorrect.

    Args:
        config: The candidate configuration (already validated).
        correct_count: Number of modules matching the target.
        module_results: Mapping of module name -> bool correctness flag.
        eval_time: Wall-clock evaluation time in seconds.

    Returns:
        dict of artifact entries for the optimizer.
    """
    artifacts = {}

    # Configuration summary
    artifacts["configuration"] = str(config)

    # Rich feedback mode - reveals which modules are correct/incorrect
    if RICH_FEEDBACK:
        correct_modules = [m for m, is_correct in module_results.items() if is_correct]
        incorrect_modules = [m for m, is_correct in module_results.items() if not is_correct]

        artifacts["module_feedback"] = {
            "correct": correct_modules,
            "incorrect": incorrect_modules,
        }

        if incorrect_modules:
            hints = []
            for module in incorrect_modules:
                hints.append(f"'{module}' is WRONG - try a different option from {VALID_OPTIONS[module]}")
            artifacts["actionable_hints"] = hints
        else:
            artifacts["actionable_hints"] = ["All modules are correct!"]

    # Score feedback - tells you how many are correct, but not which ones
    if correct_count == NUM_MODULES:
        artifacts["status"] = "PERFECT! All modules correctly configured!"
        artifacts["suggestion"] = "Optimal configuration found."
    elif correct_count >= NUM_MODULES - 1:
        artifacts["status"] = f"Very close! {correct_count}/{NUM_MODULES} modules correct."
        artifacts["suggestion"] = "One module may need adjustment. Try variations."
    elif correct_count >= NUM_MODULES // 2:
        artifacts["status"] = f"Good progress: {correct_count}/{NUM_MODULES} modules correct."
        artifacts["suggestion"] = "Some modules are correct. Explore different combinations."
    else:
        artifacts["status"] = f"Needs improvement: {correct_count}/{NUM_MODULES} modules correct."
        artifacts["suggestion"] = "Try different options for each module. Consider the problem domain."

    # Hints about the problem structure (not the solution)
    artifacts["problem_hints"] = (
        "Each module choice is independent. "
        "The optimal loader processes the most common data format. "
        "The optimal preprocessing creates unit variance. "
        "The optimal algorithm has O(n log n) average case. "
        "The optimal formatter is widely used for APIs."
    )

    # Derive the search-space size from the actual option lists instead of
    # hard-coding 5**NUM_MODULES, which silently assumed every module has
    # exactly five options. Same value today (5^4 = 625), but stays correct
    # if VALID_OPTIONS ever changes.
    search_space_size = 1
    for options in VALID_OPTIONS.values():
        search_space_size *= len(options)
    artifacts["search_space"] = f"{search_space_size} possible combinations"
    artifacts["eval_time"] = f"{eval_time:.3f}s"

    return artifacts


def _error_result(message: str) -> dict:
    """Return a zero-score result for a structural error (e.g. missing
    entry point).

    Metrics are placed at the TOP level of the dict, matching the shape
    evaluate() returns on success (see its "OpenEvolve compatibility"
    comment); previously they were nested under a "metrics" key, so a
    consumer reading result["combined_score"] crashed on every error.
    """
    return {
        "correct_modules": 0,
        "total_modules": NUM_MODULES,
        "accuracy": 0.0,
        "combined_score": 0.0,
        "artifacts": {
            "error": message,
            "status": "ERROR",
        },
    }


def _validation_error_result(errors: list) -> dict:
    """Return a zero-score result for an invalid configuration.

    Metrics are placed at the TOP level of the dict, matching the shape
    evaluate() returns on success (see its "OpenEvolve compatibility"
    comment); previously they were nested under a "metrics" key, so a
    consumer reading result["combined_score"] crashed on every error.
    """
    return {
        "correct_modules": 0,
        "total_modules": NUM_MODULES,
        "accuracy": 0.0,
        "combined_score": 0.0,
        "artifacts": {
            "validation_errors": "\n".join(errors),
            "status": "VALIDATION_ERROR",
            "suggestion": "Fix the configuration to use valid module options.",
        },
    }


def _exception_result(e: Exception) -> dict:
    """Return a zero-score result for an exception raised while loading or
    running the candidate program (must be called from an except block so
    traceback.format_exc() captures the active traceback).

    Metrics are placed at the TOP level of the dict, matching the shape
    evaluate() returns on success (see its "OpenEvolve compatibility"
    comment); previously they were nested under a "metrics" key, so a
    consumer reading result["combined_score"] crashed on every error.
    """
    return {
        "correct_modules": 0,
        "total_modules": NUM_MODULES,
        "accuracy": 0.0,
        "combined_score": 0.0,
        "artifacts": {
            "exception": str(e),
            "traceback": traceback.format_exc(),
            "status": "EXCEPTION",
        },
    }


# For standalone testing
if __name__ == "__main__":
    if len(sys.argv) > 1:
        program_path = sys.argv[1]
    else:
        # Default to the initial program next to this script.
        # (os is already imported at module top; the previous local
        # `import os` here was redundant.)
        script_dir = os.path.dirname(os.path.abspath(__file__))
        program_path = os.path.join(script_dir, "initial_program.py")

    result = evaluate(program_path)
    # evaluate() puts metrics at the top level on success; older/error
    # result shapes may nest them under "metrics". The previous
    # result['metrics'] lookup raised KeyError on every successful run.
    metrics = result.get("metrics") or {
        k: v for k, v in result.items() if k != "artifacts"
    }
    print(f"Metrics: {metrics}")
    print(f"Artifacts: {result.get('artifacts', {})}")