| | """ |
| | Evaluator for R robust regression example |
| | """ |
| |
|
| | import asyncio |
| | import json |
| | import os |
| | import subprocess |
| | import tempfile |
| | import time |
| | from pathlib import Path |
| | from typing import Dict, Any |
| |
|
| | import numpy as np |
| |
|
| | from openevolve.evaluation_result import EvaluationResult |
| |
|
| |
|
async def evaluate(program_path: str) -> EvaluationResult:
    """
    Evaluate an R program implementing robust regression.

    Tests the program on synthetic data with outliers to measure:
    - Accuracy (MSE, MAE, R-squared)
    - Robustness to outliers
    - Computational efficiency
    """
    try:
        # Synthetic test cases with increasing outlier contamination
        test_cases = [
            generate_regression_data(n_samples=100, n_features=3, outlier_fraction=0.0, noise=0.1),
            generate_regression_data(n_samples=100, n_features=3, outlier_fraction=0.1, noise=0.1),
            generate_regression_data(n_samples=100, n_features=3, outlier_fraction=0.2, noise=0.1),
            generate_regression_data(n_samples=200, n_features=5, outlier_fraction=0.15, noise=0.2),
        ]

        # Accumulators for metrics averaged over successful test cases
        total_score = 0.0
        total_mse = 0.0
        total_mae = 0.0
        total_medae = 0.0
        total_r_squared = 0.0
        total_outlier_robustness = 0.0
        total_time = 0.0

        artifacts = {"test_results": []}

        for i, (X, y, true_coeffs) in enumerate(test_cases):
            # Write the test data to temporary CSV files first, so the R
            # script below can reference them by path
            X_file = tempfile.NamedTemporaryFile(mode="w", suffix=".csv", delete=False)
            y_file = tempfile.NamedTemporaryFile(mode="w", suffix=".csv", delete=False)
            np.savetxt(X_file.name, X, delimiter=",", fmt="%.6f")
            np.savetxt(y_file.name, y, delimiter=",", fmt="%.6f")
            X_file.close()
            y_file.close()

            # Create a temporary R script that sources the program, runs it
            # on the test data, and writes the resulting metrics as JSON
            with tempfile.NamedTemporaryFile(mode="w", suffix=".r", delete=False) as f:
                f.write(
                    f"""
# Source the program
source("{program_path}")

# Load test data
X <- as.matrix(read.csv("{X_file.name}", header=FALSE))
y <- as.vector(as.matrix(read.csv("{y_file.name}", header=FALSE)))

# Time the execution
start_time <- Sys.time()
metrics <- main()
end_time <- Sys.time()
exec_time <- as.numeric(end_time - start_time, units="secs")

# Add execution time
metrics$execution_time <- exec_time

# Save results
write(jsonlite::toJSON(metrics, auto_unbox=TRUE), "results.json")
"""
                )
                test_script = f.name

            # Run the R script; results.json is written to the script's
            # directory because we set cwd accordingly
            try:
                result = subprocess.run(
                    ["Rscript", test_script],
                    capture_output=True,
                    text=True,
                    timeout=30,
                    cwd=os.path.dirname(test_script),
                )

                if result.returncode != 0:
                    artifacts["test_results"].append(
                        {"test_case": i, "error": "R execution failed", "stderr": result.stderr}
                    )
                    continue

                # Load the metrics the R script wrote
                results_path = os.path.join(os.path.dirname(test_script), "results.json")
                if not os.path.exists(results_path):
                    artifacts["test_results"].append(
                        {"test_case": i, "error": "No results file produced"}
                    )
                    continue

                with open(results_path, "r") as f:
                    metrics = json.load(f)

                # Recover this case's outlier fraction (must mirror the
                # values used in test_cases above)
                outlier_fraction = [0.0, 0.1, 0.2, 0.15][i]
                if outlier_fraction > 0:
                    # With outliers present, weight robustness and median error most heavily
                    case_score = (
                        0.2 * (1 - min(metrics.get("mse", 1), 1))
                        + 0.3 * (1 - min(metrics.get("medae", 1), 1))
                        + 0.4 * metrics.get("outlier_robustness", 0)
                        + 0.1 * max(0, metrics.get("r_squared", 0))
                    )
                else:
                    # On clean data, weight plain accuracy most heavily
                    case_score = (
                        0.4 * (1 - min(metrics.get("mse", 1), 1))
                        + 0.3 * (1 - min(metrics.get("mae", 1), 1))
                        + 0.2 * max(0, metrics.get("r_squared", 0))
                        + 0.1 * metrics.get("outlier_robustness", 0)
                    )

                total_score += case_score
                total_mse += metrics.get("mse", 1)
                total_mae += metrics.get("mae", 1)
                total_medae += metrics.get("medae", 1)
                total_r_squared += max(0, metrics.get("r_squared", 0))
                total_outlier_robustness += metrics.get("outlier_robustness", 0)
                total_time += metrics.get("execution_time", 1)

                artifacts["test_results"].append(
                    {
                        "test_case": i,
                        "outlier_fraction": outlier_fraction,
                        "metrics": metrics,
                        "case_score": case_score,
                    }
                )

            except subprocess.TimeoutExpired:
                artifacts["test_results"].append({"test_case": i, "error": "Timeout"})
            except Exception as e:
                artifacts["test_results"].append({"test_case": i, "error": str(e)})
            finally:
                # Clean up the temporary script, data files, and results
                os.unlink(test_script)
                os.unlink(X_file.name)
                os.unlink(y_file.name)
                results_path = os.path.join(os.path.dirname(test_script), "results.json")
                if os.path.exists(results_path):
                    os.unlink(results_path)

        # If every test case failed, return a zero score with the error artifacts
        n_successful = len([r for r in artifacts["test_results"] if "error" not in r])
        if n_successful == 0:
            return EvaluationResult(
                metrics={
                    "score": 0.0,
                    "mse": float("inf"),
                    "mae": float("inf"),
                    "medae": float("inf"),
                    "r_squared": 0.0,
                    "outlier_robustness": 0.0,
                    "execution_time": float("inf"),
                },
                artifacts=artifacts,
            )

        # Average metrics over the successful test cases
        avg_score = total_score / n_successful
        avg_mse = total_mse / n_successful
        avg_mae = total_mae / n_successful
        avg_medae = total_medae / n_successful
        avg_r_squared = total_r_squared / n_successful
        avg_outlier_robustness = total_outlier_robustness / n_successful
        avg_time = total_time / n_successful

        # Small bonus for sub-second average runtimes, capped so the score stays in [0, 1]
        efficiency_bonus = max(0, 1 - avg_time) * 0.1
        final_score = min(1.0, avg_score + efficiency_bonus)

        return EvaluationResult(
            metrics={
                "score": final_score,
                "mse": avg_mse,
                "mae": avg_mae,
                "medae": avg_medae,
                "r_squared": avg_r_squared,
                "outlier_robustness": avg_outlier_robustness,
                "execution_time": avg_time,
            },
            artifacts=artifacts,
        )

    except Exception as e:
        return EvaluationResult(
            metrics={
                "score": 0.0,
                "mse": float("inf"),
                "mae": float("inf"),
                "medae": float("inf"),
                "r_squared": 0.0,
                "outlier_robustness": 0.0,
                "execution_time": float("inf"),
            },
            artifacts={"error": str(e), "type": "evaluation_error"},
        )


def generate_regression_data(n_samples=100, n_features=3, outlier_fraction=0.1, noise=0.1):
    """Generate synthetic regression data with outliers."""
    np.random.seed(42)  # fixed seed so every evaluation sees the same data

    # Feature matrix
    X = np.random.randn(n_samples, n_features)

    # True coefficients (intercept followed by slopes)
    true_coeffs = np.random.randn(n_features + 1)

    # Linear response with Gaussian noise
    y = true_coeffs[0] + X @ true_coeffs[1:] + noise * np.random.randn(n_samples)

    # Inject outliers by shifting a random subset of responses
    n_outliers = int(n_samples * outlier_fraction)
    if n_outliers > 0:
        outlier_indices = np.random.choice(n_samples, n_outliers, replace=False)
        # Large shifts of magnitude 3-10 in either direction
        y[outlier_indices] += np.random.choice([-1, 1], n_outliers) * np.random.uniform(
            3, 10, n_outliers
        )

    return X, y, true_coeffs


if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        result = asyncio.run(evaluate(sys.argv[1]))
        print(f"Score: {result.metrics['score']:.4f}")
        print(f"MSE: {result.metrics['mse']:.4f}")
        print(f"Outlier Robustness: {result.metrics['outlier_robustness']:.4f}")
    else:
        print(f"Usage: {sys.argv[0]} <program.r>")