"""
Evaluator for the lu_factorization task with baseline comparison
This evaluator compares OpenEvolve's evolved solutions against the reference
AlgoTune baseline implementation to measure performance improvements.
The speedup becomes the primary fitness score for evolution.
"""
import importlib.util
import numpy as np
import time
import concurrent.futures
import traceback
import sys
from pathlib import Path
from typing import Optional
# Import EvaluationResult for artifacts support
from openevolve.evaluation_result import EvaluationResult
# Add AlgoTune to path for importing reference tasks
# These paths will be dynamically determined based on the AlgoTune installation
# The adapter will handle path setup when the evaluator is created
# Setup AlgoTune paths dynamically
def setup_algotune_paths():
"""Setup Python import paths for AlgoTune modules."""
    # Candidate locations for an AlgoTune checkout; the adapter may also configure
    # the path explicitly when the evaluator is created.
    possible_algotune_paths = [
Path("/Users/asankhaya/Documents/GitHub/AlgoTune"),
Path(__file__).parent.parent.parent.parent / "AlgoTune",
Path.home() / "github" / "AlgoTune",
]
algotune_base = None
for path in possible_algotune_paths:
if path.exists():
algotune_base = path
break
if algotune_base is None:
print("Warning: Could not find AlgoTune installation")
return False
# Add AlgoTune base directory to path
if str(algotune_base) not in sys.path:
sys.path.insert(0, str(algotune_base))
return True
# Setup paths and try to import AlgoTune tasks
if setup_algotune_paths():
try:
from AlgoTuneTasks.base import TASK_REGISTRY
# Import the specific lu_factorization task to register it
from AlgoTuneTasks.lu_factorization.lu_factorization import LUFactorization
print("Successfully imported AlgoTune tasks and lu_factorization")
except ImportError as e:
print(f"Error: Could not import AlgoTune tasks: {e}")
print("Make sure AlgoTune is properly installed and accessible")
TASK_REGISTRY = {}
else:
print("Warning: Could not setup AlgoTune paths")
TASK_REGISTRY = {}
def run_with_timeout(func, args=(), kwargs=None, timeout_seconds=30):
"""
Run a function with a timeout using concurrent.futures
Args:
func: Function to run
args: Arguments to pass to the function
kwargs: Keyword arguments to pass to the function
timeout_seconds: Timeout in seconds
Returns:
Result of the function or raises TimeoutError
"""
    if kwargs is None:
        kwargs = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(func, *args, **kwargs)
try:
result = future.result(timeout=timeout_seconds)
return result
except concurrent.futures.TimeoutError:
raise TimeoutError(f"Function timed out after {timeout_seconds} seconds")
def safe_convert(value):
"""Convert a value safely for evaluation"""
try:
if isinstance(value, (list, tuple)):
return [safe_convert(v) for v in value]
elif isinstance(value, np.ndarray):
return value.tolist()
else:
return value
except Exception:
return value
def calculate_speedup(baseline_time_ms: float, evolved_time_ms: float, is_valid: bool) -> Optional[float]:
"""
Calculate speedup between baseline and evolved solution.
Speedup = (Baseline Time) / (Evolved Time)
Higher is better.
Args:
baseline_time_ms: Time taken by baseline implementation
evolved_time_ms: Time taken by evolved solution
is_valid: Whether the evolved solution is valid
Returns:
Speedup value or None if calculation is not possible
"""
if not is_valid:
return None
if baseline_time_ms is None or baseline_time_ms <= 0:
return None
if evolved_time_ms is None:
return None
if evolved_time_ms <= 0:
return float('inf') # Infinite speedup for instant solution
return baseline_time_ms / evolved_time_ms
def measure_baseline_performance(task_instance, problem, num_runs=3, warmup_runs=1):
"""
Measure baseline performance using the original AlgoTune implementation.
Args:
task_instance: The AlgoTune task instance
problem: Problem to solve
num_runs: Number of timing runs
warmup_runs: Number of warmup runs
Returns:
Dictionary with baseline timing results
"""
try:
# Warmup runs
for _ in range(warmup_runs):
try:
task_instance.solve(problem)
except Exception:
pass # Ignore warmup errors
# Timing runs
times = []
for _ in range(num_runs):
start_time = time.perf_counter()
try:
result = run_with_timeout(task_instance.solve, args=(problem,), timeout_seconds=30)
end_time = time.perf_counter()
if result is not None:
elapsed_ms = (end_time - start_time) * 1000
times.append(elapsed_ms)
except Exception as e:
print(f"Baseline run failed: {e}")
continue
if not times:
return {
"success": False,
"error": "All baseline runs failed",
"avg_time_ms": None,
"min_time_ms": None,
"std_time_ms": None
}
return {
"success": True,
"avg_time_ms": float(np.mean(times)),
"min_time_ms": float(np.min(times)),
"std_time_ms": float(np.std(times)),
"times": times
}
except Exception as e:
return {
"success": False,
"error": str(e),
"avg_time_ms": None,
"min_time_ms": None,
"std_time_ms": None
}
def measure_evolved_performance(program, problem, num_runs=3, warmup_runs=1, timeout_seconds=30):
"""
Measure evolved solution performance.
Args:
program: The evolved program module
problem: Problem to solve
num_runs: Number of timing runs
warmup_runs: Number of warmup runs
timeout_seconds: Timeout per run
Returns:
Dictionary with evolved timing results
"""
try:
# Warmup runs
for _ in range(warmup_runs):
try:
run_with_timeout(program.run_solver, args=(problem,), timeout_seconds=timeout_seconds)
except Exception:
pass # Ignore warmup errors
# Timing runs
times = []
results = []
for _ in range(num_runs):
start_time = time.perf_counter()
try:
result = run_with_timeout(program.run_solver, args=(problem,), timeout_seconds=timeout_seconds)
end_time = time.perf_counter()
elapsed_ms = (end_time - start_time) * 1000
times.append(elapsed_ms)
results.append(result)
except TimeoutError:
print(f"Evolved solution timed out after {timeout_seconds} seconds")
continue
except Exception as e:
print(f"Evolved run failed: {e}")
continue
if not times:
return {
"success": False,
"error": "All evolved runs failed",
"avg_time_ms": None,
"min_time_ms": None,
"std_time_ms": None,
"results": []
}
return {
"success": True,
"avg_time_ms": float(np.mean(times)),
"min_time_ms": float(np.min(times)),
"std_time_ms": float(np.std(times)),
"times": times,
"results": results
}
except Exception as e:
return {
"success": False,
"error": str(e),
"avg_time_ms": None,
"min_time_ms": None,
"std_time_ms": None,
"results": []
}
def evaluate(program_path, config=None):
"""
Enhanced evaluation with baseline comparison for lu_factorization task.
This evaluator:
1. Loads the evolved solve method from initial_program.py
2. Generates test problems using the original AlgoTune task
3. Measures baseline performance using original AlgoTune implementation
4. Measures evolved solution performance
5. Calculates speedup as primary fitness score
6. Validates correctness using the original task's validation method
Args:
program_path: Path to the evolved program file (initial_program.py)
config: Configuration dictionary with evaluator settings
Returns:
Dictionary of metrics including speedup as primary fitness score
"""
try:
# Load configuration
if config is None:
# Try to load config from YAML file first
try:
import yaml
from pathlib import Path
config_path = Path(__file__).parent / "config.yaml"
if config_path.exists():
with open(config_path, 'r') as f:
config = yaml.safe_load(f)
else:
raise FileNotFoundError("config.yaml not found")
except Exception as e:
# Could not load config.yaml, using defaults
config = {
"algotune": {
"num_trials": 5,
"data_size": 100,
"timeout": 300,
"num_runs": 3,
"warmup_runs": 1
}
}
# Extract AlgoTune task-specific settings from config
algotune_config = config.get("algotune", {})
num_trials = algotune_config.get("num_trials", 5)
data_size = algotune_config.get("data_size", 100)
timeout_seconds = algotune_config.get("timeout", 300)
num_runs = algotune_config.get("num_runs", 3)
warmup_runs = algotune_config.get("warmup_runs", 1)
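        # The evaluator expects these settings under an "algotune" key, e.g.
        # (illustrative sketch; a real config.yaml may carry additional OpenEvolve settings):
        #   algotune:
        #     num_trials: 5
        #     data_size: 100
        #     timeout: 300
        #     num_runs: 3
        #     warmup_runs: 1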
# Load the program
spec = importlib.util.spec_from_file_location("program", program_path)
program = importlib.util.module_from_spec(spec)
spec.loader.exec_module(program)
# Check if the required function exists
if not hasattr(program, "run_solver"):
print(f"Error: program does not have 'run_solver' function")
return {
"correctness_score": 0.0,
"performance_score": 0.0,
"combined_score": 0.0,
"speedup_score": 0.0, # Primary fitness score
"baseline_comparison": {
"mean_speedup": None,
"median_speedup": None,
"success_rate": 0.0,
"baseline_times": [],
"evolved_times": [],
"speedups": []
},
"error": "Missing run_solver function",
}
# Get the original task for reference solutions and problem generation
task_class = None
if "lu_factorization" in TASK_REGISTRY:
task_class = TASK_REGISTRY["lu_factorization"]
print(f"Successfully loaded lu_factorization task from registry")
else:
print(f"Error: lu_factorization task not found in TASK_REGISTRY")
print(f"Available tasks: {list(TASK_REGISTRY.keys())}")
raise Exception("Could not load lu_factorization task from AlgoTune registry")
# Generate test problems and evaluate
correctness_scores = []
performance_scores = []
baseline_times = []
evolved_times = []
speedups = []
valid_count = 0
success_count = 0
for trial in range(num_trials):
try:
# Generate a test problem using the original task
if task_class:
task_instance = task_class()
problem = task_instance.generate_problem(n=data_size, random_seed=trial)
else:
raise Exception("Could not load original AlgoTune task for problem generation")
# Measure baseline performance
baseline_result = measure_baseline_performance(
task_instance, problem, num_runs, warmup_runs
)
if not baseline_result["success"]:
print(f"Trial {trial}: Baseline measurement failed: {baseline_result.get('error', 'Unknown error')}")
continue
# Measure evolved performance
evolved_result = measure_evolved_performance(
program, problem, num_runs, warmup_runs, timeout_seconds
)
if not evolved_result["success"]:
print(f"Trial {trial}: Evolved measurement failed: {evolved_result.get('error', 'Unknown error')}")
continue
# Validate evolved solution
correctness_score = 0.0
is_valid = False
if evolved_result["results"]:
# Use the first result for validation
evolved_solution = evolved_result["results"][0]
evolved_solution = safe_convert(evolved_solution)
try:
# Use the evolved program's own is_solution method for validation
# This ensures consistency between the extracted solve and validation logic
evolved_solver = program.LUFactorization()
is_valid = evolved_solver.is_solution(problem, evolved_solution)
correctness_score = 1.0 if is_valid else 0.0
except Exception as e:
print(f"Trial {trial}: Error checking solution validity with evolved is_solution: {e}")
correctness_score = 0.0
is_valid = False
# Calculate speedup
baseline_time = baseline_result["min_time_ms"] # Use minimum time for fair comparison
evolved_time = evolved_result["min_time_ms"]
speedup = calculate_speedup(baseline_time, evolved_time, is_valid)
# Store results
correctness_scores.append(correctness_score)
baseline_times.append(baseline_time)
evolved_times.append(evolved_time)
if speedup is not None:
speedups.append(speedup)
valid_count += 1
# Performance score based on execution time
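                # e.g. an evolved time of 1 ms scores 0.5 and 9 ms scores 0.1,
                # so faster runs approach 1.0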
performance_score = 1.0 / (1.0 + evolved_time) if evolved_time > 0 else 0.0
performance_scores.append(performance_score)
success_count += 1
except Exception as e:
print(f"Trial {trial}: Error - {str(e)}")
print(traceback.format_exc())
continue
# If all trials failed, return zero scores
if success_count == 0:
return {
"correctness_score": 0.0,
"performance_score": 0.0,
"combined_score": 0.0,
"speedup_score": 0.0, # Primary fitness score
"baseline_comparison": {
"mean_speedup": None,
"median_speedup": None,
"success_rate": 0.0,
"baseline_times": [],
"evolved_times": [],
"speedups": []
},
"error": "All trials failed",
}
# Calculate metrics
avg_correctness = float(np.mean(correctness_scores))
avg_performance = float(np.mean(performance_scores))
reliability_score = float(success_count / num_trials)
# Calculate speedup as primary fitness score
if speedups:
mean_speedup = float(np.mean(speedups))
# Use speedup as primary fitness score (higher is better)
speedup_score = mean_speedup
else:
speedup_score = 0.0
mean_speedup = None
# Combined score prioritizing correctness (kept for compatibility)
combined_score = float(
0.7 * avg_correctness + 0.2 * avg_performance + 0.1 * reliability_score
)
# Calculate baseline comparison metrics
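        # Note: "success_rate" inside baseline_comparison is the fraction of successful
        # trials whose solution validated, while the top-level "success_rate" metric
        # returned below is the fraction of trials that completed at all.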
baseline_comparison = {
"mean_speedup": mean_speedup,
"median_speedup": float(np.median(speedups)) if speedups else None,
"success_rate": float(valid_count / success_count) if success_count > 0 else 0.0,
"baseline_times": baseline_times,
"evolved_times": evolved_times,
"speedups": speedups,
"num_valid_solutions": valid_count,
"num_total_trials": success_count
}
return {
"correctness_score": avg_correctness,
"performance_score": avg_performance,
"reliability_score": reliability_score,
"combined_score": combined_score,
"speedup_score": speedup_score, # Primary fitness score for evolution
"success_rate": reliability_score,
"baseline_comparison": baseline_comparison,
}
except Exception as e:
print(f"Evaluation failed completely: {str(e)}")
print(traceback.format_exc())
return {
"correctness_score": 0.0,
"performance_score": 0.0,
"combined_score": 0.0,
"speedup_score": 0.0, # Primary fitness score
"baseline_comparison": {
"mean_speedup": None,
"median_speedup": None,
"success_rate": 0.0,
"baseline_times": [],
"evolved_times": [],
"speedups": []
},
"error": str(e),
}
# Stage-based evaluation for cascade evaluation
def evaluate_stage1(program_path, config=None):
"""First stage evaluation with basic functionality check of the evolved solve method"""
try:
# Load configuration
if config is None:
# Try to load config from YAML file first
try:
import yaml
from pathlib import Path
config_path = Path(__file__).parent / "config.yaml"
if config_path.exists():
with open(config_path, 'r') as f:
config = yaml.safe_load(f)
else:
raise FileNotFoundError("config.yaml not found")
except Exception as e:
# Could not load config.yaml, using defaults
config = {
"algotune": {
"num_trials": 5,
"data_size": 100,
"timeout": 300
}
}
algotune_config = config.get("algotune", {})
data_size = algotune_config.get("data_size", 100)
timeout_seconds = algotune_config.get("timeout", 300)
# Load the program
spec = importlib.util.spec_from_file_location("program", program_path)
program = importlib.util.module_from_spec(spec)
spec.loader.exec_module(program)
# Check if the required function exists
if not hasattr(program, "run_solver"):
return EvaluationResult(
metrics={"runs_successfully": 0.0},
artifacts={"error": "Missing run_solver function", "traceback": traceback.format_exc() if "Missing run_solver function" != "Timeout" else "Timeout occurred"}
)
# Get the original task for reference solutions and problem generation
task_class = None
if "lu_factorization" in TASK_REGISTRY:
task_class = TASK_REGISTRY["lu_factorization"]
else:
print(f"Error: lu_factorization task not found in TASK_REGISTRY")
print(f"Available tasks: {list(TASK_REGISTRY.keys())}")
try:
# Run a single trial with timeout using proper task-specific problem
if task_class:
task_instance = task_class()
test_problem = task_instance.generate_problem(n=data_size, random_seed=42)
else:
# Generic fallback test problem
test_problem = {"test_data": [1, 2, 3], "random_seed": 42}
result = run_with_timeout(program.run_solver, args=(test_problem,), timeout_seconds=timeout_seconds)
# Basic validity check
if result is not None:
return EvaluationResult(
metrics={
"runs_successfully": 1.0,
"basic_functionality": 1.0
},
artifacts={}
)
else:
return EvaluationResult(
metrics={
"runs_successfully": 0.5,
"basic_functionality": 0.0
},
artifacts={"error": "Function returned None", "failure_stage": "stage1"}
)
except TimeoutError as e:
return EvaluationResult(
metrics={"runs_successfully": 0.0},
artifacts={"error": "Timeout", "traceback": traceback.format_exc() if "Timeout" != "Timeout" else "Timeout occurred"}
)
except Exception as e:
return EvaluationResult(
metrics={"runs_successfully": 0.0},
artifacts={"error": str(e), "traceback": traceback.format_exc() if str(e) != "Timeout" else "Timeout occurred"}
)
except Exception as e:
return EvaluationResult(
metrics={"runs_successfully": 0.0},
artifacts={"error": str(e), "traceback": traceback.format_exc() if str(e) != "Timeout" else "Timeout occurred"}
)
def evaluate_stage2(program_path, config=None):
"""Second stage evaluation with more thorough testing of the evolved solve method"""
return evaluate(program_path, config)
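
# Optional manual smoke test (illustrative sketch; not used by OpenEvolve itself, which
# calls evaluate()/evaluate_stage1() directly): run this module with the path to an
# evolved program, e.g. `python evaluator.py initial_program.py`.
if __name__ == "__main__":
    import json
    if len(sys.argv) > 1:
        print(json.dumps(evaluate(sys.argv[1]), indent=2, default=str))
    else:
        print("Usage: python evaluator.py <path_to_evolved_program>")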