|
|
|
|
|
"""Run multiple trials of OpenEvolve to get statistics.""" |
|
|
|
|
|
import json
import os
import re
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Optional
|
|
|
|
|
|
|
|
# Run from this script's directory so the relative paths used below
# (config.yaml, initial_program.py, evaluator.py, output dirs) resolve
# the same way regardless of the caller's working directory.
os.chdir(Path(__file__).parent)
|
|
|
|
|
|
|
|
def run_trial(trial_num: int, max_iterations: int = 100, seed: Optional[int] = None) -> dict:
    """Run a single OpenEvolve trial and parse its logs for results.

    Args:
        trial_num: Zero-based trial index; used to name the output directory
            and (when seeded) the temporary config file.
        max_iterations: Iteration budget passed to ``openevolve-run``.
        seed: Optional random seed. When given, a temporary copy of
            ``config.yaml`` is written with ``random_seed`` rewritten to this
            value, and removed again after the run.

    Returns:
        Dict with keys ``trial``, ``seed``, ``solution_found_at`` (iteration
        of the first perfect score, or None), ``best_score`` (highest
        combined_score seen in the log, 0.0 if none), and ``max_iterations``.
    """
    output_dir = f"openevolve_output_trial_{trial_num}"

    # Start from a clean slate so a stale run's logs can't be re-parsed.
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)

    temp_config = "config.yaml"
    if seed is not None:
        # Write a per-trial config with the requested seed substituted in.
        with open("config.yaml", "r") as f:
            config_content = f.read()
        config_content = re.sub(r'random_seed:\s*\d+', f'random_seed: {seed}', config_content)
        temp_config = f"config_trial_{trial_num}.yaml"
        with open(temp_config, "w") as f:
            f.write(config_content)

    cmd = [
        "openevolve-run",
        "initial_program.py",
        "evaluator.py",
        "--config", temp_config,
        "--iterations", str(max_iterations),
        "--output", output_dir,
    ]

    print(f"\n{'='*60}")
    print(f"TRIAL {trial_num + 1}: Running OpenEvolve with seed {seed}")
    print('='*60)

    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
        # BUG FIX: the original captured `result` and never looked at it, so a
        # failed run was indistinguishable from a run that found nothing.
        # Surface the failure (tail of stderr) instead of swallowing it.
        if result.returncode != 0:
            print(f"WARNING: openevolve-run exited with code {result.returncode}")
            if result.stderr:
                print(result.stderr[-2000:])
    finally:
        # BUG FIX: remove the temporary config even if subprocess.run raises
        # (e.g. openevolve-run is not installed), so it can't leak.
        if seed is not None and os.path.exists(temp_config):
            os.remove(temp_config)

    solution_found_at = None
    best_score = 0.0

    log_dir = Path(output_dir) / "logs"
    if log_dir.exists():
        # Sort for determinism; glob order is filesystem-dependent.
        log_files = sorted(log_dir.glob("*.log"))
        if log_files:
            with open(log_files[0], "r") as f:
                log_content = f.read()

            # Highest combined_score reported anywhere in the log.
            score_matches = re.findall(r'combined_score[=:]\s*([\d.]+)', log_content)
            if score_matches:
                best_score = max(float(s) for s in score_matches)

            # Prefer an explicit perfect-score iteration; otherwise fall back
            # to the last "new best" iteration when the best score reached 1.0.
            new_best_matches = re.findall(r'New best solution found at iteration (\d+):', log_content)
            perfect_matches = re.findall(r'Iteration (\d+):.*?combined_score=1\.0000', log_content)

            if perfect_matches:
                solution_found_at = int(perfect_matches[0])
            elif best_score >= 1.0 and new_best_matches:
                solution_found_at = int(new_best_matches[-1])

    return {
        "trial": trial_num,
        "seed": seed,
        "solution_found_at": solution_found_at,
        "best_score": best_score,
        "max_iterations": max_iterations,
    }
|
|
|
|
|
|
|
|
def run_trials(num_trials: int = 3, max_iterations: int = 100, base_seed: int = 100) -> dict:
    """Run multiple OpenEvolve trials, print a summary, and save JSON results.

    Args:
        num_trials: Number of independent trials to run (must be positive).
        max_iterations: Iteration budget passed to each trial.
        base_seed: Base random seed; trial ``i`` uses ``base_seed + i * 111``
            so trials are deterministic but distinct.

    Returns:
        Summary dict (also written to ``openevolve_trials_results.json``).

    Raises:
        ValueError: If ``num_trials`` is not positive.
    """
    # BUG FIX: the original divided by num_trials unconditionally and raised
    # an opaque ZeroDivisionError for `--trials 0`; fail fast instead.
    if num_trials <= 0:
        raise ValueError("num_trials must be a positive integer")

    results = []
    solutions_found = []  # iteration numbers of successful trials only

    for trial in range(num_trials):
        seed = base_seed + trial * 111
        result = run_trial(trial, max_iterations, seed)
        results.append(result)

        if result["solution_found_at"] is not None:
            solutions_found.append(result["solution_found_at"])
            print(f"Trial {trial + 1}: SUCCESS at iteration {result['solution_found_at']}")
        else:
            print(f"Trial {trial + 1}: FAILED (best score: {result['best_score']:.2%})")

    # Aggregate statistics; iteration stats are None/inf when nothing succeeded.
    success_rate = len(solutions_found) / num_trials
    avg_iterations = sum(solutions_found) / len(solutions_found) if solutions_found else float('inf')
    min_iterations = min(solutions_found) if solutions_found else None
    max_iterations_found = max(solutions_found) if solutions_found else None

    print(f"\n{'='*60}")
    print("OPENEVOLVE TRIAL RESULTS")
    print('='*60)
    print(f"Trials: {num_trials}")
    print(f"Max iterations per trial: {max_iterations}")
    print(f"Success rate: {success_rate:.0%} ({len(solutions_found)}/{num_trials})")
    if solutions_found:
        print(f"Avg iterations to solution: {avg_iterations:.1f}")
        print(f"Min iterations: {min_iterations}")
        print(f"Max iterations: {max_iterations_found}")
    print('='*60)

    summary = {
        "config": {
            "num_trials": num_trials,
            "max_iterations": max_iterations,
        },
        "summary": {
            "success_rate": success_rate,
            # JSON has no Infinity; store None when no trial succeeded.
            "avg_iterations_to_solution": avg_iterations if solutions_found else None,
            "min_iterations": min_iterations,
            "max_iterations": max_iterations_found,
            "solutions_found": len(solutions_found),
        },
        "trials": results,
    }

    with open("openevolve_trials_results.json", "w") as f:
        json.dump(summary, f, indent=2)

    print("\nResults saved to: openevolve_trials_results.json")

    # Per-trial output directories are only needed for log parsing; remove
    # them now that the summary JSON has been written.
    for trial in range(num_trials):
        output_dir = f"openevolve_output_trial_{trial}"
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)

    return summary
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
import argparse |
|
|
parser = argparse.ArgumentParser() |
|
|
parser.add_argument("--trials", type=int, default=3, help="Number of trials") |
|
|
parser.add_argument("--iterations", type=int, default=100, help="Max iterations per trial") |
|
|
parser.add_argument("--seed", type=int, default=100, help="Base random seed") |
|
|
args = parser.parse_args() |
|
|
|
|
|
run_trials(num_trials=args.trials, max_iterations=args.iterations, base_seed=args.seed) |
|
|
|