|
|
|
|
|
"""
|
|
|
CLI interface for Frontier-CS evaluation.
|
|
|
|
|
|
Usage:
|
|
|
# Single problem evaluation
|
|
|
frontier-eval flash_attn solution.py
|
|
|
frontier-eval --algorithmic 1 solution.cpp
|
|
|
|
|
|
# With SkyPilot
|
|
|
frontier-eval flash_attn solution.py --skypilot
|
|
|
|
|
|
# All problems for a solution
|
|
|
frontier-eval --all-problems solution.py
|
|
|
|
|
|
# Specific problems
|
|
|
frontier-eval --problems flash_attn,cross_entropy solution.py
|
|
|
|
|
|
# List problems
|
|
|
frontier-eval --list
|
|
|
frontier-eval --list --algorithmic
|
|
|
|
|
|
# Batch evaluation (scans solutions/ by default)
|
|
|
frontier-eval batch
|
|
|
frontier-eval batch --solutions-dir path/to/solutions
|
|
|
frontier-eval batch --resume --results-dir results/batch1
|
|
|
"""
|
|
|
|
|
|
import argparse
|
|
|
import logging
|
|
|
import sys
|
|
|
from pathlib import Path
|
|
|
from typing import List, Optional
|
|
|
|
|
|
from .evaluator import FrontierCSEvaluator
|
|
|
from .runner import EvaluationResult
|
|
|
from .batch.pair import read_solution_config
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
def detect_solution_dir(path: Path) -> tuple[bool, Optional[str], Optional[Path]]:
|
|
|
"""
|
|
|
Detect if a path is a solution directory with config.yaml.
|
|
|
|
|
|
Returns:
|
|
|
(is_solution_dir, problem, solution_file)
|
|
|
- is_solution_dir: True if path is a directory with config.yaml
|
|
|
- problem: Problem ID from config.yaml (or None)
|
|
|
- solution_file: Path to solution file in the directory (or None)
|
|
|
"""
|
|
|
if not path.is_dir():
|
|
|
return False, None, None
|
|
|
|
|
|
problem = read_solution_config(path)
|
|
|
if not problem:
|
|
|
return False, None, None
|
|
|
|
|
|
|
|
|
|
|
|
for name in ["solve.sh", "solution.py", "solution.cpp"]:
|
|
|
candidate = path / name
|
|
|
if candidate.exists():
|
|
|
return True, problem, candidate
|
|
|
|
|
|
|
|
|
py_files = list(path.glob("*.py"))
|
|
|
if py_files:
|
|
|
return True, problem, py_files[0]
|
|
|
|
|
|
return True, problem, None
|
|
|
|
|
|
|
|
|
def create_parser() -> argparse.ArgumentParser:
|
|
|
"""Create argument parser."""
|
|
|
parser = argparse.ArgumentParser(
|
|
|
prog="frontier-eval",
|
|
|
description="Evaluate solutions for Frontier-CS problems",
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
epilog="""
|
|
|
Examples:
|
|
|
# Evaluate a solution directory (auto-detects problem from config.yaml)
|
|
|
frontier-eval solutions/gpt5_flash_attn
|
|
|
|
|
|
# Evaluate a research problem with solution file
|
|
|
frontier-eval flash_attn solution.py
|
|
|
|
|
|
# Override problem when evaluating a solution directory
|
|
|
frontier-eval solutions/gpt5_flash_attn --problems other_problem
|
|
|
|
|
|
# Evaluate an algorithmic problem
|
|
|
frontier-eval --algorithmic 1 solution.cpp
|
|
|
|
|
|
# Evaluate with SkyPilot (cloud)
|
|
|
frontier-eval flash_attn solution.py --skypilot
|
|
|
|
|
|
# Evaluate multiple problems
|
|
|
frontier-eval --problems flash_attn,cross_entropy solution.py
|
|
|
|
|
|
# Evaluate all research problems
|
|
|
frontier-eval --all-problems solution.py
|
|
|
|
|
|
# List available problems
|
|
|
frontier-eval --list
|
|
|
""",
|
|
|
)
|
|
|
|
|
|
|
|
|
parser.add_argument(
|
|
|
"problem_id",
|
|
|
nargs="?",
|
|
|
default=None,
|
|
|
help="Problem ID (e.g., flash_attn) or solution directory with config.yaml",
|
|
|
)
|
|
|
parser.add_argument(
|
|
|
"solution",
|
|
|
nargs="?",
|
|
|
default=None,
|
|
|
help="Path to solution file",
|
|
|
)
|
|
|
|
|
|
|
|
|
problem_group = parser.add_argument_group("Problem Selection")
|
|
|
problem_group.add_argument(
|
|
|
"--algorithmic",
|
|
|
action="store_true",
|
|
|
help="Evaluate algorithmic problem (expects numeric ID)",
|
|
|
)
|
|
|
problem_group.add_argument(
|
|
|
"--research",
|
|
|
action="store_true",
|
|
|
help="Evaluate research problem (default track)",
|
|
|
)
|
|
|
problem_group.add_argument(
|
|
|
"--problems",
|
|
|
type=str,
|
|
|
help="Comma-separated list of problem IDs to evaluate",
|
|
|
)
|
|
|
problem_group.add_argument(
|
|
|
"--all-problems",
|
|
|
action="store_true",
|
|
|
help="Evaluate all problems in the track",
|
|
|
)
|
|
|
problem_group.add_argument(
|
|
|
"--problems-file",
|
|
|
type=Path,
|
|
|
help="File containing problem IDs (one per line)",
|
|
|
)
|
|
|
|
|
|
|
|
|
backend_group = parser.add_argument_group("Backend Options")
|
|
|
backend_group.add_argument(
|
|
|
"--skypilot",
|
|
|
action="store_true",
|
|
|
help="Use SkyPilot for cloud evaluation",
|
|
|
)
|
|
|
backend_group.add_argument(
|
|
|
"--cloud",
|
|
|
type=str,
|
|
|
default="gcp",
|
|
|
help="Cloud provider for SkyPilot (default: gcp)",
|
|
|
)
|
|
|
backend_group.add_argument(
|
|
|
"--region",
|
|
|
type=str,
|
|
|
help="Cloud region for SkyPilot",
|
|
|
)
|
|
|
backend_group.add_argument(
|
|
|
"--judge-url",
|
|
|
type=str,
|
|
|
default="http://localhost:8081",
|
|
|
help="Judge server URL for algorithmic problems",
|
|
|
)
|
|
|
|
|
|
|
|
|
eval_group = parser.add_argument_group("Evaluation Options")
|
|
|
eval_group.add_argument(
|
|
|
"--timeout",
|
|
|
type=int,
|
|
|
help="Timeout in seconds per problem",
|
|
|
)
|
|
|
eval_group.add_argument(
|
|
|
"--code",
|
|
|
type=str,
|
|
|
help="Solution code as string (alternative to file)",
|
|
|
)
|
|
|
eval_group.add_argument(
|
|
|
"--unbounded",
|
|
|
action="store_true",
|
|
|
help="Use unbounded score (for algorithmic problems, shows score without clipping)",
|
|
|
)
|
|
|
|
|
|
|
|
|
output_group = parser.add_argument_group("Output Options")
|
|
|
output_group.add_argument(
|
|
|
"--json",
|
|
|
action="store_true",
|
|
|
help="Output results as JSON",
|
|
|
)
|
|
|
output_group.add_argument(
|
|
|
"--quiet", "-q",
|
|
|
action="store_true",
|
|
|
help="Only output scores",
|
|
|
)
|
|
|
output_group.add_argument(
|
|
|
"--verbose", "-v",
|
|
|
action="store_true",
|
|
|
help="Verbose output including logs",
|
|
|
)
|
|
|
|
|
|
|
|
|
info_group = parser.add_argument_group("Info Commands")
|
|
|
info_group.add_argument(
|
|
|
"--list",
|
|
|
action="store_true",
|
|
|
help="List available problems",
|
|
|
)
|
|
|
info_group.add_argument(
|
|
|
"--show",
|
|
|
action="store_true",
|
|
|
help="Show problem statement",
|
|
|
)
|
|
|
|
|
|
|
|
|
subparsers = parser.add_subparsers(dest="command", help="Available commands")
|
|
|
|
|
|
|
|
|
batch_parser = subparsers.add_parser(
|
|
|
"batch",
|
|
|
help="Batch evaluation with incremental progress",
|
|
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
|
epilog="""
|
|
|
Examples:
|
|
|
# Evaluate all solutions (scans solutions/ directory)
|
|
|
frontier-eval batch
|
|
|
|
|
|
# Evaluate from specific solutions directory
|
|
|
frontier-eval batch --solutions-dir path/to/solutions
|
|
|
|
|
|
# Evaluate specific pairs
|
|
|
frontier-eval batch --pairs "sol1:flash_attn,sol2:cross_entropy"
|
|
|
|
|
|
# Resume interrupted evaluation
|
|
|
frontier-eval batch --resume --results-dir results/batch1
|
|
|
|
|
|
# Check evaluation status
|
|
|
frontier-eval batch --status --results-dir results/batch1
|
|
|
|
|
|
Each solution directory should have a config.yaml with:
|
|
|
problem: flash_attn
|
|
|
""",
|
|
|
)
|
|
|
|
|
|
|
|
|
pairs_group = batch_parser.add_mutually_exclusive_group()
|
|
|
pairs_group.add_argument(
|
|
|
"--pairs",
|
|
|
type=str,
|
|
|
help="Comma-separated pairs (solution:problem,solution:problem)",
|
|
|
)
|
|
|
pairs_group.add_argument(
|
|
|
"--pairs-file",
|
|
|
type=Path,
|
|
|
help="Pairs file (solution:problem per line)",
|
|
|
)
|
|
|
pairs_group.add_argument(
|
|
|
"--solutions-dir",
|
|
|
type=Path,
|
|
|
help="Solutions directory to scan (default: solutions/)",
|
|
|
)
|
|
|
|
|
|
batch_output = batch_parser.add_argument_group("Output Options")
|
|
|
batch_output.add_argument(
|
|
|
"--results-dir",
|
|
|
type=Path,
|
|
|
default=Path("results/batch"),
|
|
|
help="Directory for results and state (default: results/batch)",
|
|
|
)
|
|
|
|
|
|
batch_backend = batch_parser.add_argument_group("Backend Options")
|
|
|
batch_backend.add_argument(
|
|
|
"--skypilot",
|
|
|
action="store_true",
|
|
|
help="Use SkyPilot for cloud evaluation",
|
|
|
)
|
|
|
batch_backend.add_argument(
|
|
|
"--max-concurrent",
|
|
|
type=int,
|
|
|
default=1,
|
|
|
help="Maximum concurrent evaluations (default: 1)",
|
|
|
)
|
|
|
batch_backend.add_argument(
|
|
|
"--timeout",
|
|
|
type=int,
|
|
|
help="Timeout per evaluation in seconds",
|
|
|
)
|
|
|
batch_backend.add_argument(
|
|
|
"--bucket-url",
|
|
|
type=str,
|
|
|
help="Bucket URL for result storage (s3://... or gs://...). "
|
|
|
"Results are written directly to the bucket by each worker and "
|
|
|
"synced incrementally. Enables reliable resume across runs.",
|
|
|
)
|
|
|
|
|
|
batch_control = batch_parser.add_argument_group("Control Options")
|
|
|
batch_control.add_argument(
|
|
|
"--resume",
|
|
|
action="store_true",
|
|
|
help="Resume interrupted evaluation",
|
|
|
)
|
|
|
batch_control.add_argument(
|
|
|
"--no-resume",
|
|
|
action="store_true",
|
|
|
help="Start fresh, ignoring previous state",
|
|
|
)
|
|
|
batch_control.add_argument(
|
|
|
"--status",
|
|
|
action="store_true",
|
|
|
help="Show evaluation status and exit",
|
|
|
)
|
|
|
batch_control.add_argument(
|
|
|
"--retry-failed",
|
|
|
action="store_true",
|
|
|
help="Retry all failed pairs",
|
|
|
)
|
|
|
batch_control.add_argument(
|
|
|
"--report",
|
|
|
action="store_true",
|
|
|
help="Show aggregated report and exit",
|
|
|
)
|
|
|
batch_control.add_argument(
|
|
|
"--export-failed",
|
|
|
type=Path,
|
|
|
help="Export failed pairs to file",
|
|
|
)
|
|
|
batch_control.add_argument(
|
|
|
"--sync-bucket",
|
|
|
action="store_true",
|
|
|
help="Sync results from bucket to local state and export reports",
|
|
|
)
|
|
|
|
|
|
return parser
|
|
|
|
|
|
|
|
|
def print_result(result: EvaluationResult, quiet: bool = False, verbose: bool = False, unbounded: bool = False) -> None:
|
|
|
"""Print evaluation result."""
|
|
|
if quiet:
|
|
|
if result.success:
|
|
|
score = result.score_unbounded if unbounded and hasattr(result, 'score_unbounded') else result.score
|
|
|
print(f"{result.problem_id}: {score}")
|
|
|
else:
|
|
|
print(f"{result.problem_id}: ERROR")
|
|
|
return
|
|
|
|
|
|
print(f"\n{'='*60}")
|
|
|
print(f"Problem: {result.problem_id}")
|
|
|
print(f"Status: {result.status.value}")
|
|
|
|
|
|
if result.success:
|
|
|
if unbounded and hasattr(result, 'score_unbounded'):
|
|
|
print(f"Score (unbounded): {result.score_unbounded}")
|
|
|
print(f"Score (bounded): {result.score}")
|
|
|
else:
|
|
|
print(f"Score: {result.score}")
|
|
|
else:
|
|
|
print(f"Message: {result.message}")
|
|
|
|
|
|
if result.duration_seconds:
|
|
|
print(f"Duration: {result.duration_seconds:.1f}s")
|
|
|
|
|
|
if verbose and result.logs:
|
|
|
print(f"\n--- Logs ---\n{result.logs}")
|
|
|
|
|
|
print("=" * 60)
|
|
|
|
|
|
|
|
|
def print_results_json(results: List[EvaluationResult], unbounded: bool = False) -> None:
|
|
|
"""Print results as JSON."""
|
|
|
import json
|
|
|
|
|
|
data = []
|
|
|
for r in results:
|
|
|
item = {
|
|
|
"problem_id": r.problem_id,
|
|
|
"score": r.score,
|
|
|
"status": r.status.value,
|
|
|
"message": r.message,
|
|
|
"duration_seconds": r.duration_seconds,
|
|
|
}
|
|
|
if unbounded and hasattr(r, 'score_unbounded'):
|
|
|
item["score_unbounded"] = r.score_unbounded
|
|
|
data.append(item)
|
|
|
print(json.dumps(data, indent=2))
|
|
|
|
|
|
|
|
|
def get_problem_ids(
|
|
|
args: argparse.Namespace,
|
|
|
evaluator: FrontierCSEvaluator,
|
|
|
track: str,
|
|
|
) -> List[str]:
|
|
|
"""Get list of problem IDs to evaluate."""
|
|
|
if args.all_problems:
|
|
|
return evaluator.list_problems(track)
|
|
|
|
|
|
if args.problems:
|
|
|
return [p.strip() for p in args.problems.split(",")]
|
|
|
|
|
|
if args.problems_file:
|
|
|
if not args.problems_file.exists():
|
|
|
print(f"Error: Problems file not found: {args.problems_file}", file=sys.stderr)
|
|
|
sys.exit(1)
|
|
|
problems = []
|
|
|
for line in args.problems_file.read_text().splitlines():
|
|
|
line = line.strip()
|
|
|
if line and not line.startswith("#"):
|
|
|
problems.append(line)
|
|
|
return problems
|
|
|
|
|
|
if args.problem_id:
|
|
|
return [args.problem_id]
|
|
|
|
|
|
return []
|
|
|
|
|
|
|
|
|
def run_batch(args: argparse.Namespace) -> int:
|
|
|
"""Run batch evaluation command."""
|
|
|
from .batch import BatchEvaluator
|
|
|
from .batch.pair import Pair
|
|
|
|
|
|
|
|
|
logging.basicConfig(
|
|
|
level=logging.INFO,
|
|
|
format="[%(levelname)s] %(message)s",
|
|
|
)
|
|
|
|
|
|
|
|
|
backend = "skypilot" if args.skypilot else "docker"
|
|
|
bucket_url = getattr(args, "bucket_url", None)
|
|
|
batch = BatchEvaluator(
|
|
|
results_dir=args.results_dir,
|
|
|
backend=backend,
|
|
|
max_concurrent=args.max_concurrent,
|
|
|
timeout=args.timeout,
|
|
|
bucket_url=bucket_url,
|
|
|
)
|
|
|
|
|
|
|
|
|
if args.status:
|
|
|
status = batch.get_status()
|
|
|
print("\nBatch Evaluation Status")
|
|
|
print("=" * 40)
|
|
|
print(f"Total pairs: {status['total_pairs']}")
|
|
|
print(f"Completed: {status['completed']}")
|
|
|
print(f"Successful: {status['successful']}")
|
|
|
print(f"Errors: {status['errors']}")
|
|
|
print(f"Pending: {status['pending']}")
|
|
|
print(f"Started: {status['started_at'] or 'N/A'}")
|
|
|
print(f"Updated: {status['updated_at'] or 'N/A'}")
|
|
|
return 0
|
|
|
|
|
|
|
|
|
if getattr(args, "sync_bucket", False):
|
|
|
if not bucket_url:
|
|
|
print("Error: --sync-bucket requires --bucket-url", file=sys.stderr)
|
|
|
return 1
|
|
|
print(f"\nSyncing results from {bucket_url}...")
|
|
|
count = batch.sync_from_bucket()
|
|
|
print(f"Merged {count} results from bucket")
|
|
|
|
|
|
|
|
|
batch._export_all_results()
|
|
|
status = batch.get_status()
|
|
|
print(f"\nStatus: {status['completed']}/{status['total_pairs']} completed")
|
|
|
print(f"Results exported to {args.results_dir}")
|
|
|
return 0
|
|
|
|
|
|
|
|
|
if args.report:
|
|
|
print("\nAggregated Results by Model")
|
|
|
print("=" * 60)
|
|
|
by_model = batch.state.aggregate_by_model()
|
|
|
for model, stats in sorted(by_model.items()):
|
|
|
avg = f"{stats['avg_score']:.4f}" if stats['avg_score'] is not None else "N/A"
|
|
|
print(f" {model}: {stats['successful']}/{stats['total']} successful, avg={avg}")
|
|
|
|
|
|
print("\nAggregated Results by Problem")
|
|
|
print("=" * 60)
|
|
|
by_problem = batch.state.aggregate_by_problem()
|
|
|
for problem, stats in sorted(by_problem.items()):
|
|
|
avg = f"{stats['avg_score']:.4f}" if stats['avg_score'] is not None else "N/A"
|
|
|
print(f" {problem}: {stats['successful']}/{stats['total']} successful, avg={avg}")
|
|
|
return 0
|
|
|
|
|
|
|
|
|
if args.export_failed:
|
|
|
count = batch.state.export_failed(args.export_failed)
|
|
|
print(f"Exported {count} failed pairs to {args.export_failed}")
|
|
|
return 0
|
|
|
|
|
|
|
|
|
if args.retry_failed:
|
|
|
print(f"\nRetrying failed pairs from {args.results_dir}")
|
|
|
state = batch.retry_failed()
|
|
|
print(f"\nComplete: {state.success_count}/{state.total_pairs} successful")
|
|
|
return 0 if state.error_count == 0 else 1
|
|
|
|
|
|
|
|
|
if args.resume:
|
|
|
print(f"\nResuming batch evaluation from {args.results_dir}")
|
|
|
state = batch.resume()
|
|
|
print(f"\nComplete: {state.success_count}/{state.total_pairs} successful")
|
|
|
return 0 if state.error_count == 0 else 1
|
|
|
|
|
|
|
|
|
resume = not args.no_resume
|
|
|
state = None
|
|
|
|
|
|
if args.pairs:
|
|
|
|
|
|
pairs = []
|
|
|
for p in args.pairs.split(","):
|
|
|
p = p.strip()
|
|
|
if ":" not in p:
|
|
|
print(f"Error: Invalid pair format (expected solution:problem): {p}", file=sys.stderr)
|
|
|
return 1
|
|
|
solution, problem = p.split(":", 1)
|
|
|
pairs.append(Pair(solution=solution.strip(), problem=problem.strip()))
|
|
|
|
|
|
print(f"\nBatch evaluation: {len(pairs)} pairs")
|
|
|
state = batch.evaluate_pairs(pairs, resume=resume)
|
|
|
|
|
|
elif args.pairs_file:
|
|
|
|
|
|
if not args.pairs_file.exists():
|
|
|
print(f"Error: Pairs file not found: {args.pairs_file}", file=sys.stderr)
|
|
|
return 1
|
|
|
|
|
|
print(f"\nBatch evaluation from pairs file: {args.pairs_file}")
|
|
|
state = batch.evaluate_pairs_file(args.pairs_file, resume=resume)
|
|
|
|
|
|
else:
|
|
|
|
|
|
from .batch import scan_solutions_dir
|
|
|
|
|
|
solutions_dir = args.solutions_dir
|
|
|
if solutions_dir is None:
|
|
|
|
|
|
for candidate in [Path("solutions"), Path("../solutions"), Path("../../solutions")]:
|
|
|
if candidate.is_dir():
|
|
|
solutions_dir = candidate.resolve()
|
|
|
break
|
|
|
|
|
|
if solutions_dir is None or not solutions_dir.is_dir():
|
|
|
print("Error: No solutions directory found. Use --solutions-dir or --pairs-file", file=sys.stderr)
|
|
|
return 1
|
|
|
|
|
|
pairs = scan_solutions_dir(solutions_dir)
|
|
|
if not pairs:
|
|
|
print(f"Error: No solutions with config.yaml found in {solutions_dir}", file=sys.stderr)
|
|
|
return 1
|
|
|
|
|
|
print(f"\nBatch evaluation: {len(pairs)} solutions from {solutions_dir}")
|
|
|
state = batch.evaluate_pairs(pairs, resume=resume)
|
|
|
|
|
|
|
|
|
print(f"\n{'='*40}")
|
|
|
print("Batch Evaluation Summary")
|
|
|
print("=" * 40)
|
|
|
print(f"Total: {state.total_pairs}")
|
|
|
print(f"Successful: {state.success_count}")
|
|
|
print(f"Errors: {state.error_count}")
|
|
|
print(f"Results saved to: {args.results_dir}")
|
|
|
print(f"\nOutput files:")
|
|
|
print(f" - results.csv: All results")
|
|
|
print(f" - by_model.csv: Aggregated by model")
|
|
|
print(f" - by_problem.csv: Aggregated by problem")
|
|
|
if state.error_count > 0:
|
|
|
print(f" - failed.txt: {state.error_count} failed pairs")
|
|
|
|
|
|
return 0 if state.error_count == 0 else 1
|
|
|
|
|
|
|
|
|
def main(argv: Optional[List[str]] = None) -> int:
|
|
|
"""Main entry point."""
|
|
|
if argv is None:
|
|
|
argv = sys.argv[1:]
|
|
|
|
|
|
|
|
|
subcommands = {"batch"}
|
|
|
if argv and argv[0] in subcommands:
|
|
|
parser = create_parser()
|
|
|
args = parser.parse_args(argv)
|
|
|
if args.command == "batch":
|
|
|
return run_batch(args)
|
|
|
|
|
|
|
|
|
|
|
|
parser = create_parser()
|
|
|
|
|
|
args = parser.parse_args(argv + ["batch", "--status"]) if not any(a in argv for a in subcommands) else parser.parse_args(argv)
|
|
|
|
|
|
args.command = None
|
|
|
args.status = False
|
|
|
|
|
|
|
|
|
track = "algorithmic" if args.algorithmic else "research"
|
|
|
|
|
|
|
|
|
backend = "skypilot" if args.skypilot else "docker"
|
|
|
evaluator = FrontierCSEvaluator(
|
|
|
backend=backend,
|
|
|
judge_url=args.judge_url,
|
|
|
cloud=args.cloud,
|
|
|
region=args.region,
|
|
|
)
|
|
|
|
|
|
|
|
|
if args.list:
|
|
|
if args.algorithmic:
|
|
|
|
|
|
problems = evaluator.list_problems("algorithmic")
|
|
|
print(f"\nAlgorithmic Problems ({len(problems)} total):\n")
|
|
|
|
|
|
ids_per_line = 10
|
|
|
for i in range(0, len(problems), ids_per_line):
|
|
|
line_ids = problems[i:i+ids_per_line]
|
|
|
print(" " + ", ".join(line_ids))
|
|
|
elif args.research:
|
|
|
|
|
|
all_research = evaluator.list_problems("research")
|
|
|
research_problems = [p for p in all_research if not p.startswith("algorithmic/")]
|
|
|
print(f"\nResearch Problems ({len(research_problems)} total):\n")
|
|
|
for p in research_problems:
|
|
|
print(f" {p}")
|
|
|
else:
|
|
|
|
|
|
|
|
|
all_research = evaluator.list_problems("research")
|
|
|
research_problems = [p for p in all_research if not p.startswith("algorithmic/")]
|
|
|
print(f"\nResearch Problems ({len(research_problems)} total):\n")
|
|
|
for p in research_problems:
|
|
|
print(f" {p}")
|
|
|
|
|
|
|
|
|
alg_problems = evaluator.list_problems("algorithmic")
|
|
|
print(f"\nAlgorithmic Problems ({len(alg_problems)} total):\n")
|
|
|
|
|
|
ids_per_line = 10
|
|
|
for i in range(0, len(alg_problems), ids_per_line):
|
|
|
line_ids = alg_problems[i:i+ids_per_line]
|
|
|
print(" " + ", ".join(line_ids))
|
|
|
return 0
|
|
|
|
|
|
if args.show:
|
|
|
if not args.problem_id:
|
|
|
print("Error: --show requires a problem_id", file=sys.stderr)
|
|
|
return 1
|
|
|
statement = evaluator.get_problem_statement(track, args.problem_id)
|
|
|
if statement:
|
|
|
print(statement)
|
|
|
else:
|
|
|
print(f"Problem not found: {args.problem_id}", file=sys.stderr)
|
|
|
return 1
|
|
|
return 0
|
|
|
|
|
|
|
|
|
|
|
|
solution_dir_mode = False
|
|
|
detected_problem = None
|
|
|
detected_solution_file = None
|
|
|
|
|
|
if args.problem_id:
|
|
|
candidate = Path(args.problem_id)
|
|
|
is_sol_dir, detected_problem, detected_solution_file = detect_solution_dir(candidate)
|
|
|
if is_sol_dir:
|
|
|
solution_dir_mode = True
|
|
|
if not args.quiet:
|
|
|
print(f"Detected solution directory: {candidate}")
|
|
|
print(f" Problem (from config.yaml): {detected_problem}")
|
|
|
if detected_solution_file:
|
|
|
print(f" Solution file: {detected_solution_file.name}")
|
|
|
|
|
|
|
|
|
if solution_dir_mode:
|
|
|
|
|
|
if args.problems:
|
|
|
problem_ids = [p.strip() for p in args.problems.split(",")]
|
|
|
elif detected_problem:
|
|
|
problem_ids = [detected_problem]
|
|
|
else:
|
|
|
print("Error: No problem found in config.yaml", file=sys.stderr)
|
|
|
return 1
|
|
|
else:
|
|
|
problem_ids = get_problem_ids(args, evaluator, track)
|
|
|
|
|
|
if not problem_ids:
|
|
|
print("Error: No problems specified. Use --help for usage.", file=sys.stderr)
|
|
|
return 1
|
|
|
|
|
|
|
|
|
if args.code:
|
|
|
code = args.code
|
|
|
elif solution_dir_mode and detected_solution_file:
|
|
|
|
|
|
code = detected_solution_file.read_text(encoding="utf-8")
|
|
|
elif args.solution:
|
|
|
solution_path = Path(args.solution)
|
|
|
if not solution_path.exists():
|
|
|
print(f"Error: Solution file not found: {solution_path}", file=sys.stderr)
|
|
|
return 1
|
|
|
code = solution_path.read_text(encoding="utf-8")
|
|
|
elif solution_dir_mode:
|
|
|
print(f"Error: No solution.py found in {args.problem_id}", file=sys.stderr)
|
|
|
return 1
|
|
|
else:
|
|
|
print("Error: No solution provided. Use --code or provide a file path.", file=sys.stderr)
|
|
|
return 1
|
|
|
|
|
|
|
|
|
results = []
|
|
|
for pid in problem_ids:
|
|
|
if not args.quiet:
|
|
|
print(f"Evaluating {pid}...", end=" ", flush=True)
|
|
|
|
|
|
result = evaluator.evaluate(track, pid, code, timeout=args.timeout, unbounded=args.unbounded)
|
|
|
results.append(result)
|
|
|
|
|
|
if not args.quiet:
|
|
|
if result.success:
|
|
|
score = result.score_unbounded if args.unbounded and hasattr(result, 'score_unbounded') else result.score
|
|
|
print(f"Score: {score}")
|
|
|
else:
|
|
|
print(f"ERROR: {result.message}")
|
|
|
|
|
|
|
|
|
if args.json:
|
|
|
print_results_json(results, unbounded=args.unbounded)
|
|
|
elif not args.quiet:
|
|
|
print(f"\n{'='*60}")
|
|
|
print("Summary")
|
|
|
print("=" * 60)
|
|
|
|
|
|
successful = [r for r in results if r.success]
|
|
|
failed = [r for r in results if not r.success]
|
|
|
|
|
|
print(f"Total: {len(results)}")
|
|
|
print(f"Successful: {len(successful)}")
|
|
|
print(f"Failed: {len(failed)}")
|
|
|
|
|
|
if successful:
|
|
|
if args.unbounded and all(hasattr(r, 'score_unbounded') for r in successful):
|
|
|
avg_score = sum(r.score_unbounded for r in successful) / len(successful)
|
|
|
print(f"Average Score (unbounded): {avg_score:.2f}")
|
|
|
else:
|
|
|
avg_score = sum(r.score for r in successful) / len(successful)
|
|
|
print(f"Average Score: {avg_score:.2f}")
|
|
|
|
|
|
if failed and args.verbose:
|
|
|
print("\nFailed problems:")
|
|
|
for r in failed:
|
|
|
print(f" {r.problem_id}: {r.message}")
|
|
|
|
|
|
|
|
|
return 0 if all(r.success for r in results) else 1
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
|
|
|
sys.exit(main())
|
|
|
|