"""
Unified evaluation API for Frontier-CS.
Provides a single interface for evaluating both algorithmic and research problems,
with support for different backends (local Docker, SkyPilot cloud).
"""
from pathlib import Path
from typing import Iterator, List, Literal, Optional, Union
from .runner import EvaluationResult, DockerRunner, AlgorithmicRunner
from .runner.base import Runner
TrackType = Literal["algorithmic", "research"]
BackendType = Literal["docker", "skypilot"]
class FrontierCSEvaluator:
"""
Unified evaluator for Frontier-CS problems.
Example usage:
evaluator = FrontierCSEvaluator()
# Algorithmic problem
result = evaluator.evaluate("algorithmic", problem_id=1, code=cpp_code)
# Research problem (local Docker)
result = evaluator.evaluate("research", problem_id="flash_attn", code=py_code)
# Research problem (SkyPilot)
result = evaluator.evaluate("research", problem_id="flash_attn", code=py_code,
backend="skypilot")
# Batch evaluation
results = evaluator.evaluate_batch("research",
problem_ids=["flash_attn", "cross_entropy"],
code=py_code)
"""
def __init__(
self,
backend: BackendType = "docker",
base_dir: Optional[Path] = None,
judge_url: str = "http://localhost:8081",
cloud: str = "gcp",
region: Optional[str] = None,
):
"""
Initialize FrontierCSEvaluator.
Args:
backend: Default backend for research problems ("docker" or "skypilot")
base_dir: Base directory of Frontier-CS repo (auto-detected if None)
judge_url: URL of the algorithmic judge server
cloud: Cloud provider for SkyPilot ("gcp", "aws", "azure")
region: Cloud region for SkyPilot
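
        Example (illustrative; a SkyPilot backend requires cloud credentials):
            evaluator = FrontierCSEvaluator(backend="skypilot", cloud="gcp")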
"""
self.default_backend = backend
self.base_dir = base_dir
self.judge_url = judge_url
self.cloud = cloud
self.region = region
# Lazy-initialized runners
self._algorithmic_runner: Optional[AlgorithmicRunner] = None
self._docker_runner: Optional[DockerRunner] = None
self._skypilot_runner: Optional[Runner] = None
@property
def algorithmic_runner(self) -> AlgorithmicRunner:
"""Get or create the algorithmic runner."""
if self._algorithmic_runner is None:
self._algorithmic_runner = AlgorithmicRunner(judge_url=self.judge_url)
return self._algorithmic_runner
@property
def docker_runner(self) -> DockerRunner:
"""Get or create the Docker runner."""
if self._docker_runner is None:
self._docker_runner = DockerRunner(base_dir=self.base_dir)
return self._docker_runner
@property
def skypilot_runner(self) -> Runner:
"""Get or create the SkyPilot runner."""
if self._skypilot_runner is None:
from .runner.skypilot import SkyPilotRunner
self._skypilot_runner = SkyPilotRunner(
base_dir=self.base_dir,
cloud=self.cloud,
region=self.region,
)
return self._skypilot_runner
def _get_runner(self, track: TrackType, backend: Optional[BackendType] = None) -> Runner:
"""Get the appropriate runner for a track and backend."""
if track == "algorithmic":
return self.algorithmic_runner
effective_backend = backend or self.default_backend
if effective_backend == "skypilot":
return self.skypilot_runner
return self.docker_runner
def evaluate(
self,
track: TrackType,
problem_id: Union[str, int],
code: str,
*,
backend: Optional[BackendType] = None,
timeout: Optional[int] = None,
unbounded: bool = False,
) -> EvaluationResult:
"""
Evaluate a solution for a single problem.
Args:
track: Problem track ("algorithmic" or "research")
problem_id: Problem identifier (int for algorithmic, str for research)
code: Solution code (C++ for algorithmic, Python for research)
backend: Backend to use ("docker" or "skypilot"), defaults to init value
timeout: Optional timeout in seconds
unbounded: For algorithmic problems, use unbounded score (no clipping)
Returns:
EvaluationResult with score and status
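
        Example (illustrative; assumes a judge server at the default URL):
            evaluator = FrontierCSEvaluator()
            result = evaluator.evaluate("algorithmic", problem_id=1, code=cpp_code)
            print(result.score)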
"""
        runner = self._get_runner(track, backend)
        if track == "algorithmic":
            # Only the algorithmic runner accepts the `unbounded` flag.
            return runner.evaluate(str(problem_id), code, timeout=timeout, unbounded=unbounded)
        return runner.evaluate(str(problem_id), code, timeout=timeout)
def evaluate_file(
self,
track: TrackType,
problem_id: Union[str, int],
solution_path: Path,
*,
backend: Optional[BackendType] = None,
timeout: Optional[int] = None,
) -> EvaluationResult:
"""
Evaluate a solution file for a single problem.
Args:
track: Problem track
problem_id: Problem identifier
solution_path: Path to solution file
backend: Backend to use
timeout: Optional timeout in seconds
Returns:
EvaluationResult with score and status
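
        Example (illustrative; the path is hypothetical):
            result = evaluator.evaluate_file(
                "research", "flash_attn", Path("solutions/flash_attn.py")
            )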
"""
runner = self._get_runner(track, backend)
return runner.evaluate_file(str(problem_id), solution_path, timeout=timeout)
def evaluate_batch(
self,
track: TrackType,
problem_ids: List[Union[str, int]],
code: str,
*,
backend: Optional[BackendType] = None,
timeout: Optional[int] = None,
) -> List[EvaluationResult]:
"""
Evaluate a solution against multiple problems.
Args:
track: Problem track
problem_ids: List of problem identifiers
code: Solution code (same code for all problems)
backend: Backend to use
timeout: Optional timeout per problem
Returns:
List of EvaluationResult, one per problem
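
        Example (illustrative; problem ids as in the class docstring):
            results = evaluator.evaluate_batch(
                "research", ["flash_attn", "cross_entropy"], py_code
            )
            scores = [r.score for r in results]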
"""
runner = self._get_runner(track, backend)
results = []
for pid in problem_ids:
result = runner.evaluate(str(pid), code, timeout=timeout)
results.append(result)
return results
def evaluate_batch_iter(
self,
track: TrackType,
problem_ids: List[Union[str, int]],
code: str,
*,
backend: Optional[BackendType] = None,
timeout: Optional[int] = None,
) -> Iterator[EvaluationResult]:
"""
Evaluate a solution against multiple problems, yielding results as they complete.
Args:
track: Problem track
problem_ids: List of problem identifiers
code: Solution code
backend: Backend to use
timeout: Optional timeout per problem
Yields:
EvaluationResult for each problem as it completes
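
        Example (illustrative):
            ids = ["flash_attn", "cross_entropy"]
            for result in evaluator.evaluate_batch_iter("research", ids, py_code):
                print(result.score)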
"""
runner = self._get_runner(track, backend)
for pid in problem_ids:
yield runner.evaluate(str(pid), code, timeout=timeout)
def list_problems(self, track: TrackType) -> List[str]:
"""
List all available problems for a track.
Args:
track: Problem track
Returns:
List of problem identifiers
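
        Example (illustrative):
            for pid in evaluator.list_problems("algorithmic"):
                print(pid)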
"""
if track == "algorithmic":
# Read from local ./algorithmic/problems directory
try:
alg_base = self.docker_runner.base_dir / "algorithmic" / "problems"
except Exception:
return []
            if not alg_base.exists():
return []
problems = []
for item in alg_base.iterdir():
if item.is_dir() and not item.name.startswith("."):
problems.append(item.name)
# Sort numerically if possible
def sort_key(name):
try:
return (0, int(name))
except ValueError:
return (1, name)
return sorted(problems, key=sort_key)
        # Research problems: discovered via evaluator.py files (matches update_problem_count.py logic)
research_problems_dir = self.docker_runner.research_dir / "problems"
if not research_problems_dir.exists():
return []
problems = []
        # Special case: poc_generation is split into four subcategories,
        # which are listed directly rather than discovered via evaluator.py
        poc_dir = research_problems_dir / "poc_generation"
        if poc_dir.exists():
problems.extend([
"research/poc_generation/heap_buffer_overflow",
"research/poc_generation/heap_use_after_free",
"research/poc_generation/stack_buffer_overflow",
"research/poc_generation/uninitialized_value"
])
        # Find all evaluator.py files, excluding those under poc_generation
        # (its subcategories were added explicitly above)
        for evaluator_file in research_problems_dir.rglob("evaluator.py"):
            problem_path = evaluator_file.parent.relative_to(research_problems_dir)
            # Compare path components rather than a raw substring so that
            # directories whose names merely contain "poc_generation" are
            # not skipped by accident
            if "poc_generation" in problem_path.parts:
                continue
            problems.append("research/" + problem_path.as_posix())
# Also include local algorithmic problems (from ./algorithmic/problems)
try:
alg_base = self.docker_runner.base_dir / "algorithmic" / "problems"
except Exception:
alg_base = None
if alg_base and alg_base.exists():
for item in sorted(alg_base.iterdir(), key=lambda p: p.name):
if item.is_dir() and not item.name.startswith("."):
problems.append(f"algorithmic/{item.name}")
return sorted(problems)
def get_problem_statement(
self,
track: TrackType,
problem_id: Union[str, int],
) -> Optional[str]:
"""
Get the problem statement/readme for a problem.
Args:
track: Problem track
problem_id: Problem identifier
Returns:
Problem statement text, or None if not found
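
        Example (illustrative; problem id from the class docstring):
            statement = evaluator.get_problem_statement("research", "flash_attn")
            if statement:
                print(statement[:200])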
"""
if track == "algorithmic":
return self.algorithmic_runner.get_problem_statement(str(problem_id))
# Research problem - read readme
problem_path = self.docker_runner.get_problem_path(str(problem_id))
readme = problem_path / "readme"
if readme.exists():
return readme.read_text(encoding="utf-8")
return None
# Convenience function for quick evaluation
def evaluate(
track: TrackType,
problem_id: Union[str, int],
code: str,
*,
backend: BackendType = "docker",
timeout: Optional[int] = None,
) -> EvaluationResult:
"""
    One-shot convenience wrapper that constructs a FrontierCSEvaluator and
    evaluates a single problem.
Example:
from frontier_cs import evaluate
result = evaluate("research", "flash_attn", solution_code)
print(f"Score: {result.score}")
"""
evaluator = FrontierCSEvaluator(backend=backend)
return evaluator.evaluate(track, problem_id, code, timeout=timeout)
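

if __name__ == "__main__":
    # Minimal smoke test, illustrative only: list a few algorithmic problem
    # ids without contacting the judge server. Assumes DockerRunner can
    # auto-detect the repository root when base_dir is None.
    evaluator = FrontierCSEvaluator()
    for pid in evaluator.list_problems("algorithmic")[:5]:
        print(pid)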