"""
Test runner for the Idea First, Code Later CP Benchmark dataset.
This module provides tools to load competitive programming problems from the
Hugging Face dataset and run solutions against their test cases.
Features:
- Automatic dataset loading from Hugging Face
- Support for C++ and Python solutions
- Special judge support for problems with multiple valid outputs
- Kattis integration for CS3233 problems
- Memory and time limit enforcement
Example:
>>> from hf_test_runner import TestRunner
>>> runner = TestRunner()
>>> results = runner.run_solution(
... problem_id="icpc-jakarta-2019_card-collecting",
... solution_code=open("solution.cpp").read(),
... language="cpp"
... )
>>> print(results["status"]) # "PASSED" or error type
Classes:
TestRunner: Main class for running solutions against problems.
Problem: Dataclass representing a problem from the dataset.
Functions:
run_solution: Convenience function for quick one-off solution testing.
parse_time_limit: Parse time limit strings like "2 sec" to float.
parse_memory_limit_to_bytes: Parse memory limit strings to bytes.
"""
import os
import re
import subprocess
import tempfile
import time
import signal
import resource
import traceback
from dataclasses import dataclass
from typing import Optional
try:
from datasets import load_dataset
except ImportError:
print("Please install datasets: pip install datasets")
raise
# Constants
PREEXEC_ERROR_EXIT_CODE = 2
HF_DATASET_ID = "samahadhoud/idea-first-code-later-cp-benchmark"
@dataclass
class Problem:
"""
Represents a competitive programming problem loaded from the HF dataset.
Attributes:
problem_id: Unique identifier in format "contest_slug" (e.g., "icpc-jakarta-2019_card-collecting")
problem_code: Problem letter in the contest (A, B, C, ...)
problem_slug: URL-friendly problem name
problem_title: Full human-readable problem title
contest_name: Contest identifier (e.g., "icpc-jakarta-2019")
contest_full_name: Full contest name (e.g., "ICPC Asia Jakarta Regional Contest 2019")
year: Competition year as string
source: Source URL or repository
time_limit: Time limit as string (e.g., "2 sec")
memory_limit: Memory limit as string (e.g., "256 MB")
statement: Problem statement in Markdown format
analysis: Editorial/solution analysis in Markdown
sample_test_cases_input: List of sample input strings (shown in problem statement)
sample_test_cases_output: List of corresponding sample outputs
hidden_test_cases_input: List of hidden input strings (for actual judging)
hidden_test_cases_output: List of corresponding hidden outputs
has_special_judge: True if problem accepts multiple valid outputs
special_judge_code: C++ scorer code using testlib for validation
special_judge_format: "standard" or "jakarta2017" (different CLI formats)
uses_kattis: True for CS3233 problems (must submit via Kattis)
kattis_problem_id: Kattis problem ID for submission
"""
problem_id: str
problem_code: str
problem_slug: str
problem_title: str
contest_name: str
contest_full_name: str
year: str
source: str
time_limit: str
memory_limit: str
statement: str
analysis: str
sample_test_cases_input: list
sample_test_cases_output: list
hidden_test_cases_input: list
hidden_test_cases_output: list
has_special_judge: bool
special_judge_code: str
special_judge_format: str
uses_kattis: bool
kattis_problem_id: str
def parse_time_limit(time_str: str) -> float:
"""
Parse a time limit string to seconds.
Args:
time_str: Time limit string (e.g., "2 sec", "1.5s", "3")
Returns:
Time limit in seconds as float. Returns 10.0 if parsing fails.
Examples:
>>> parse_time_limit("2 sec")
2.0
>>> parse_time_limit("1.5s")
1.5
>>> parse_time_limit("")
10.0
"""
if not time_str:
return 10.0 # default
match = re.search(r'([\d.]+)\s*(?:sec|s)?', time_str.lower())
if match:
return float(match.group(1))
return 10.0
def parse_memory_limit_to_bytes(value) -> Optional[int]:
"""
Parse a memory limit value to bytes.
Args:
value: Memory limit as int (MB), float (MB), or string (e.g., "256 MB", "1 GB")
Returns:
Memory limit in bytes, or None if unlimited/unspecified.
Raises:
ValueError: If the value cannot be parsed or is negative.
Examples:
>>> parse_memory_limit_to_bytes(256)
268435456
>>> parse_memory_limit_to_bytes("256 MB")
268435456
>>> parse_memory_limit_to_bytes("1 GB")
1073741824
        >>> parse_memory_limit_to_bytes(None) is None
        True
"""
if value is None:
return None
    if isinstance(value, (int, float)):
        if value < 0:
            raise ValueError("memory limit must be non-negative")
        if value == 0:
            return None  # treat numeric 0 as unlimited, matching the string "0" case below
        return int(value * 1024 * 1024)
if isinstance(value, str):
s = value.strip().lower()
if s in ("none", "unlimited", "0", ""):
return None
m = re.match(r"^\s*([0-9]*\.?[0-9]+)\s*([kmgtp]?b?|)$", s)
if not m:
raise ValueError(f"cannot parse memory limit '{value}'")
num = float(m.group(1))
unit = m.group(2)
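        # A bare number or a lone "b" suffix is treated as megabytes, matching
        # the convention for plain int/float inputs above.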
if unit in ("", "b"):
multiplier = 1024 * 1024
elif unit in ("k", "kb"):
multiplier = 1024
elif unit in ("m", "mb"):
multiplier = 1024 * 1024
elif unit in ("g", "gb"):
multiplier = 1024 * 1024 * 1024
else:
raise ValueError(f"unknown unit in memory limit '{value}'")
return int(num * multiplier)
raise ValueError("unsupported type for memory limit")
def _preexec_setrlimit_bytes(bytes_limit):
"""
Set memory limit in child process using setrlimit.
This function is designed to be used as a preexec_fn in subprocess.run().
It sets RLIMIT_AS (address space limit) to enforce memory limits on
the child process.
Args:
bytes_limit: Memory limit in bytes, or None for no limit.
Note:
If this function fails, it writes the traceback to stderr and
exits with PREEXEC_ERROR_EXIT_CODE (2) to signal the error.
"""
try:
if bytes_limit is None:
return
if not isinstance(bytes_limit, int) or bytes_limit < 0:
raise ValueError("bytes_limit must be non-negative int")
resource.setrlimit(resource.RLIMIT_AS, (bytes_limit, bytes_limit))
except Exception:
try:
tb = traceback.format_exc()
os.write(2, tb.encode("utf-8", errors="replace"))
except Exception:
pass
os._exit(PREEXEC_ERROR_EXIT_CODE)
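# A minimal usage sketch (assumption: POSIX only; RLIMIT_AS is unavailable on
# Windows and not reliably enforced on macOS). Note that RLIMIT_AS caps
# *virtual* address space, so allocator overhead can trip the limit before the
# child touches that much actual data:
#
#   subprocess.run(["./a.out"], preexec_fn=lambda: _preexec_setrlimit_bytes(256 * 1024 * 1024))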
class TestRunner:
"""
Test runner for the Idea First, Code Later CP Benchmark dataset.
This class loads problems from Hugging Face and provides methods to run
solutions against test cases with proper judging, time/memory limits,
and special judge support.
Attributes:
dataset: The loaded HuggingFace dataset
problems: Dict mapping problem_id to problem data
Example:
>>> runner = TestRunner()
>>> results = runner.run_solution(
... problem_id="icpc-jakarta-2019_card-collecting",
... solution_code=code,
... language="cpp"
... )
>>> if results["status"] == "PASSED":
... print("All tests passed!")
"""
def __init__(self, dataset_id: str = HF_DATASET_ID, cache_dir: Optional[str] = None):
"""
Initialize the test runner and load the dataset from Hugging Face.
Args:
            dataset_id: HuggingFace dataset ID (default: samahadhoud/idea-first-code-later-cp-benchmark)
cache_dir: Optional local cache directory for the dataset
Raises:
ImportError: If the datasets library is not installed
"""
print(f"Loading dataset from {dataset_id}...")
self.dataset = load_dataset(dataset_id, cache_dir=cache_dir)["train"]
self.problems = {row["problem_id"]: row for row in self.dataset}
print(f"Loaded {len(self.problems)} problems")
def get_problem(self, problem_id: str) -> Problem:
"""
Retrieve a problem by its ID.
Args:
problem_id: Unique problem identifier (e.g., "icpc-jakarta-2019_card-collecting")
Returns:
Problem object with all problem data.
Raises:
ValueError: If the problem_id is not found in the dataset.
Example:
>>> problem = runner.get_problem("icpc-jakarta-2019_card-collecting")
>>> print(problem.problem_title)
            Card Collecting
"""
if problem_id not in self.problems:
raise ValueError(f"Problem {problem_id} not found. Available: {list(self.problems.keys())[:5]}...")
row = self.problems[problem_id]
return Problem(
problem_id=row["problem_id"],
problem_code=row["problem_code"],
problem_slug=row["problem_slug"],
problem_title=row["problem_title"],
contest_name=row["contest_name"],
contest_full_name=row["contest_full_name"],
year=row["year"],
source=row["source"],
time_limit=row["time_limit"],
memory_limit=row["memory_limit"],
statement=row["statement"],
analysis=row["analysis"],
sample_test_cases_input=row["sample_test_cases_input"],
sample_test_cases_output=row["sample_test_cases_output"],
hidden_test_cases_input=row["hidden_test_cases_input"],
hidden_test_cases_output=row["hidden_test_cases_output"],
has_special_judge=row["has_special_judge"],
special_judge_code=row["special_judge_code"],
special_judge_format=row["special_judge_format"],
uses_kattis=row["uses_kattis"],
kattis_problem_id=row["kattis_problem_id"],
)
def list_problems(self, contest: Optional[str] = None) -> list:
"""
List all available problem IDs, optionally filtered by contest.
Args:
contest: Optional substring to filter by (e.g., "jakarta-2019")
Returns:
List of problem ID strings.
Example:
>>> runner.list_problems()[:3]
['cs3233-2023_a', 'cs3233-2023_b', 'cs3233-2023_c']
>>> runner.list_problems("jakarta-2017")
['icpc-jakarta-2017_...']
"""
if contest:
return [pid for pid in self.problems if contest in pid]
return list(self.problems.keys())
def run_solution(
self,
problem_id: str,
solution_code: str,
language: str = "cpp",
run_hidden: bool = True,
solution_file: Optional[str] = None,
) -> dict:
"""
Run a solution against a problem's test cases.
Executes the solution code against sample tests first, then hidden tests
if sample tests pass. Handles compilation, time/memory limits, and
special judges automatically.
Args:
problem_id: The problem ID from the dataset
solution_code: The solution source code as a string
            language: Programming language - "cpp" (default) or "Python".
                For local judging, anything other than "cpp" is run with python3.
run_hidden: Whether to run hidden test cases after samples pass
solution_file: Optional path to solution file (used for Kattis submission)
Returns:
dict with keys:
- status: "PASSED", "Wrong Answer", "Time Limit Exceeded",
"Memory Limit Exceeded", "Runtime Error", or "Compile Error"
- problem_id: The problem ID
- problem_title: Human-readable problem title
- test_cases: List of individual test results
- sample_summary: Dict with pass/fail counts for sample tests
- hidden_summary: Dict with pass/fail counts for hidden tests
Example:
>>> results = runner.run_solution(
... "icpc-jakarta-2019_card-collecting",
... open("solution.cpp").read(),
... language="cpp"
... )
>>> print(results["status"])
            PASSED
"""
problem = self.get_problem(problem_id)
# Handle Kattis problems
if problem.uses_kattis:
return self._run_kattis(problem, solution_code, solution_file, language)
# Compile special judge if needed
scorer_executable = None
if problem.has_special_judge and problem.special_judge_code:
scorer_executable = self._compile_scorer(problem)
results = {
"problem_id": problem_id,
"problem_title": problem.problem_title,
"status": "PASSED",
"test_cases": [],
"sample_summary": {},
"hidden_summary": {},
}
time_limit = parse_time_limit(problem.time_limit)
memory_limit = problem.memory_limit
# Run sample tests
sample_passed = self._run_tests(
problem, solution_code, language,
problem.sample_test_cases_input,
problem.sample_test_cases_output,
"sample", results, time_limit, memory_limit,
scorer_executable
)
if not sample_passed:
return results
# Run hidden tests
if run_hidden and problem.hidden_test_cases_input:
self._run_tests(
problem, solution_code, language,
problem.hidden_test_cases_input,
problem.hidden_test_cases_output,
"hidden", results, time_limit, memory_limit,
scorer_executable
)
return results
def _run_kattis(
self,
problem: Problem,
solution_code: str,
solution_file: Optional[str],
language: str
) -> dict:
"""
Submit a solution to Kattis for CS3233 problems.
CS3233 problems are judged on Kattis and don't have local hidden test cases.
This method submits the solution via the Kattis CLI and parses the result.
Args:
problem: The Problem object
solution_code: Solution source code
solution_file: Optional path to existing solution file
language: "cpp" or "Python"
Returns:
dict with status, kattis_output, and execution time
Note:
Requires kattis-cli to be installed and configured:
https://github.com/Kattis/kattis-cli
"""
if not solution_file:
            # Create a temp file (delete=False so the kattis CLI can still read it)
ext = ".py" if language == "Python" else ".cpp"
with tempfile.NamedTemporaryFile(mode="w", suffix=ext, delete=False) as f:
f.write(solution_code)
solution_file = f.name
print(f"Submitting to Kattis: {problem.kattis_problem_id}")
try:
proc = subprocess.run(
["kattis", solution_file, "-p", problem.kattis_problem_id, "-f"],
capture_output=True, text=True, timeout=300
)
stdout = proc.stdout
stderr = proc.stderr
print(f"📤 Kattis output:\n{stdout}")
# Extract verdict
verdict_match = re.search(
r'(Accepted|Wrong Answer|Time Limit Exceeded|Run Time Error|Compile Error|Memory Limit Exceeded)',
stdout
)
time_match = re.search(r'\(([\d.]+)s\)', stdout)
raw_verdict = verdict_match.group(1) if verdict_match else "Unknown"
verdict_map = {
"Accepted": "PASSED",
"Wrong Answer": "Wrong Answer",
"Time Limit Exceeded": "Time Limit Exceeded",
"Run Time Error": "Runtime Error",
"Compile Error": "Compile Error",
"Memory Limit Exceeded": "Memory Limit Exceeded",
"Unknown": "Runtime Error"
}
return {
"problem_id": problem.problem_id,
"problem_title": problem.problem_title,
"status": verdict_map.get(raw_verdict, "Runtime Error"),
"time": float(time_match.group(1)) if time_match else None,
"kattis_output": stdout,
"kattis_stderr": stderr,
}
except Exception as e:
return {
"problem_id": problem.problem_id,
"status": "Submission Failed",
"error": str(e)
}
def _compile_scorer(self, problem: Problem) -> Optional[str]:
"""
Compile the special judge scorer for problems with multiple valid outputs.
Some problems accept multiple correct answers (e.g., "print any valid permutation").
These use a custom scorer (written in C++ using testlib) to validate outputs.
Args:
problem: The Problem object containing special_judge_code
Returns:
Path to the compiled scorer executable, or None if compilation fails.
Note:
The scorer is compiled with g++ -std=c++17. The executable is stored
in a temporary directory that persists for the duration of testing.
"""
# Create persistent temp directory for scorer (not auto-deleted)
tmp_dir = tempfile.mkdtemp(prefix="scorer_")
scorer_src = os.path.join(tmp_dir, "scorer.cpp")
scorer_exe = os.path.join(tmp_dir, "scorer")
with open(scorer_src, "w") as f:
f.write(problem.special_judge_code)
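        # Assumes the scorer source is self-contained or that testlib.h is on
        # g++'s default include path; add an -I flag below if it lives elsewhere.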
result = subprocess.run(
["g++", "-o", scorer_exe, scorer_src, "-std=c++17"],
capture_output=True, text=True
)
if result.returncode != 0:
print(f"⚠️ Scorer compilation failed: {result.stderr}")
return None
os.chmod(scorer_exe, 0o755)
return scorer_exe
def _run_tests(
self,
problem: Problem,
solution_code: str,
language: str,
inputs: list,
outputs: list,
test_type: str,
results: dict,
time_limit: float,
memory_limit: str,
scorer_executable: Optional[str],
) -> bool:
"""
Run a set of test cases against a solution.
Executes the solution for each input, compares output with expected,
and handles time/memory limits and special judging.
Args:
problem: The Problem object
solution_code: Source code to execute
language: "cpp" or "Python"
inputs: List of input strings
outputs: List of expected output strings
test_type: "sample" or "hidden" (for labeling results)
results: Dict to append test results to (modified in place)
time_limit: Time limit in seconds
memory_limit: Memory limit as string (e.g., "256 MB")
scorer_executable: Path to compiled scorer, or None for exact match
Returns:
True if all tests passed, False if any test failed.
Note:
Stops on first failure and updates results["status"] accordingly.
"""
tally = {"PASSED": 0, "TLE": 0, "MLE": 0, "Wrong Answer": 0, "RTE": 0, "CE": 0}
# Parse memory limit
try:
bytes_limit = parse_memory_limit_to_bytes(memory_limit)
except ValueError:
bytes_limit = None
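            # an unparseable limit string means tests run without a memory cap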
# Compile C++ if needed
executable = None
if language == "cpp":
executable = self._compile_cpp(solution_code, problem.problem_slug)
if executable is None:
results["status"] = "Compile Error"
tally["CE"] += 1
results[f"{test_type}_summary"] = tally
return False
else: # Python - check syntax once before running tests
try:
compile(solution_code, "<solution>", "exec")
except SyntaxError as se:
results["status"] = "Compile Error"
results["test_cases"].append({
"test": f"{test_type}_1",
"result": "Compile Error",
"error": str(se)
})
tally["CE"] += 1
results[f"{test_type}_summary"] = tally
return False
# Calculate timeout (time_limit + 5s buffer, capped at 60s)
timeout = min(60, time_limit + 5)
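        # Note: TLE is only raised when wall-clock time exceeds this padded
        # timeout; a run finishing between time_limit and the buffer still
        # counts as passed.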
for i, (input_data, expected_output) in enumerate(zip(inputs, outputs)):
test_name = f"{test_type}_{i+1}"
try:
# Run the solution
start_time = time.time()
if language == "cpp":
proc = subprocess.run(
[executable],
input=input_data, text=True, capture_output=True,
timeout=timeout,
preexec_fn=lambda bl=bytes_limit: _preexec_setrlimit_bytes(bl)
)
else: # Python
proc = subprocess.run(
["python3", "-c", solution_code],
input=input_data, text=True, capture_output=True,
timeout=timeout,
preexec_fn=lambda bl=bytes_limit: _preexec_setrlimit_bytes(bl)
)
execution_time = time.time() - start_time
generated_output = (proc.stdout or "").strip()
# Check for errors
if proc.returncode == PREEXEC_ERROR_EXIT_CODE:
raise RuntimeError("preexec_fn failed")
if proc.returncode is not None and proc.returncode < 0:
sig = -proc.returncode
if sig == signal.SIGKILL:
raise MemoryError("Killed by SIGKILL (likely OOM)")
if proc.returncode == 137:
raise MemoryError("Exit code 137 (OOM)")
if proc.returncode != 0:
raise RuntimeError(f"Non-zero exit: {proc.returncode}, stderr: {proc.stderr}")
# Judge output
is_correct = self._judge_output(
problem, input_data, expected_output, generated_output,
scorer_executable
)
if is_correct:
results["test_cases"].append({
"test": test_name,
"result": "PASSED",
"time": execution_time,
})
tally["PASSED"] += 1
print(f"✅ {test_name} Passed ({execution_time:.2f}s)")
else:
results["status"] = "Wrong Answer"
results["test_cases"].append({
"test": test_name,
"result": "Wrong Answer",
"time": execution_time,
"expected": expected_output[:200] + "..." if len(expected_output) > 200 else expected_output,
"generated": generated_output[:200] + "..." if len(generated_output) > 200 else generated_output,
})
tally["Wrong Answer"] += 1
print(f"❌ {test_name} Wrong Answer")
results[f"{test_type}_summary"] = tally
return False
except subprocess.TimeoutExpired:
results["status"] = "Time Limit Exceeded"
results["test_cases"].append({"test": test_name, "result": "TLE"})
tally["TLE"] += 1
print(f"⏳ {test_name} TLE")
results[f"{test_type}_summary"] = tally
return False
except MemoryError as e:
results["status"] = "Memory Limit Exceeded"
results["test_cases"].append({"test": test_name, "result": "MLE", "error": str(e)})
tally["MLE"] += 1
print(f"💾 {test_name} MLE")
results[f"{test_type}_summary"] = tally
return False
except Exception as e:
results["status"] = "Runtime Error"
results["test_cases"].append({"test": test_name, "result": "RTE", "error": str(e)})
tally["RTE"] += 1
print(f"❌ {test_name} Runtime Error: {e}")
results[f"{test_type}_summary"] = tally
return False
results[f"{test_type}_summary"] = tally
return True
def _compile_cpp(self, solution_code: str, name: str) -> Optional[str]:
"""
Compile a C++ solution to an executable.
Args:
solution_code: C++ source code as a string
name: Base name for the source file and executable
Returns:
Path to the compiled executable, or None if compilation fails.
Note:
Compiles with: g++ -o <exe> <src> -std=c++17 -O2
Compilation errors are printed to stdout.
"""
tmp_dir = tempfile.mkdtemp()
cpp_file = os.path.join(tmp_dir, f"{name}.cpp")
executable = os.path.join(tmp_dir, f"{name}.out")
with open(cpp_file, "w") as f:
f.write(solution_code)
result = subprocess.run(
["g++", "-o", executable, cpp_file, "-std=c++17", "-O2"],
capture_output=True, text=True
)
if result.returncode != 0:
print(f"❌ Compile Error:\n{result.stderr}")
return None
return executable
def _judge_output(
self,
problem: Problem,
input_data: str,
expected_output: str,
generated_output: str,
scorer_executable: Optional[str],
) -> bool:
"""
Judge whether the generated output is correct.
First attempts exact string match (after stripping whitespace).
If that fails and a special judge is available, uses the scorer.
Args:
problem: The Problem object
input_data: The input that was given to the solution
expected_output: The expected output from test case
generated_output: The output produced by the solution
scorer_executable: Path to compiled scorer, or None
Returns:
True if the output is correct, False otherwise.
Note:
Special judge formats:
- "standard": scorer <input> <judge_output> <contestant_output>
- "jakarta2017": scorer <input> <unused> <judge_output> < contestant_output
"""
expected_output = expected_output.strip()
generated_output = generated_output.strip()
# Exact match
if generated_output == expected_output:
return True
# Use special judge if available
if scorer_executable and problem.has_special_judge:
with tempfile.TemporaryDirectory() as tmp_dir:
input_file = os.path.join(tmp_dir, "input.txt")
expected_file = os.path.join(tmp_dir, "expected.txt")
generated_file = os.path.join(tmp_dir, "generated.txt")
with open(input_file, "w") as f:
f.write(input_data)
with open(expected_file, "w") as f:
f.write(expected_output)
with open(generated_file, "w") as f:
f.write(generated_output)
if problem.special_judge_format == "jakarta2017":
# Format: scorer <input> <unused> <judge_output> < contestant_output
proc = subprocess.run(
[scorer_executable, input_file, "dummy", expected_file],
input=generated_output, text=True, capture_output=True
)
                # The jakarta2017 scorer prints nothing on AC and "WA" on a wrong answer
return "WA" not in proc.stdout and proc.returncode == 0
else:
# Standard format: scorer <input> <judge_output> <contestant_output>
proc = subprocess.run(
[scorer_executable, input_file, expected_file, generated_file],
capture_output=True, text=True
)
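                # testlib checkers conventionally signal the verdict via exit
                # code (0 = AC, non-zero = WA/PE); the stdout check below is a
                # fallback for scorers that print a verdict string instead.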
# Check for AC in output or successful return code with no WA
output = proc.stdout.strip().upper()
return "AC" in output or (proc.returncode == 0 and "WA" not in output)
return False
# Convenience function
def run_solution(problem_id: str, solution_code: str, language: str = "cpp") -> dict:
"""
Convenience function to quickly run a solution against a problem.
Creates a TestRunner instance, runs the solution, and returns results.
For running multiple solutions, create a TestRunner instance directly
to avoid reloading the dataset each time.
Args:
problem_id: The problem ID from the dataset
solution_code: Solution source code as a string
language: "cpp" (default) or "Python"
Returns:
dict with status and test results (see TestRunner.run_solution)
Example:
>>> from hf_test_runner import run_solution
>>> results = run_solution("icpc-jakarta-2019_card-collecting", code)
>>> print(results["status"])
"""
runner = TestRunner()
return runner.run_solution(problem_id, solution_code, language)
if __name__ == "__main__":
# Example usage
runner = TestRunner()
# List some problems
print("\nAvailable problems (first 10):")
for pid in runner.list_problems()[:10]:
p = runner.get_problem(pid)
print(f" {pid}: {p.problem_title}")
print(f" Sample tests: {len(p.sample_test_cases_input)}, Hidden: {len(p.hidden_test_cases_input)}")
print(f" Special judge: {p.has_special_judge}, Kattis: {p.uses_kattis}")