# Web-page residue from the dataset viewer, commented out so the file parses:
# olympiads-ref / USA_TST / segment_script / segment_2025.py
# beichen0426's picture
# Duplicate from AI-MO/olympiads-ref
# 63c5bce verified
# -----------------------------------------------------------------------------
# Author: Jiawei Liu
# Date: 2025-10-29
# -----------------------------------------------------------------------------
import json
import re
from pathlib import Path
from rapidfuzz import fuzz
def clean_text(text: str):
    """Strip the boilerplate exam-instructions header from *text*.

    Removes everything from a leading "# Team Selection Test for ..."
    heading through the "... posted by staff online." sentence; text
    without that header is returned unchanged.
    """
    header_pattern = (
        r"^# Team Selection Test for.*?"
        r"publicly until they are posted by staff online\."
    )
    return re.sub(
        header_pattern,
        "",
        text,
        flags=re.DOTALL | re.MULTILINE | re.IGNORECASE,
    )
def extract_problems(markdown_text: str):
    """Split a problems markdown document into individual problems.

    Returns a dict mapping the problem label (e.g. "1") to a tuple of
    (problem statement text, matched header string).
    """
    # Each problem starts with a header like "Problem 1. " at line start.
    header_re = re.compile(r"^Problem\s*(\d+)\.\s+", re.DOTALL | re.MULTILINE)
    headers = list(header_re.finditer(markdown_text))
    problems = {}
    for idx, header in enumerate(headers):
        # A problem's body runs from the end of its header to the start of
        # the next header (or to the end of the document for the last one).
        if idx + 1 < len(headers):
            body_end = headers[idx + 1].start()
        else:
            body_end = len(markdown_text)
        body = markdown_text[header.end():body_end]
        problems[header.group(1)] = (body.strip(), header.group())
    print(f" - Extracted {len(problems)} problems.")
    return problems
def extract_solutions(markdown_text):
    """Split a solutions markdown document into individual solutions.

    Returns a dict mapping the problem label (e.g. "1") to a tuple of
    (solution text, matched heading string). Empty if no solutions
    section is found.
    """
    solutions = {}
    # 1. Locate the solutions section: everything after a heading such as
    #    "## \(S 1\) Solutions to Day 1" up to the end of the document.
    section = re.search(
        r"^##.*?Solutions to Day 1\s*$(.*)",
        markdown_text,
        re.DOTALL | re.MULTILINE,
    )
    if section is None:
        print(" - Not found Solutions section.")
        return solutions
    body = section.group(1)
    # 2. Split on per-solution headings shaped like:
    #    ## \(\S 1.1\) Solution to TST 1, by Anthony Wang
    heading_re = re.compile(r"^##.*?Solution.*?TST\s*(\d+).*?\n$", re.MULTILINE)
    headings = list(heading_re.finditer(body))
    for idx, heading in enumerate(headings):
        # A solution runs from the end of its heading up to the next
        # heading, or to the end of the section for the last one.
        if idx + 1 < len(headings):
            end = headings[idx + 1].start()
        else:
            end = len(body)
        solutions[heading.group(1)] = (body[heading.end():end].strip(), heading.group())
    print(f" - Extracted {len(solutions)} solutions.")
    return solutions
def join(problems: dict, solutions: dict):
    """Pair each extracted problem with its corresponding solution.

    Args:
        problems: label -> (problem text, header match), from extract_problems.
        solutions: label -> (solution text, heading match), from extract_solutions.

    Returns:
        A list of (problem, solution, label, problem_match, solution_match)
        tuples, one per problem. A problem with no matching solution yields
        an empty solution string and a None solution_match.
    """
    pairs = []
    for problem_label, (problem, p_match) in problems.items():
        entry = solutions.get(problem_label)
        if entry is None:
            # Bug fix: the original unconditionally unpacked .get()'s result,
            # raising TypeError ("cannot unpack None") for a missing label.
            print(f" - Warning: No solution found for problem {problem_label}.")
            pairs.append((problem, "", problem_label, p_match, None))
            continue
        solution, s_match = entry
        # Solutions often restate the problem before the proof; find the best
        # fuzzy overlap with the problem statement and cut exactly that span.
        align = fuzz.partial_ratio_alignment(solution, problem)
        # Bug fix: slicing removes only the aligned span; the original used
        # str.replace(), which removed *every* occurrence of that substring.
        solution = solution[:align.src_start] + solution[align.src_end:]
        # Drop the stray literal "Day 2" section heading if it leaked in.
        solution = re.sub(
            r"## \\\(\\S 2\\\) Solutions to Day 2", "", solution, flags=re.IGNORECASE
        ).strip()
        if not solution:
            print(f" - Warning: No solution found for problem {problem_label}.")
        pairs.append((problem, solution, problem_label, p_match, s_match))
    return pairs
def write_pairs(output_file: Path, pairs: list, year: str, project_root: Path):
    """Serialize problem/solution pairs to *output_file* as JSONL.

    Args:
        output_file: destination .jsonl path; must live under project_root.
        pairs: tuples of (problem, solution, label, problem_match, solution_match).
        year: competition year recorded on every line.
        project_root: base directory used to record a relative resource path.
    """
    # Hoisted out of the loop: the resource path is loop-invariant.
    resource_path = output_file.relative_to(project_root).as_posix()
    lines = []
    for problem, solution, problem_label, p_match, s_match in pairs:
        record = {
            "year": year,
            "tier": "T0",
            "problem_label": problem_label,
            "problem_type": None,
            # NOTE(review): this file lives under USA_TST but the exam tag
            # says "IMO" — kept as-is to preserve output; confirm intent.
            "exam": "IMO",
            "problem": problem,
            "solution": solution,
            "metadata": {
                "resource_path": resource_path,
                "problem_match": p_match,
                "solution_match": s_match,
            },
        }
        lines.append(json.dumps(record, ensure_ascii=False))
    # Single join + single write replaces the original quadratic `+=`
    # string accumulation; output bytes are identical.
    output_file.write_text(
        "".join(line + "\n" for line in lines), encoding="utf-8"
    )
if __name__ == "__main__":
    # Layout: <base>/md holds the markdown sources, <base>/segmented the output.
    compet_base_path = Path(__file__).resolve().parent.parent
    compet_md_path = compet_base_path / "md"
    seg_output_path = compet_base_path / "segmented"
    project_root = compet_base_path.parent
    num_problems = 0
    num_solutions = 0
    # Bug fix: indexing an empty glob raised an opaque IndexError; fail with
    # an explicit message naming the missing inputs instead.
    try:
        problems_md = next(compet_md_path.glob("**/*probs*2025.md"))
        solutions_md = next(compet_md_path.glob("**/*sols*2025.md"))
    except StopIteration:
        raise FileNotFoundError(
            f"Expected *probs*2025.md and *sols*2025.md under {compet_md_path}"
        ) from None
    output_file = seg_output_path / solutions_md.relative_to(
        compet_md_path
    ).with_suffix(".jsonl")
    output_file.parent.mkdir(parents=True, exist_ok=True)
    # The leading "\n" guarantees the ^-anchored header regexes can match
    # even when the header is the very first line of the file.
    problems_md_text = "\n" + clean_text(problems_md.read_text(encoding="utf-8"))
    solutions_md_text = "\n" + clean_text(solutions_md.read_text(encoding="utf-8"))
    problems = extract_problems(problems_md_text)
    solutions = extract_solutions(solutions_md_text)
    num_problems += len(problems)
    num_solutions += len(solutions)
    pairs = join(problems, solutions)
    # The glob guarantees a 4-digit year appears in the stem.
    year = re.search(r"\d{4}", output_file.stem).group()
    write_pairs(output_file, pairs, year, project_root)
    print()
    print(f"Total problems extracted: {num_problems}")
    print(f"Total solutions extracted: {num_solutions}")