# olympiads-ref / USAMO / segment_script / segment_1972-1995.py
# Uploaded by LxYxvv — commit "add USAMO 1972-1995" (3b3a043)
# -----------------------------------------------------------------------------
# Author: Jiawei Liu
# Date: 2025-10-29
# -----------------------------------------------------------------------------
import json
import re
from pathlib import Path
from typing import List, Tuple
# Tag labels attached to regex matches so downstream code can tell
# problem headers apart from solution headers.
problem_tag = "Problem"
solution_tag = "Solution"
# answer_tag = "Answer"  # answers are not segmented separately for this era
def clean_text(text: str) -> str:
    """Normalize legacy problem labels to plain numbers.

    Early USAMO sources number problems "A1"-"A3" (morning session) and
    "B1"-"B3" (afternoon session); rewrite them as "Problem 1".."Problem 6"
    so a single ``Problem <n>`` pattern matches everywhere downstream.

    Args:
        text: Raw markdown text of the competition file.

    Returns:
        The text with every "Problem A1".."Problem B3" label renumbered.
    """

    def _renumber(match: re.Match) -> str:
        # "A" problems keep their number; "B" problems are offset by 3.
        offset = 0 if match.group(1) == "A" else 3
        return f"Problem {offset + int(match.group(2))}"

    return re.sub(r"Problem ([AB])([123])", _renumber, text)
def segment_exams(text: str):
    """Split the full markdown into one text chunk per exam year.

    Exam boundaries are markdown headings such as ``## 4th USAMO 1975``.
    Years 1996 and later are skipped (handled by a different script).

    Args:
        text: Cleaned markdown text covering multiple exam years.

    Returns:
        dict mapping year string -> stripped markdown body for that exam.
    """
    header_re = re.compile(
        r"^#+\s*.+USAMO\s*(\d{4})",
        re.IGNORECASE | re.MULTILINE,
    )
    headers = list(header_re.finditer(text))
    by_year = {}
    for idx, header in enumerate(headers):
        year = header.group(1)
        if int(year) >= 1996:
            continue
        # Body runs from the end of this heading to the next heading (or EOF).
        end = headers[idx + 1].start() if idx + 1 < len(headers) else len(text)
        by_year[year] = text[header.end() : end].strip()
    return by_year
def analyze(text: str) -> Tuple[List, int]:
    """
    Analyze the text and return the tags and problem number.

    Scans for "Problem <n>" and "Solution" markers (each preceded by a
    newline or "# ") and returns them interleaved in document order.

    Args:
        text (str): The markdown text to analyze.

    Returns:
        Tuple[List, int]: A tuple containing the tags
        (list of ``(re.Match, tag_label)`` pairs sorted by position)
        and the number of problem markers found.
    """
    found = []
    for hit in re.finditer(r"(?:\n|# )Problem\s+(\d+)", text, re.IGNORECASE):
        found.append((hit, problem_tag))
    # Count problems before solution markers are mixed in.
    problem_count = len(found)
    for hit in re.finditer(r"(?:\n|# )Solution", text, re.IGNORECASE):
        found.append((hit, solution_tag))
    # answer markers are intentionally not collected for this era
    found.sort(key=lambda pair: pair[0].start())
    return found, problem_count
def segment(text: str, tags):
    """Cut ``text`` into the spans between consecutive tag matches.

    Args:
        text: The exam markdown being segmented.
        tags: ``(re.Match, tag_label)`` pairs sorted by match position.

    Returns:
        One string per tag: the text from that tag's end to the next tag's
        start (or end of text for the last tag), with surrounding whitespace
        and stray '#' heading characters stripped.
    """
    pieces = []
    for pos, (match, _label) in enumerate(tags):
        begin = match.end()
        finish = tags[pos + 1][0].start() if pos + 1 < len(tags) else len(text)
        pieces.append(text[begin:finish].strip().strip("#").strip())
    return pieces
def join(tags, segments):
    """Pair each problem segment with the solution segment(s) that follow it.

    Walks the tags/segments in document order. A problem with no solution
    before the next problem is emitted once with empty solution fields;
    each solution tag emits one pair carrying the most recent problem
    (so a problem with several solutions yields several pairs).

    Args:
        tags: ``(re.Match, tag_label)`` pairs sorted by position.
        segments: The text span following each tag (same length as ``tags``).

    Returns:
        List of ``(problem, solution, problem_label, problem_match,
        solution_match)`` tuples.
    """
    problem, solution = "", ""
    problem_label, problem_match, solution_match = "", "", ""
    pairs = []
    tag_classes = [label for _, label in tags]
    for i, ((m, tag), segment_text) in enumerate(zip(tags, segments)):
        if tag == problem_tag:
            problem = segment_text
            problem_match = m.group(0)
            problem_label = m.group(1)
            # Locate the next problem tag (or end of the list).  The
            # membership check guarantees index() succeeds, so no
            # exception handling is needed here.
            if problem_tag in tag_classes[i + 1 :]:
                next_problem_index = tag_classes.index(problem_tag, i + 1)
            else:
                next_problem_index = len(segments)
            # No solution before the next problem: emit immediately with
            # empty solution fields.
            if solution_tag not in tag_classes[i + 1 : next_problem_index]:
                solution = ""
                solution_match = ""
                pairs.append(
                    (problem, solution, problem_label, problem_match, solution_match)
                )
        else:
            solution = segment_text
            solution_match = m.group(0)
            pairs.append(
                (problem, solution, problem_label, problem_match, solution_match)
            )
    return pairs
def write_pairs(project_root: Path, output_file: Path, pairs):
    """Serialize segmented problems to ``output_file`` as JSON Lines.

    Args:
        project_root: Repository root; used to store a relative resource
            path in each record's metadata.
        output_file: Destination ``.jsonl`` path (parent must exist).
        pairs: ``[(year, [(problem, solution, problem_label, problem_match,
            solution_match), ...]), ...]`` as produced by the pipeline.
    """
    # Loop-invariant: the relative path is the same for every record.
    resource_path = output_file.relative_to(project_root).as_posix()
    lines = []
    for year, problems in pairs:
        for (
            problem,
            solution,
            problem_label,
            problem_match,
            solution_match,
        ) in problems:
            lines.append(
                json.dumps(
                    {
                        "year": year,
                        "tier": "T1",
                        "problem_label": problem_label,
                        "problem_type": None,
                        "exam": "USAMO",
                        "problem": problem,
                        "solution": solution,
                        "metadata": {
                            "resource_path": resource_path,
                            "problem_match": problem_match,
                            "solution_match": solution_match,
                        },
                    },
                    ensure_ascii=False,
                )
            )
    # Join once instead of quadratic string concatenation; every record
    # (including the last) is newline-terminated, matching jsonl convention.
    output_file.write_text("".join(f"{line}\n" for line in lines), encoding="utf-8")
if __name__ == "__main__":
    # Directory layout: <root>/md (input), <root>/segmented (output).
    base_dir = Path(__file__).resolve().parent.parent
    md_dir = base_dir / "md"
    segmented_dir = base_dir / "segmented"
    repo_root = base_dir.parent
    for source_md in md_dir.glob("**/en-USAMO-1972-2003.md"):
        target = segmented_dir / source_md.relative_to(md_dir).with_suffix(".jsonl")
        target.parent.mkdir(parents=True, exist_ok=True)
        # Read and normalize the markdown, then segment year by year.
        cleaned = clean_text(source_md.read_text(encoding="utf-8"))
        # [(year, [(problem, solution, problem_label, problem_match, solution_match), ...]), ...]
        yearly_pairs = []
        for year, exam_body in segment_exams(cleaned).items():
            tag_list, _problem_count = analyze(exam_body)
            pieces = segment(exam_body, tag_list)
            yearly_pairs.append((year, join(tag_list, pieces)))
        write_pairs(repo_root, target, yearly_pairs)