# Duplicated from AI-MO/olympiads-ref (commit 63c5bce, verified; uploader: beichen0426).
import re
import json
from tqdm import tqdm
from loguru import logger
from pathlib import Path
from typing import Tuple, List
from dataclasses import dataclass
# Repository root (three directory levels above this script); used by
# write_pairs() to record resource paths relative to the repository.
project_root = Path(__file__).parent.parent.parent
@dataclass
class Problem:
    """Marker for a problem heading found in the text."""
    # The regex match for the heading; capture groups hold the problem label.
    match: re.Match
@dataclass
class Solution:
    """Marker for a solution/answer heading found in the text."""
    # The regex match for the heading.
    match: re.Match
def clean_text(text: str) -> str:
    """Delete known boilerplate passages from a scraped markdown transcript.

    Two fixed passages (a bibliographic aside and a chess-piece primer)
    appear verbatim in some source files and would otherwise be segmented
    as if they were problem content, so they are removed outright.
    """
    boilerplate_passages = (
        'For a discussion, see\nW. Morris and V. Soltan. The Erdős-Szekeres Problem on Points in Convex Postion-A Survey, Bulletin of the American Math Monthly. 37 (2000), 437-458.\n\nThis article is available at\nhttp://www.ams.org/bull/2000-37-04/S0273-0979-00-00877-6/home.html.\nIf $N(7)=33$, the highest sure score on this problem would be $32-6=26$. It is not known whether there exist arbitrarily large sets of points that will fool the graders.\n\n## The unexamined life is not worth living.',
        '- Bishop: This piece can move any number of squares diagonally if there are no other pieces along its path.\n- Rook: This piece can move any number of squares either vertically or horizontally if there are no other pieces along its path\n- Knight: This piece can move either two squares along a row and one square along a column or two squares along a column and one square along a row.\n- King: This piece can move to any open adjacent square (including diagonally).',
    )
    for passage in boilerplate_passages:
        text = text.replace(passage, '')
    return text
def find_problem_with_solution(
    text: str,
    problem_parttern: re.Pattern,
    solution_pattern: re.Pattern
) -> int:
    """Locate the first problem section that also contains a solution marker.

    The text is divided into sections, each running from one problem-heading
    match to the start of the next one (or to the end of the text).  The
    start offset of the first section that contains a solution heading is
    returned, so the caller can skip any preamble before it.

    Args:
        text (str): The text to search.
        problem_parttern (re.Pattern): Compiled pattern for problem headings.
            (Misspelled name kept for backward compatibility with callers.)
        solution_pattern (re.Pattern): Compiled pattern for solution headings.

    Returns:
        int: The start position of the first problem followed by a solution,
        or 0 (scan from the very beginning) when no such section exists.
    """
    matches = list(problem_parttern.finditer(text))
    for index, match in enumerate(matches):
        # Section ends at the next problem heading, or at end-of-text for
        # the last match.
        section_end_position = matches[index + 1].start() if index + 1 < len(matches) else len(text)
        if solution_pattern.search(text[match.start():section_end_position]):
            return match.start()
    return 0
def analyze(text: str) -> Tuple[List[Problem | Solution], int]:
    """Scan markdown text for problem and solution headings.

    Args:
        text (str): The markdown text to analyze.

    Returns:
        Tuple[List[Problem | Solution], int]: The heading tags sorted by
        their position in the text, and the number of problem headings.
    """
    problem_pattern = re.compile(r'(?:\n|\n\#+\s+)(?:(\d{1,2})\.\s+(?:problem\:\s*|\$?\[.+?\]\$?)?|problem\s+?(\w+)\s+\[\d+(?:\spoints)?\]|\$([H|M|T]_\{\d+\})\$\.)', re.IGNORECASE)
    solution_pattern = re.compile(r'(?:\n|\n\#+\s+)(?:answer\:|solution(?:\s+\d+)?(?:\:|\.)|Proposed by:.*?\n)\s*', re.IGNORECASE)
    # Skip any preamble before the first problem that actually has a solution.
    scan_from = find_problem_with_solution(text, problem_pattern, solution_pattern)
    problems = [Problem(m) for m in problem_pattern.finditer(text, scan_from)]
    solutions = [Solution(m) for m in solution_pattern.finditer(text, scan_from)]
    # Stable sort keeps a problem before a solution matched at the same offset.
    tags = sorted(problems + solutions, key=lambda tag: tag.match.start())
    return tags, len(problems)
def segment(text: str, tags: List[Problem | Solution]) -> List[str]:
    """Extract the text lying between consecutive heading tags.

    Each segment runs from the end of one tag's match to the start of the
    next tag's match (or to the end of the text for the final tag), with
    surrounding whitespace stripped.
    """
    pieces: List[str] = []
    for position, tag in enumerate(tags):
        begin = tag.match.end()
        if position + 1 < len(tags):
            finish = tags[position + 1].match.start()
        else:
            finish = len(text)
        pieces.append(text[begin:finish].strip())
    return pieces
def join(tags: List[Problem | Solution], segments: List[str]) -> List[Tuple[str, str, str, str, str]]:
    """Pair each solution segment with the most recently seen problem.

    A Problem tag updates the "current problem" state; every subsequent
    Solution tag emits one (problem, solution, label, problem_match,
    solution_match) tuple, so one problem followed by several solutions
    yields several pairs.  Solutions that appear before any non-empty
    problem, and empty solution segments, are dropped.
    """
    pairs: List[Tuple[str, str, str, str, str]] = []
    current_problem = ''
    current_label = ''
    current_problem_match = ''
    for tag, body in zip(tags, segments):
        if isinstance(tag, Problem):
            current_problem = body
            current_problem_match = tag.match.group(0)
            # One of the pattern's alternation groups carries the label.
            current_label = (
                tag.match.group(1) or tag.match.group(2) or tag.match.group(3)
            )
        elif current_problem.strip():
            if body.strip():
                pairs.append(
                    (current_problem, body, current_label,
                     current_problem_match, tag.match.group(0))
                )
    return pairs
def write_pairs(output_file: Path, pairs):
    """Serialize problem/solution pairs as JSON lines and write them out.

    Args:
        output_file: Destination .jsonl path. Its stem is expected to
            contain a four-digit year and may contain a problem-type
            marker ("-alg-", "-comb-", "-geo-").
        pairs: Tuples of (problem, solution, problem_label, problem_match,
            solution_match) as produced by join().
    """
    # NOTE(review): raises AttributeError if the stem holds no four-digit
    # run — assumed guaranteed by the md file naming scheme; confirm.
    year = re.search(r'(\d{4})', output_file.stem).group(1)
    problem_type_mapping = {
        "-alg-": "Algebra",
        "-comb-": "Combinatorics",
        "-geo-": "Geometry",
    }
    # First marker found in the stem wins; None when no marker is present.
    problem_type = next(
        (label for marker, label in problem_type_mapping.items()
         if marker in output_file.stem),
        None,
    )
    lines = [
        json.dumps(
            {
                'year': year,
                'tier': "T4",
                'problem_label': problem_label,
                'problem_type': problem_type,
                "exam": "HMMT",
                'problem': problem,
                'solution': solution,
                'metadata': {
                    'resource_path': output_file.relative_to(project_root).as_posix(),
                    'problem_match': problem_match,
                    'solution_match': solution_match
                }
            },
            ensure_ascii=False
        )
        for problem, solution, problem_label, problem_match, solution_match in pairs
    ]
    # join() instead of repeated '+=' so writing N pairs is linear in the
    # total output size rather than quadratic.
    output_file.write_text(''.join(line + '\n' for line in lines), encoding="utf-8")
def main():
    """Segment every HMMT markdown file under md/ into JSONL under segmented/."""
    base_dir = Path(__file__).resolve().parent.parent
    md_dir = base_dir / "md"
    out_dir = base_dir / "segmented"
    problems_total = 0
    solutions_total = 0
    for md_file in tqdm(list(md_dir.glob('**/*.md')), desc='Segmenting'):
        target = out_dir / md_file.relative_to(md_dir).with_suffix('.jsonl')
        target.parent.mkdir(parents=True, exist_ok=True)
        # A leading newline lets the heading patterns (which require '\n')
        # match a problem on the very first line of the file.
        text = '\n' + clean_text(md_file.read_text(encoding="utf-8"))
        tags, problem_count = analyze(text)
        pairs = join(tags, segment(text, tags))
        if not pairs or problem_count == 0:
            logger.warning(f"No problem found in {md_file}")
            continue
        write_pairs(target, pairs)
        problems_total += problem_count
        solutions_total += len(pairs)
    logger.info(f"Total problem count: {problems_total}")
    logger.info(f"Total solution count: {solutions_total}")
# Entry point: run the segmentation pass when executed as a script.
if __name__ == '__main__':
    main()