LxYxvv's picture
Add data for Canadian Mathematical Olympiad (CANADA MO) (#3)
8e5709f verified
raw
history blame
5.24 kB
import json
import re
from pathlib import Path
from typing import List, Optional, Tuple

from loguru import logger
from tqdm import tqdm
# Repository root: three directory levels above this script's location.
project_root = Path(__file__).parent.parent.parent
# Labels attached to regex matches to distinguish problem vs. solution headings.
problem_tag = 'Problem'
solution_tag = 'Solution'
def clean_text(text: str) -> str:
    """
    Apply fixed, source-specific textual corrections to raw markdown text.

    Args:
        text (str): Raw markdown content of a competition page.

    Returns:
        str: The text with known problematic fragments rewritten.
    """
    # Each entry is (needle, replacement); applied in order.
    corrections = (
        ("\n\n## Canadian Mathematical Olympiad 2021", ""),
        (
            "1. no colour is assigned to two regions that share an edge;\n2. for each $i",
            "1). no colour is assigned to two regions that share an edge;\n2). for each $i",
        ),
    )
    for needle, replacement in corrections:
        text = text.replace(needle, replacement)
    return text
def find_problem_with_solution(
    text: str,
    problem_parttern: re.Pattern,
    solution_pattern: re.Pattern
) -> Optional[int]:
    """
    Find the start position of the first problem section that contains a solution.

    A "section" runs from one problem heading to the next (or to end of text).

    Args:
        text (str): The text to search.
        problem_parttern (re.Pattern): Pattern matching problem headings.
            (Name kept as-is, typo included, for caller compatibility.)
        solution_pattern (re.Pattern): Pattern matching solution headings.

    Returns:
        Optional[int]: Start offset of the first problem heading whose section
        also matches ``solution_pattern``, or None when no such section exists.
    """
    matches = list(problem_parttern.finditer(text))
    for index, match in enumerate(matches):
        # Section ends at the next problem heading, or end of text for the last one.
        section_end = matches[index + 1].start() if index + 1 < len(matches) else len(text)
        if solution_pattern.search(text[match.start():section_end]):
            return match.start()
    # Fix: make the "no qualifying section" result explicit instead of an
    # implicit None from a function annotated as returning int.
    return None
def analyze(text: str) -> Tuple[List, int]:
    """
    Locate problem and solution headings in the markdown text.

    Args:
        text (str): The markdown text to analyze.

    Returns:
        Tuple[List, int]: A list of (regex match, tag label) pairs sorted by
        match position, and the number of problem headings found.
    """
    problem_re = re.compile(r'(?:\n|\n#+ )(?:Problem\s*(\d+)|Problem No\.\s*(\d+)|P(\d+)|(\d+))(?:\.|\:)', re.IGNORECASE)
    solution_re = re.compile(r'(?:\n|\n#+ )(?:Solution|First Solution|Second Solution|Alternate Solution)\s*\d*(?:\.|\:)?', re.IGNORECASE)
    # Skip any preamble: start scanning at the first problem that has a solution.
    start = find_problem_with_solution(text, problem_re, solution_re) or 0
    problem_hits = [(m, problem_tag) for m in problem_re.finditer(text, start)]
    solution_hits = [(m, solution_tag) for m in solution_re.finditer(text, start)]
    combined = problem_hits + solution_hits
    combined.sort(key=lambda pair: pair[0].start())
    return combined, len(problem_hits)
def segment(text: str, tags):
    """
    Slice out the text between consecutive tag matches.

    Args:
        text (str): The full text the matches were produced from.
        tags: List of (regex match, tag label) pairs, sorted by position.

    Returns:
        list[str]: One slice per tag, running from the end of its match to the
        start of the next match (or end of text), with surrounding whitespace
        and markdown '#' characters trimmed.
    """
    pieces = []
    for index, (match, _label) in enumerate(tags):
        begin = match.end()
        finish = tags[index + 1][0].start() if index + 1 < len(tags) else len(text)
        pieces.append(text[begin:finish].strip().strip('#').strip())
    return pieces
def join(tags, segments):
    """
    Pair each solution segment with the most recently seen problem segment.

    Args:
        tags: List of (regex match, tag label) pairs, sorted by position.
        segments: Text slices aligned one-to-one with ``tags``.

    Returns:
        list[tuple]: One (problem, solution, problem_label, problem_match,
        solution_match) tuple per solution tag. Multiple consecutive solutions
        all pair with the same preceding problem.
    """
    pairs = []
    current_problem = ''
    current_label = ''
    current_problem_match = ''
    for (match, label), body in zip(tags, segments):
        if label == problem_tag:
            current_problem = body
            current_problem_match = match.group(0)
            # The problem number sits in whichever regex alternative matched.
            current_label = match.group(1) or match.group(2) or match.group(3) or match.group(4)
        else:
            pairs.append((current_problem, body, current_label, current_problem_match, match.group(0)))
    return pairs
def write_pairs(output_file: Path, pairs):
    """
    Serialize problem/solution pairs as JSON Lines into ``output_file``.

    Args:
        output_file (Path): Destination .jsonl file; its stem must contain a
            4-digit year (raises AttributeError otherwise).
        pairs: Iterable of (problem, solution, problem_label, problem_match,
            solution_match) tuples as produced by ``join``.
    """
    year = re.search(r'(\d{4})', output_file.stem).group(1)
    lines = []
    for problem, solution, problem_label, problem_match, solution_match in pairs:
        record = {
            'year': year,
            'tier': "T2",
            'problem_label': problem_label,
            'problem_type': None,
            'problem': problem,
            'solution': solution,
            'metadata': {
                # Path is stored relative to the repo root, POSIX-style.
                'resource_path': output_file.relative_to(project_root).as_posix(),
                'problem_match': problem_match,
                'solution_match': solution_match
            }
        }
        lines.append(json.dumps(record, ensure_ascii=False) + '\n')
    output_file.write_text(''.join(lines), encoding="utf-8")
def main():
    """Segment every in-range CMO markdown file into a problem/solution JSONL file."""
    base_dir = Path(__file__).resolve().parent.parent
    md_dir = base_dir / "md"
    out_dir = base_dir / "segmented"
    problems_total = 0
    solutions_total = 0
    for md_file in tqdm(list(md_dir.glob('**/*.md')), desc='Segmenting'):
        year = int(re.search(r'(\d{4})', md_file.stem).group(1))
        # Only process files from 2002 to 2024; 2007 is deliberately excluded.
        if not (2002 <= year <= 2006 or 2008 <= year <= 2024):
            continue
        target = out_dir / md_file.relative_to(md_dir).with_suffix('.jsonl')
        target.parent.mkdir(parents=True, exist_ok=True)
        # Leading newline lets the '\n'-anchored heading patterns match at file start.
        content = '\n' + clean_text(md_file.read_text(encoding="utf-8"))
        tags, n_problems = analyze(content)
        pairs = join(tags, segment(content, tags))
        if pairs and n_problems > 0:
            write_pairs(target, pairs)
            problems_total += n_problems
            solutions_total += len(pairs)
        else:
            logger.warning(f"No problem found in {md_file}")
    logger.info(f"Total problem count: {problems_total}")
    logger.info(f"Total solution count: {solutions_total}")


if __name__ == '__main__':
    main()