Add data for IberoAmerican MO
#18
by
LxYxvv
- opened
IberoAmerican_MO/download_script/download.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -----------------------------------------------------------------------------
|
| 2 |
+
# Author: Jiawei Liu
|
| 3 |
+
# Date: 2024-12-18
|
| 4 |
+
# -----------------------------------------------------------------------------
|
| 5 |
+
'''
|
| 6 |
+
Download script for IberoAmerican MO
|
| 7 |
+
To run:
|
| 8 |
+
`python IberoAmerican_MO/download_script/download.py`
|
| 9 |
+
'''
|
| 10 |
+
|
| 11 |
+
import requests
|
| 12 |
+
from tqdm import tqdm
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from requests.adapters import HTTPAdapter
|
| 15 |
+
from urllib3.util.retry import Retry
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def build_session(
    max_retries: int = 3,
    backoff_factor: int = 2,
    session: requests.Session = None
) -> requests.Session:
    """
    Return a requests session configured to retry failed HTTP requests.

    Args:
        max_retries (int, optional): Number of retries. Defaults to 3.
        backoff_factor (int, optional): Backoff factor. Defaults to 2.
        session (requests.Session, optional): Existing session to configure;
            a fresh one is created when omitted. Defaults to None.
    """
    session = session or requests.Session()
    retry_policy = Retry(total=max_retries, backoff_factor=backoff_factor)
    adapter = HTTPAdapter(max_retries=retry_policy)
    # Attach the same retrying adapter to both schemes.
    for scheme in ("http://", "https://"):
        session.mount(scheme, adapter)
    # Browser-like User-Agent so the download host does not reject the client.
    session.headers.update({
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    })
    return session
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def main():
    """Download the IberoAmerican MO source PDFs into the sibling ``raw`` directory."""
    output_dir = Path(__file__).parent.parent / "raw"
    output_dir.mkdir(parents=True, exist_ok=True)

    urls = {
        "en-1985-2003-IberoamericanMO": "https://drive.google.com/uc?id=0B2qYu535vGeQVWtnS3hIVk9uODQ&export=download",
    }

    # Use the retrying session: build_session() was defined but never used;
    # the original called bare requests.get with no retries, timeout, or status check.
    session = build_session()
    for title, url in tqdm(urls.items(), desc="Downloading "):
        response = session.get(url, timeout=60)
        # Fail loudly on HTTP errors instead of silently saving an error page as a PDF.
        response.raise_for_status()
        output_path = output_dir / f"{title}.pdf"
        output_path.write_bytes(response.content)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# Run the downloader when this file is executed as a script.
if __name__ == "__main__":
    main()
|
IberoAmerican_MO/md/en-1985-2003-IberoamericanMO.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
IberoAmerican_MO/raw/en-1985-2003-IberoamericanMO.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b0e378717bacd4b25c580709b69bb0496b804877728493c20d14f13d502c9a09
|
| 3 |
+
size 474633
|
IberoAmerican_MO/segment_script/segment.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
from loguru import logger
|
| 6 |
+
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Tuple, List
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Repository root: three levels up from this script
# (segment_script/ -> IberoAmerican_MO/ -> repo root); used to record
# resource paths relative to the repository in the output metadata.
project_root = Path(__file__).parent.parent.parent

# Labels attached to regex matches by analyze() and dispatched on by join().
problem_tag = 'Problem'
solution_tag = 'Solution'
year_tag = 'Year'
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def analyze(text: str) -> Tuple[List, int]:
    """
    Analyze the text and return the tags and problem number.

    Args:
        text (str): The markdown text to analyze.

    Returns:
        Tuple[List, int]: A tuple (tags, problem_num) where tags is a list of
        (re.Match, tag_kind) pairs sorted by match position and problem_num
        is the number of problem headings found.
    """
    # Bug fix: the original used [A-z], which also matches the ASCII
    # characters between 'Z' and 'a' ('[', '\\', ']', '^', '_', '`').
    problem_pattern = re.compile(r'(?:\n|\n#+\s+)Problem\s*([A-Za-z]\d+)', re.IGNORECASE)
    solution_pattern = re.compile(r'(?:\n|\n#+\s+)(?:Solution|Alternative solution)', re.IGNORECASE)
    year_pattern = re.compile(r'(?:\n|\n\#+)\s*\d+(?:st|nd|rd|th)\s*Iberoamerican\s*(\d+)', re.IGNORECASE)

    tags = []
    tags.extend([(x, problem_tag) for x in problem_pattern.finditer(text)])
    # Only problem matches are in the list at this point.
    problem_num = len(tags)

    tags.extend([(x, solution_tag) for x in solution_pattern.finditer(text)])

    tags.extend([(x, year_tag) for x in year_pattern.finditer(text)])
    # Sort by position so segment()/join() can walk the document in order.
    tags.sort(key=lambda x: x[0].start())
    return tags, problem_num
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def segment(text: str, tags):
    """Cut *text* into one chunk per tag.

    Each chunk spans from the end of a tag's match to the start of the next
    tag's match (or to the end of the text for the last tag). Chunks are
    stripped of surrounding whitespace and any leading/trailing '#'.
    """
    pieces = []
    for current, following in zip(tags, tags[1:] + [None]):
        begin = current[0].end()
        stop = len(text) if following is None else following[0].start()
        pieces.append(text[begin:stop].strip().strip('#').strip())
    return pieces
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def join(tags, segments):
    """Pair each solution segment with the most recent year and problem.

    Walks tags and segments in lockstep, remembering the latest year and
    problem seen; every solution tag emits one
    (year, problem, solution, problem_label, problem_match, solution_match)
    tuple. Problems without a following solution are therefore dropped.
    """
    current_year = ''
    current_problem = ''
    current_label = ''
    current_problem_match = ''
    records = []

    for (match, kind), chunk in zip(tags, segments):
        if kind == year_tag:
            current_year = match.group(1)
            continue
        if kind == problem_tag:
            current_problem = chunk
            current_problem_match = match.group(0)
            current_label = match.group(1)
            continue
        # Any other kind is a solution tag: emit a completed record.
        records.append((current_year, current_problem, chunk,
                        current_label, current_problem_match, match.group(0)))

    return records
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def write_pairs(output_file: Path, pairs):
    """Serialize segmented problem/solution records to *output_file* as JSON lines.

    Args:
        output_file (Path): Destination .jsonl file; must live under
            project_root (its repo-relative path is recorded in metadata).
        pairs: Iterable of (year, problem, solution, problem_label,
            problem_match, solution_match) tuples as produced by join().
    """
    lines = []
    for year, problem, solution, problem_label, problem_match, solution_match in pairs:
        lines.append(json.dumps(
            {
                'year': year,
                'tier': "T2",
                'problem_label': problem_label,
                'problem_type': None,
                'problem': problem,
                'solution': solution,
                'metadata': {
                    'resource_path': output_file.relative_to(project_root).as_posix(),
                    'problem_match': problem_match,
                    'solution_match': solution_match
                }
            },
            ensure_ascii=False
        ))

    # Single join instead of quadratic string += accumulation;
    # keep the original trailing newline after the last record.
    output_jsonl_text = '\n'.join(lines) + '\n' if lines else ''
    output_file.write_text(output_jsonl_text, encoding="utf-8")
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def main():
    """Segment every markdown file under md/ into problem/solution JSONL files."""
    base_dir = Path(__file__).resolve().parent.parent
    md_dir = base_dir / "md"
    out_dir = base_dir / "segmented"

    n_problems = 0
    n_solutions = 0

    for md_file in tqdm(list(md_dir.glob('**/*.md')), desc='Segmenting'):
        target = out_dir / md_file.relative_to(md_dir).with_suffix('.jsonl')
        target.parent.mkdir(parents=True, exist_ok=True)

        # Prepend a newline so patterns anchored on '\n' can match at the top of file.
        content = '\n' + md_file.read_text(encoding="utf-8")

        tags, n_found = analyze(content)

        chunks = segment(content, tags)
        records = join(tags, chunks)
        if records and n_found > 0:
            write_pairs(target, records)

            n_problems += n_found
            n_solutions += len(records)
        else:
            logger.warning(f"No problem found in {md_file}")

    logger.info(f"Total problem count: {n_problems}")
    logger.info(f"Total solution count: {n_solutions}")
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# Run the segmenter over all markdown files when executed as a script.
if __name__ == '__main__':
    main()
|
IberoAmerican_MO/segmented/en-1985-2003-IberoamericanMO.jsonl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|