LxYxvv committed on
Commit
3b3a043
·
1 Parent(s): bdd6d25

add USAMO 1972-1995

Browse files
USAMO/md/en-USAMO-1972-2003.md ADDED
The diff for this file is too large to render. See raw diff
 
USAMO/raw/en-USAMO-1972-2003.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0145291a8e2f6f60db9468be10ab2b23a73ce357bf1f994f36c3344867c6c76c
3
+ size 392426
USAMO/segment_script/segment_1972-1995.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -----------------------------------------------------------------------------
2
+ # Author: Jiawei Liu
3
+ # Date: 2025-10-29
4
+ # -----------------------------------------------------------------------------
5
+ import json
6
+ import re
7
+ from pathlib import Path
8
+ from typing import List, Tuple
9
+
10
+
11
# Tag labels attached to regex matches by `analyze` and consumed by `join`
# to distinguish problem headings from solution headings.
problem_tag = "Problem"
solution_tag = "Solution"
# answer_tag = "Answer"
14
+
15
+
16
def clean_text(text: str):
    """Rewrite legacy split-paper labels (A1-A3 / B1-B3) as numeric 1-6.

    Early USAMO transcriptions label problems "Problem A1".."Problem B3";
    downstream segmentation expects "Problem 1".."Problem 6".
    """
    relabel = (
        ("Problem A1", "Problem 1"),
        ("Problem A2", "Problem 2"),
        ("Problem A3", "Problem 3"),
        ("Problem B1", "Problem 4"),
        ("Problem B2", "Problem 5"),
        ("Problem B3", "Problem 6"),
    )
    # The patterns are plain literals, so str.replace is equivalent to re.sub.
    for old_label, new_label in relabel:
        text = text.replace(old_label, new_label)
    return text
24
+
25
+
26
def segment_exams(text: str):
    """Split the combined markdown into {year: exam_text}, pre-1996 only.

    Exam boundaries are markdown headings such as "## 4th USAMO 1975".
    Each exam's text runs from the end of its heading to the start of the
    next heading (or end of document), stripped of surrounding whitespace.
    Exams dated 1996 or later are skipped.
    """
    heading_re = re.compile(
        r"^#+\s*.+USAMO\s*(\d{4})",
        re.IGNORECASE | re.MULTILINE,
    )
    headings = list(heading_re.finditer(text))

    exams = {}
    for idx, heading in enumerate(headings):
        year = heading.group(1)
        if int(year) >= 1996:
            continue

        # Body extends to the next exam heading, or to end-of-text.
        if idx + 1 < len(headings):
            body_end = headings[idx + 1].start()
        else:
            body_end = len(text)

        exams[year] = text[heading.end():body_end].strip()

    return exams
49
+
50
+
51
def analyze(text: str) -> Tuple[List, int]:
    """
    Locate problem/solution section markers in one exam's text.

    Args:
        text (str): The markdown text to analyze.

    Returns:
        Tuple[List, int]: ((match, tag-kind) pairs sorted by position in the
        text, number of problem markers found).
    """
    problem_re = re.compile(r"(?:\n|# )Problem\s+(\d+)", re.IGNORECASE)
    solution_re = re.compile(r"(?:\n|# )Solution", re.IGNORECASE)
    # Answer sections are not tagged for this data set.

    problem_hits = [(m, problem_tag) for m in problem_re.finditer(text)]
    solution_hits = [(m, solution_tag) for m in solution_re.finditer(text)]

    # Interleave problems and solutions in document order; the two patterns
    # can never match at the same offset, so ordering is unambiguous.
    tags = sorted(problem_hits + solution_hits, key=lambda pair: pair[0].start())
    return tags, len(problem_hits)
74
+
75
+
76
def segment(text: str, tags):
    """Extract the text spans between consecutive tag matches.

    Each segment runs from the end of one match to the start of the next
    match (the last segment runs to end-of-text), with surrounding
    whitespace and leading/trailing '#' heading markers stripped.
    """
    segments = []
    for idx, (match, _kind) in enumerate(tags):
        start = match.end()
        end = tags[idx + 1][0].start() if idx + 1 < len(tags) else len(text)
        segments.append(text[start:end].strip().strip("#").strip())
    return segments
90
+
91
+
92
def join(tags, segments):
    """Pair each problem segment with its solution segment(s).

    Walks the (match, tag) list in document order.  A "Problem" tag stashes
    the problem text, label, and matched heading; each following "Solution"
    tag emits a (problem, solution, problem_label, problem_match,
    solution_match) tuple.  A problem with no solution before the next
    problem is emitted immediately with empty solution fields.

    Args:
        tags: (re.Match, tag-kind) pairs sorted by position, from `analyze`.
        segments: the text segment for each tag, from `segment`
            (same length as `tags`).

    Returns:
        List of (problem, solution, problem_label, problem_match,
        solution_match) tuples.
    """
    problem, solution = "", ""
    problem_label, problem_match, solution_match = "", "", ""
    pairs = []

    tag_classes = [kind for _, kind in tags]

    for i, ((m, tag), seg) in enumerate(zip(tags, segments)):
        if tag == problem_tag:
            problem = seg
            problem_match = m.group(0)
            problem_label = m.group(1)

            # Everything before the next problem marker belongs to this
            # problem.  (The original guarded index() with a redundant
            # membership scan and an unreachable except; plain EAFP suffices.)
            try:
                next_problem_index = tag_classes.index(problem_tag, i + 1)
            except ValueError:
                next_problem_index = len(segments)

            # No solution tag in this problem's span: emit it now with
            # empty solution fields.
            if solution_tag not in tag_classes[i + 1 : next_problem_index]:
                solution = ""
                solution_match = ""
                pairs.append(
                    (problem, solution, problem_label, problem_match, solution_match)
                )
        else:
            solution = seg
            solution_match = m.group(0)
            pairs.append(
                (problem, solution, problem_label, problem_match, solution_match)
            )

    return pairs
129
+
130
+
131
def write_pairs(project_root: Path, output_file: Path, pairs):
    """Serialize segmented problem/solution pairs to a JSONL file.

    Args:
        project_root: Repository root; used to record the output file's
            path relative to it in each record's metadata.
        output_file: Destination .jsonl file (parent directory must exist).
        pairs: [(year, [(problem, solution, problem_label, problem_match,
            solution_match), ...]), ...] as produced by `join`.
    """
    # Collect one JSON line per record and join once at the end; repeated
    # `+=` string concatenation in the original was quadratic.
    lines = []
    for year, problems in pairs:
        for (
            problem,
            solution,
            problem_label,
            problem_match,
            solution_match,
        ) in problems:
            record = {
                "year": year,
                "tier": "T1",
                "problem_label": problem_label,
                "problem_type": None,
                "exam": "USAMO",
                "problem": problem,
                "solution": solution,
                "metadata": {
                    "resource_path": output_file.relative_to(
                        project_root
                    ).as_posix(),
                    "problem_match": problem_match,
                    "solution_match": solution_match,
                },
            }
            lines.append(json.dumps(record, ensure_ascii=False))

    output_text = "".join(line + "\n" for line in lines)
    output_file.write_text(output_text, encoding="utf-8")
165
+
166
+
167
if __name__ == "__main__":
    # Layout: <repo>/USAMO/{md,segmented,segment_script}; this script lives
    # in segment_script/, so the USAMO base is two levels up from the file.
    usamo_root = Path(__file__).resolve().parent.parent
    md_dir = usamo_root / "md"
    segmented_dir = usamo_root / "segmented"
    repo_root = usamo_root.parent

    for source_md in md_dir.glob("**/en-USAMO-1972-2003.md"):
        target = segmented_dir / source_md.relative_to(md_dir).with_suffix(
            ".jsonl"
        )
        target.parent.mkdir(parents=True, exist_ok=True)

        # Read and normalize the markdown (A1-B3 labels -> 1-6).
        text = clean_text(source_md.read_text(encoding="utf-8"))

        # [(year, [(problem, solution, problem_label, problem_match, solution_match), ...]), ...]
        all_pairs = []
        for year, exam_text in segment_exams(text).items():
            exam_tags, _problem_count = analyze(exam_text)
            exam_segments = segment(exam_text, exam_tags)
            all_pairs.append((year, join(exam_tags, exam_segments)))

        write_pairs(repo_root, target, all_pairs)
USAMO/segmented/en-USAMO-1972-2003.jsonl ADDED
The diff for this file is too large to render. See raw diff