holylovenia committed on
Commit
d7b4539
·
verified ·
1 Parent(s): 7cebb0c

Upload bm_pt3.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. bm_pt3.py +227 -0
bm_pt3.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ This test is for 15 years old Malaysia student, it is about reading comprehension and general knowledge for malay language.
18
+ """
19
+ from pathlib import Path
20
+ from typing import Dict, List, Tuple
21
+ import pandas as pd
22
+ import re
23
+
24
+ import datasets
25
+
26
+ from seacrowd.utils import schemas
27
+ from seacrowd.utils.configs import SEACrowdConfig
28
+ from seacrowd.utils.constants import Tasks, Licenses, TASK_TO_SCHEMA
29
+
30
+ _CITATION = None
31
+
32
+ _DATASETNAME = "bm_pt3"
33
+
34
+ _DESCRIPTION = """\
35
+ This test is for 15 years old Malaysia student, it is about reading comprehension and general knowledge for malay language.
36
+ """
37
+
38
+ _HOMEPAGE = "https://github.com/mesolitica/malaysian-dataset/tree/master/llm-benchmark/BM-pt3"
39
+
40
+ _LANGUAGES = ["zlm"]
41
+
42
+ _LICENSE = Licenses.UNLICENSE.value
43
+
44
+ _LOCAL = False
45
+
46
+ _URLS = {
47
+ "A": "https://raw.githubusercontent.com/mesolitica/malaysian-dataset/master/llm-benchmark/BM-pt3/BM-A-pt3",
48
+ "B": "https://raw.githubusercontent.com/mesolitica/malaysian-dataset/master/llm-benchmark/BM-pt3/BM-B-pt3"
49
+ }
50
+
51
+ _SUPPORTED_TASKS = [Tasks.COMMONSENSE_REASONING]
52
+
53
+ _SOURCE_VERSION = "1.0.0"
54
+
55
+ _SEACROWD_VERSION = "2024.06.20"
56
+
57
+
58
class BMPT3Dataset(datasets.GeneratorBasedBuilder):
    """BM-PT3 dataset builder.

    A benchmark aimed at 15-year-old Malaysian (PT3-level) students, covering
    reading comprehension and general knowledge in the Malay language. Two
    subsets are published as raw text files: "A" (multiple choice, options A-D)
    and "B" (open-ended answers) — see _URLS.
    """

    SUBSETS = ["A", "B"]
    SEACROWD_SCHEMA = TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema for {subset} subset",
            schema="source",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_seacrowd_qa",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema for {subset} subset",
            # was: schema=f"seacrowd_qa" — an f-string with no placeholders
            schema="seacrowd_qa",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ]

    # BUG FIX: the original default, f"{_DATASETNAME}_source", matched none of
    # the config names declared above (every name includes the subset letter),
    # so the default config could never be loaded. Point it at the "A" source
    # config instead.
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_A_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata carrying the feature schema for the active config."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "num": datasets.Value("string"),
                    "objective": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "choices": datasets.Sequence(datasets.Value("string")),
                    "answer": datasets.Value("string"),
                    # "asal soalan" (original question) provenance block.
                    "source": {
                        "title": datasets.Value("string"),
                        "num": datasets.Value("string"),
                        "url": datasets.Value("string"),
                    },
                }
            )
        elif self.config.schema == "seacrowd_qa":
            features = schemas.qa_features
            features["meta"] = {
                "source": {
                    "title": datasets.Value("string"),
                    "num": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            }
        else:
            # BUG FIX: the original fell through with `features` unbound for an
            # unrecognized schema, raising a confusing NameError below.
            raise ValueError(f"Unknown schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the raw text file for the configured subset; single TRAIN split."""
        if "A" in self.config.subset_id:
            subset_type = "A"
        elif "B" in self.config.subset_id:
            subset_type = "B"
        else:
            # BUG FIX: the original left subset_type/data_dir unbound here and
            # crashed with a NameError instead of a clear message.
            raise ValueError(f"Unknown subset_id: {self.config.subset_id}")

        data_dir = dl_manager.download_and_extract(_URLS[subset_type])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "subset_type": subset_type,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, subset_type: str) -> Tuple[int, Dict]:
        """Yield examples as (key, example) tuples in the configured schema.

        Args:
            filepath: path to the downloaded raw benchmark text file.
            subset_type: "A" (multiple choice) or "B" (open-ended).
        """
        with open(filepath, "r", encoding="utf-8") as f:
            data = self._extract_data(f.read(), subset_type)

        if self.config.schema == "source":
            for i, entry in enumerate(data):
                yield i, entry

        elif self.config.schema == "seacrowd_qa":
            for i, entry in enumerate(data):
                yield i, {
                    "id": str(i),
                    "question_id": entry["num"],
                    "document_id": None,
                    "question": entry["question"],
                    # Subset A entries carry parsed A-D options; subset B has none.
                    "type": "multiple_choice" if entry["choices"] else "open_ended",
                    "choices": entry["choices"],
                    "context": entry["objective"],
                    "answer": [entry["answer"]] if entry["answer"] else [],
                    "meta": {
                        "source": entry["source"],
                    },
                }

    def _extract_data(self, doc: str, subset_type: str) -> List[Dict]:
        """Parse the raw benchmark text into a list of source-schema records.

        The file is a sequence of entries delimited by "no: <int>" markers. Each
        entry holds Malay-labelled fields: "objektif:" (objective), "soalan:"
        (question; with inline A-D options for subset A), "jawapan:" (answer),
        and "asal soalan:" (original source: title, number, url).

        Args:
            doc: full contents of the raw benchmark file.
            subset_type: "A" (answers are option letters) or "B" (free text).

        Returns:
            One dict per entry with keys num/objective/question/choices/answer/source;
            fields that fail to parse are None (or [] for choices).

        Raises:
            ValueError: if subset_type is neither "A" nor "B".
        """
        # Field patterns. Compiled once per call; applied per entry below.
        pattern_num = re.compile(r"(no:\s*\d+)")
        pattern_objective = re.compile(r"objektif:\s*(.*)")
        pattern_question = re.compile(r'soalan:\s*(.*?)(?=\njawapan:|asal soalan:)', re.DOTALL)
        pattern_choices = re.compile(r'([A-D]\.\s+.+?)(?=\n[A-D]\.|\Z)', re.DOTALL)
        if subset_type == "A":
            # Subset A answers are a single option letter A-D.
            pattern_answer = re.compile(r'jawapan:\s*([A-D])[,\s]', re.DOTALL)
        elif subset_type == "B":
            # Subset B answers are free text running up to the "asal soalan:" field.
            pattern_answer = re.compile(r'jawapan:\s*(.*?)\s*asal soalan:', re.DOTALL)
        else:
            # BUG FIX: the original left pattern_answer unbound for any other
            # value and crashed later with a NameError.
            raise ValueError(f"Unknown subset_type: {subset_type}")
        pattern_asal_soalan = re.compile(r'asal soalan:\s*(.*?),\s*no\s*(\d+),\s*(.*?)\n', re.DOTALL)

        res = []
        # Split the document on the "no: N" markers, dropping any preamble
        # before the first marker.
        doc_split = pattern_num.sub("<NUMBER>", doc).split("<NUMBER>")[1:]

        for i, entry in enumerate(doc_split):
            # Objektif
            _objective = pattern_objective.findall(entry)
            objective = _objective[0] if _objective else None

            # Soalan (the inline A-D options are stripped from the question text)
            _question = pattern_question.findall(entry)
            question = pattern_choices.sub('', _question[0]).strip("\n") if _question else None

            # Choices (subset A only): map option letter -> option text.
            choices = {}
            if _question and subset_type == "A":
                for _c in pattern_choices.findall(_question[0]):
                    # BUG FIX: the original rebuilt the option text with
                    # ' '.join(_c.split(". ")[1:]), which silently turned every
                    # ". " inside a choice into a single space. partition()
                    # keeps everything after the first ". " intact.
                    alpha, _, txt = _c.partition(". ")
                    choices[alpha] = txt

            # Answer
            _answer = pattern_answer.findall(entry)
            if subset_type == "A":
                # Map the answer letter to its full choice text. BUG FIX: use
                # .get() so a letter with no parsed choice yields None instead
                # of a KeyError.
                answer = choices.get(_answer[0]) if (_answer and choices) else None
            else:  # subset_type == "B", validated above
                answer = _answer[0] if _answer else None

            # Asal soalan (provenance): (title, number, url) or all-None.
            _source = pattern_asal_soalan.findall(entry)
            source = _source[0] if _source else (None, None, None)

            res.append({
                "num": str(i + 1),
                "objective": objective,
                "question": question,
                "choices": list(choices.values()) if choices else [],
                "answer": answer,
                "source": {
                    "title": source[0],
                    "num": source[1],
                    "url": source[2],
                },
            })

        return res