LxYxvv committed on
Commit
3d4e2ee
·
1 Parent(s): 69bb401

Add dataset loading script

Browse files
Files changed (1) hide show
  1. olympiads-ref.py +103 -0
olympiads-ref.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """AI-MO Olympiad Reference Dataset"""
15
+
16
+
17
+
18
+ import re
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import datasets
23
+ from huggingface_hub import HfApi
24
+
25
+
26
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
40
+
41
class OlympiadReferenceDataset(datasets.GeneratorBasedBuilder):
    """Loader for the ``AI-MO/olympiads-ref`` dataset.

    Enumerates every ``segmented/*.jsonl`` file in the Hub dataset repo,
    downloads them, and streams one example per JSON line. All examples go
    into a single ``train`` split.
    """

    VERSION = datasets.Version("0.0.1")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hub client used only to list the repository's data files.
        self._hfapi = HfApi()
        # Matches any repo path of the form ".../segmented/<name>.jsonl".
        self.pattern = re.compile(r'.*/segmented/[^/]+\.jsonl$')

    def _info(self):
        """Return dataset metadata with the fixed per-example feature schema."""
        features = datasets.Features(
            {
                "problem_type": datasets.Value("string"),
                "problem_label": datasets.Value("string"),
                "problem": datasets.Value("string"),
                "solution": datasets.Value("string"),
                "year": datasets.Value("int32"),
                "tier": datasets.Value("int32"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Discover and download all segmented ``.jsonl`` shards.

        Returns a single TRAIN split whose generator receives the local
        paths of the downloaded shard files.
        """
        # NOTE(review): ``dl_manager._base_path`` is a private attribute of
        # the download manager — confirm it remains available across
        # `datasets` library versions.
        data_root_path = Path(dl_manager._base_path)

        repo_files = self._hfapi.list_repo_files(
            repo_id="AI-MO/olympiads-ref", repo_type="dataset"
        )
        seg_jsonl_files = [s for s in repo_files if self.pattern.match(s)]

        data_urls = [data_root_path / sjf for sjf in seg_jsonl_files]
        data_files = dl_manager.download_and_extract(data_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "jsonl_files": data_files,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, jsonl_files, split):
        """Yield ``(key, example)`` pairs from each downloaded JSONL shard.

        Keys are a running integer across all shards. Fix: blank or
        whitespace-only lines (e.g. a trailing newline at end of file) are
        skipped instead of crashing ``json.loads`` with a decode error.
        """
        key = 0

        for file in jsonl_files:
            with open(file, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        # Tolerate empty lines / trailing newline at EOF.
                        continue
                    data = json.loads(line)
                    yield key, {
                        "problem_type": data.get("problem_type"),
                        # Some shards label this field "label" instead of
                        # "problem_label"; accept either.
                        "problem_label": data.get("problem_label") or data.get("label"),
                        "problem": data.get("problem"),
                        "solution": data.get("solution"),
                        "year": data.get("year"),
                        "tier": data.get("tier"),
                    }
                    key += 1