Alejandro Escontrela committed on
Commit
8420f91
·
1 Parent(s): 7591c2e

Create mdm_data.py

Browse files
Files changed (1) hide show
  1. mdm_data.py +185 -0
mdm_data.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TODO(wikitext): Add a description here."""
2
+
3
+
4
+ import os
5
+
6
+ import datasets
7
+
8
+
9
+ _CITATION = """\
10
+ @misc{
11
+ }
12
+ """
13
+
14
+ _DESCRIPTION = """\
15
+ The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
16
+ Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike
17
+ License.
18
+ """
19
+ _HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
20
+ _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
21
+ _DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"
22
+
23
+
24
class MDMDataConfig(datasets.BuilderConfig):
    """BuilderConfig for the MDM WikiText data loader."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for Wikitext.

        Args:
            data_url: `string`, url to the dataset (word or raw level).
            **kwargs: keyword arguments forwarded to super
                (e.g. ``name``, ``description``).
        """
        # Bug fix: the original called super(WikitextConfig, self).__init__;
        # `WikitextConfig` is not defined in this module, so instantiating
        # the config raised NameError. Use this class's own name instead.
        super(MDMDataConfig, self).__init__(
            version=datasets.Version("1.0.0"),
            **kwargs,
        )
        self.data_url = data_url
42
class MDMData(datasets.GeneratorBasedBuilder):
    """Dataset builder for the WikiText language-modeling corpora.

    Exposes the four standard WikiText configurations (word-level and raw,
    in the 103M- and 2M-token sizes) and yields one ``{"text": ...}``
    example per line of the extracted files.
    """

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        # Bug fix: the original instantiated `WikitextConfig`, a name that
        # does not exist in this module (NameError at import time); the
        # config class defined above is `MDMDataConfig`.
        MDMDataConfig(
            name="wikitext-103-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        MDMDataConfig(
            name="wikitext-2-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        MDMDataConfig(
            name="wikitext-103-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
        MDMDataConfig(
            name="wikitext-2-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
    ]

    # Maps each config name to (extracted sub-directory, file suffix).
    # Replaces the original four-deep nested if/else pyramid in
    # `_split_generators` with a single table lookup; the directory and
    # file names are identical to the original branches.
    _LAYOUTS = {
        "wikitext-103-v1": ("wikitext-103", "tokens"),
        "wikitext-2-v1": ("wikitext-2", "tokens"),
        "wikitext-103-raw-v1": ("wikitext-103-raw", "raw"),
        "wikitext-2-raw-v1": ("wikitext-2-raw", "raw"),
    }

    def _info(self):
        """Return the `datasets.DatasetInfo` describing this dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Each example is a single line of text.
            features=datasets.Features({"text": datasets.Value("string")}),
            # Language modeling has no canonical (input, target) pair.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return train/valid/test splits.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch and extract
                the zip archive named by the active config.

        Raises:
            KeyError: if the config name is not one of the four known
                wikitext layouts (cannot happen via BUILDER_CONFIGS).
        """
        subdir, suffix = self._LAYOUTS[self.config.name]
        data_file = dl_manager.download_and_extract(self.config.data_url)
        data_dir = os.path.join(data_file, subdir)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    # e.g. "wiki.train.tokens" or "wiki.train.raw".
                    "data_file": os.path.join(data_dir, f"wiki.{tag}.{suffix}"),
                    "split": tag,
                },
            )
            for split_name, tag in (
                (datasets.Split.TEST, "test"),
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "valid"),
            )
        ]

    def _generate_examples(self, data_file, split):
        """Yield (line_index, {"text": line}) pairs from one split file.

        Whitespace-only lines are emitted with an empty string so the
        example index stays aligned with the file's line numbers.
        """
        with open(data_file, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                yield idx, {"text": row if row.strip() else ""}