bandoos commited on
Commit
f756014
·
1 Parent(s): 29fff35
Files changed (2) hide show
  1. README.md +1 -3
  2. conll2003_mini.py +265 -0
README.md CHANGED
@@ -1,3 +1 @@
1
- ---
2
- license: other
3
- ---
 
1
+ # Mini CoNLL-2003 dataset — see https://huggingface.co/datasets/conll2003 for the original
 
 
conll2003_mini.py ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
18
+
19
+ import os
20
+
21
+ import datasets
22
+
23
+
24
# Module-level logger shared by the builder defined below.
logger = datasets.logging.get_logger(__name__)


# NOTE(review): leading whitespace inside the multi-line string literals below
# was lost in the diff rendering this file was recovered from — confirm the
# original formatting against the upstream conll2003 script if it matters.

# BibTeX citation for the original CoNLL-2003 shared task paper.
_CITATION = """\
@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
author = "Tjong Kim Sang, Erik F. and
De Meulder, Fien",
booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
year = "2003",
url = "https://www.aclweb.org/anthology/W03-0419",
pages = "142--147",
}
"""

# Human-readable dataset card text; the first line flags that this fork
# truncates every split to at most _MAX_ITEMS examples.
_DESCRIPTION = """\

!! forked version producing at most 10 items per split !!

The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
not belong to the previous three groups.

The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
tagging scheme, whereas the original dataset uses IOB1.

For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
"""

# Archive to download and the names of the per-split files it contains.
_URL = "https://data.deepai.org/conll2003.zip"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "valid.txt"
_TEST_FILE = "test.txt"

# Hard cap on the number of examples this mini fork yields per split.
_MAX_ITEMS = 10
64
+
65
+
66
class Conll2003Config(datasets.BuilderConfig):
    """Configuration for the mini CoNLL-2003 dataset builder."""

    def __init__(self, **kwargs):
        """Create a Conll2003 builder configuration.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
76
+
77
+
78
class Conll2003(datasets.GeneratorBasedBuilder):
    """Mini CoNLL-2003 dataset builder.

    Behaves like the upstream ``conll2003`` builder except that
    ``_generate_examples`` yields at most ``_MAX_ITEMS`` examples per split.
    """

    BUILDER_CONFIGS = [
        Conll2003Config(
            name="conll2003",
            version=datasets.Version("1.0.0"),
            description="Conll2003 dataset",
        ),
    ]

    def _info(self):
        """Return the ``DatasetInfo`` describing this dataset.

        The features mirror the four CoNLL-2003 columns: the raw ``tokens``
        plus POS / syntactic-chunk / NER tags, each encoded as a sequence of
        ``ClassLabel`` values (label order fixes the integer ids).
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the three splits.

        Returns:
            A list of ``SplitGenerator`` for train / validation / test, each
            pointing ``_generate_examples`` at the matching extracted file.
        """
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield at most ``_MAX_ITEMS`` ``(key, example)`` pairs from *filepath*.

        The file is standard CoNLL-2003 format: one token per line with four
        space-separated columns (token, POS tag, chunk tag, NER tag);
        sentences are separated by blank lines and documents by lines
        starting with ``-DOCSTART-``.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # produce at most _MAX_ITEMS
                    if guid >= _MAX_ITEMS:
                        return

                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # conll2003 tokens are space separated
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    ner_tags.append(splits[3].rstrip())
            # Last example, reached when the file has no trailing blank line.
            # BUG FIX: this final yield previously ignored _MAX_ITEMS, so such
            # a file could produce _MAX_ITEMS + 1 examples; enforce the cap.
            if tokens and guid < _MAX_ITEMS:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }