pandoradox committed on
Commit
ae4ef9e
·
verified ·
1 Parent(s): 4c84952

Create conll2003.py

Browse files
Files changed (1) hide show
  1. conll2003.py +224 -0
conll2003.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""

import os  # NOTE(review): `os` is not referenced anywhere in this script — confirm before removing.

import datasets


# Module-level logger, namespaced to this loading script.
logger = datasets.logging.get_logger(__name__)


# BibTeX citation for the CoNLL-2003 shared-task paper; surfaced in the dataset card.
_CITATION = """\
@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
    title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
    author = "Tjong Kim Sang, Erik F. and
      De Meulder, Fien",
    booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
    year = "2003",
    url = "https://www.aclweb.org/anthology/W03-0419",
    pages = "142--147",
}
"""

# Free-text description shown on the dataset page; documents the four-column,
# space-separated file format and the IOB2 tagging scheme used here.
_DESCRIPTION = """\
The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
not belong to the previous three groups.
The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
tagging scheme, whereas the original dataset uses IOB1.
For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
"""

# Single data file this fork downloads (instead of the canonical train/dev/test archive).
_URL = "https://sanchit-404.github.io/edited_dataset.txt"
# NOTE(review): the three filenames below are never read in this script — the
# single-file `_split_generators` bypasses them; confirm before removing.
_TRAINING_FILE = "train.txt"
_DEV_FILE = "valid.txt"
_TEST_FILE = "test.txt"
42
+
43
class Conll2003Config(datasets.BuilderConfig):
    """BuilderConfig for Conll2003."""

    def __init__(self, **kwargs):
        """BuilderConfig for Conll2003.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() — consistent with the Python-3-only syntax
        # used throughout this file. (Also fixes the docstring typo
        # "BuilderConfig forConll2003".)
        super().__init__(**kwargs)
52
+
53
+
54
class Conll2003(datasets.GeneratorBasedBuilder):
    """Conll2003 dataset."""

    BUILDER_CONFIGS = [
        Conll2003Config(name="conll2003", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
    ]

    def _info(self):
        """Return the DatasetInfo: one string id, a token sequence, and three aligned tag sequences."""
        # Penn-Treebank-style POS tag inventory (order defines the ClassLabel ids).
        pos_label_names = [
            '"', "''", "#", "$", "(", ")", ",", ".", ":", "``",
            "CC", "CD", "DT", "EX", "FW", "IN",
            "JJ", "JJR", "JJS", "LS", "MD",
            "NN", "NNP", "NNPS", "NNS", "NN|SYM",
            "PDT", "POS", "PRP", "PRP$",
            "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH",
            "VB", "VBD", "VBG", "VBN", "VBP", "VBZ",
            "WDT", "WP", "WP$", "WRB",
        ]
        # IOB2 syntactic-chunk tags.
        chunk_label_names = [
            "O",
            "B-ADJP", "I-ADJP",
            "B-ADVP", "I-ADVP",
            "B-CONJP", "I-CONJP",
            "B-INTJ", "I-INTJ",
            "B-LST", "I-LST",
            "B-NP", "I-NP",
            "B-PP", "I-PP",
            "B-PRT", "I-PRT",
            "B-SBAR", "I-SBAR",
            "B-UCP", "I-UCP",
            "B-VP", "I-VP",
        ]
        # IOB2 named-entity tags for the four CoNLL-2003 entity types.
        ner_label_names = [
            "O",
            "B-PER", "I-PER",
            "B-ORG", "I-ORG",
            "B-LOC", "I-LOC",
            "B-MISC", "I-MISC",
        ]
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "pos_tags": datasets.Sequence(datasets.features.ClassLabel(names=pos_label_names)),
                "chunk_tags": datasets.Sequence(datasets.features.ClassLabel(names=chunk_label_names)),
                "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=ner_label_names)),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for a single file hosted online."""
        # This fork downloads one file (_URL) and exposes it as the TRAIN split only.
        data_path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_path}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs parsed from the space-separated four-column file."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as handle:
            key = 0
            words, pos_labels, chunk_labels, ner_labels = [], [], [], []
            for line in handle:
                # A -DOCSTART- marker or an empty line closes the current sentence.
                boundary = line in ("", "\n") or line.startswith("-DOCSTART-")
                if not boundary:
                    # conll2003 tokens are space separated
                    columns = line.split(" ")
                    words.append(columns[0])
                    pos_labels.append(columns[1])
                    chunk_labels.append(columns[2])
                    ner_labels.append(columns[3].rstrip())
                    continue
                if words:
                    yield key, {
                        "id": str(key),
                        "tokens": words,
                        "pos_tags": pos_labels,
                        "chunk_tags": chunk_labels,
                        "ner_tags": ner_labels,
                    }
                    key += 1
                    words, pos_labels, chunk_labels, ner_labels = [], [], [], []
            # Flush the final sentence if the file does not end with a blank line.
            if words:
                yield key, {
                    "id": str(key),
                    "tokens": words,
                    "pos_tags": pos_labels,
                    "chunk_tags": chunk_labels,
                    "ner_tags": ner_labels,
                }