Basirudin committed on
Commit
d7a16d1
·
verified ·
1 Parent(s): 3325cdd

Update aka.py

Browse files
Files changed (1) hide show
  1. aka.py +258 -256
aka.py CHANGED
@@ -1,257 +1,259 @@
1
- # coding=utf-8
2
- # Copyright 2020 HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
18
-
19
- import os
20
-
21
- import datasets
22
-
23
-
24
- logger = datasets.logging.get_logger(__name__)
25
-
26
-
27
- _CITATION = """\
28
- @inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
29
- title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
30
- author = "Tjong Kim Sang, Erik F. and
31
- De Meulder, Fien",
32
- booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
33
- year = "2003",
34
- url = "https://www.aclweb.org/anthology/W03-0419",
35
- pages = "142--147",
36
- }
37
- """
38
-
39
- _DESCRIPTION = """\
40
- The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
41
- four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
42
- not belong to the previous three groups.
43
- The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
44
- a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
45
- a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
46
- and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
47
- if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
48
- B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
49
- tagging scheme, whereas the original dataset uses IOB1.
50
- For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
51
- """
52
-
53
- _URL = "aka.zip"
54
- _TRAINING_FILE = "train.txt"
55
- _DEV_FILE = "valid.txt"
56
- _TEST_FILE = "test.txt"
57
-
58
-
59
class Conll2003Config(datasets.BuilderConfig):
    """Builder configuration for the Conll2003-style dataset."""

    def __init__(self, **kwargs):
        """Create a configuration, passing all options through to the base class.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
68
-
69
-
70
class Conll2003(datasets.GeneratorBasedBuilder):
    """CoNLL-2003-format dataset with a custom drilling-domain NER tag set.

    Reads space-separated, one-token-per-line files (word, POS tag, chunk
    tag, NER tag) extracted from the local archive named by ``_URL``.
    """

    BUILDER_CONFIGS = [
        Conll2003Config(
            # FIX: the original literal "Basirudin\aka_generic_ner" contained
            # the escape sequence "\a" (BEL, \x07). The backslash is now
            # escaped so the config name holds the intended literal backslash.
            name="Basirudin\\aka_generic_ner",
            version=datasets.Version("1.0.0"),
            description="Conll2003 dataset",
        ),
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Penn-Treebank-style part-of-speech tag set.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    # IOB2 syntactic chunk tags.
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    # Custom IOB2 NER tags for drilling-domain entities; the
                    # classic CoNLL PER/ORG/LOC labels are intentionally
                    # disabled (kept commented as a record of the original set).
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                # "B-PER",
                                # "I-PER",
                                # "B-ORG",
                                # "I-ORG",
                                # "B-LOC",
                                # "I-LOC",
                                "B-KPI",  # KPI
                                "I-KPI",
                                "B-DIM",  # Dimension
                                "I-DIM",
                                "B-RVAL",  # Filter: Rig
                                "I-RVAL",
                                "B-WVAL",  # Filter: Well
                                "I-WVAL",
                                "B-FVAL",  # Filter: Field
                                "I-FVAL",
                                "B-SCVAL",  # Filter: Rig contractor/Service company
                                "I-SCVAL",
                                "B-OPVAL",  # Filter: Organization
                                "I-OPVAL",
                                "B-HVAL",  # Filter: Hole section
                                "I-HVAL",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Extract the archive and return one SplitGenerator per split."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from one CoNLL-format text file.

        Sentences are separated by blank lines; ``-DOCSTART-`` marker lines
        act as sentence boundaries and are never emitted as tokens. Each
        content line is expected to carry four space-separated columns:
        token, POS tag, chunk tag, NER tag (a short line would raise
        IndexError — assumed not to occur in these files; verify upstream).
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                # A blank line or a -DOCSTART- marker flushes the pending sentence.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # conll2003 tokens are space separated
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    # rstrip drops the trailing newline from the last column
                    ner_tags.append(splits[3].rstrip())
            # Flush the final sentence if the file has no trailing blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
18
+
19
+ import os
20
+
21
+ import datasets
22
+
23
+
24
+ logger = datasets.logging.get_logger(__name__)
25
+
26
+
27
+ _CITATION = """\
28
+ @inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
29
+ title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
30
+ author = "Tjong Kim Sang, Erik F. and
31
+ De Meulder, Fien",
32
+ booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
33
+ year = "2003",
34
+ url = "https://www.aclweb.org/anthology/W03-0419",
35
+ pages = "142--147",
36
+ }
37
+ """
38
+
39
+ _DESCRIPTION = """\
40
+ The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
41
+ four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
42
+ not belong to the previous three groups.
43
+ The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
44
+ a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
45
+ a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
46
+ and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
47
+ if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
48
+ B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
49
+ tagging scheme, whereas the original dataset uses IOB1.
50
+ For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
51
+ """
52
+
53
+ _URL = "aka.zip"
54
+ _TRAINING_FILE = "train.txt"
55
+ _DEV_FILE = "valid.txt"
56
+ _TEST_FILE = "test.txt"
57
+
58
+
59
class Conll2003Config(datasets.BuilderConfig):
    """Builder configuration for the Conll2003-style dataset."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
68
+
69
+
70
class Conll2003(datasets.GeneratorBasedBuilder):
    """CoNLL-2003-format dataset with a custom drilling-domain NER tag set.

    Reads space-separated, one-token-per-line files (word, POS tag, chunk
    tag, NER tag) extracted from the local archive named by ``_URL``.
    """

    BUILDER_CONFIGS = [
        Conll2003Config(
            # FIX: the original literal "Basirudin\aka_generic_ner" contained
            # the escape sequence "\a" (BEL, \x07). The backslash is now
            # escaped so the config name holds the intended literal backslash.
            name="Basirudin\\aka_generic_ner",
            version=datasets.Version("1.0.0"),
            description="Conll2003 dataset",
        ),
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Penn-Treebank-style part-of-speech tag set.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                    # IOB2 syntactic chunk tags.
                    "chunk_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-ADJP",
                                "I-ADJP",
                                "B-ADVP",
                                "I-ADVP",
                                "B-CONJP",
                                "I-CONJP",
                                "B-INTJ",
                                "I-INTJ",
                                "B-LST",
                                "I-LST",
                                "B-NP",
                                "I-NP",
                                "B-PP",
                                "I-PP",
                                "B-PRT",
                                "I-PRT",
                                "B-SBAR",
                                "I-SBAR",
                                "B-UCP",
                                "I-UCP",
                                "B-VP",
                                "I-VP",
                            ]
                        )
                    ),
                    # Custom IOB2 NER tags for drilling-domain entities; the
                    # classic CoNLL PER/ORG/LOC labels are intentionally
                    # disabled (kept commented as a record of the original set).
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                # "B-PER",
                                # "I-PER",
                                # "B-ORG",
                                # "I-ORG",
                                # "B-LOC",
                                # "I-LOC",
                                "B-KPI",  # KPI
                                "I-KPI",
                                "B-DIM",  # Dimension
                                "I-DIM",
                                "B-RVAL",  # Filter: Rig
                                "I-RVAL",
                                "B-WVAL",  # Filter: Well
                                "I-WVAL",
                                "B-FVAL",  # Filter: Field
                                "I-FVAL",
                                "B-SCVAL",  # Filter: Rig contractor/Service company
                                "I-SCVAL",
                                "B-OPVAL",  # Filter: Organization
                                "I-OPVAL",
                                "B-HVAL",  # Filter: Hole section
                                "I-HVAL",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Extract the archive and return one SplitGenerator per split."""
        # FIX: debug print() replaced with the module logger already used here.
        logger.info("_split_generators: resolving archive %s", _URL)
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from one CoNLL-format text file.

        Sentences are separated by blank lines; ``-DOCSTART-`` marker lines
        act as sentence boundaries and are never emitted as tokens. Each
        content line is expected to carry four space-separated columns:
        token, POS tag, chunk tag, NER tag (a short line would raise
        IndexError — assumed not to occur in these files; verify upstream).
        """
        # FIX: the stray debug print() was dropped; logger.info already
        # reports the filepath below.
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            chunk_tags = []
            ner_tags = []
            for line in f:
                # A blank line or a -DOCSTART- marker flushes the pending sentence.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                            "chunk_tags": chunk_tags,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                        chunk_tags = []
                        ner_tags = []
                else:
                    # conll2003 tokens are space separated
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    chunk_tags.append(splits[2])
                    # rstrip drops the trailing newline from the last column
                    ner_tags.append(splits[3].rstrip())
            # Flush the final sentence if the file has no trailing blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                    "chunk_tags": chunk_tags,
                    "ner_tags": ner_tags,
                }