Francesco De Toni committed on
Commit
abdcfc0
·
1 Parent(s): 3307e00

Upload HIPE2020_sent-split.py

Browse files
Files changed (1) hide show
  1. HIPE2020_sent-split.py +459 -0
HIPE2020_sent-split.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """TODO"""
18
+
19
+ from datetime import datetime
20
+ from typing import Optional
21
+ import datasets
22
+ import re
23
+
24
+
25
+ _CITATION = """\
26
+ TODO
27
+ """
28
+
29
+ _DESCRIPTION = """\
30
+ TODO
31
+ """
32
+
33
+ _BASE_URL_TRAIN_DEV = "https://raw.githubusercontent.com/impresso/CLEF-HIPE-2020/master/data/v1.4/"
34
+
35
+
36
+ _URLs = {
37
+ "EN": {
38
+ "dev": _BASE_URL_TRAIN_DEV + "en/HIPE-data-v1.4-dev-en.tsv?raw=true"
39
+ }, # English only has dev
40
+ "DE": {
41
+ "dev": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-dev-de.tsv?raw=true",
42
+ "train": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-train-de.tsv?raw=true",
43
+ },
44
+ "FR": {
45
+ "dev": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-dev-fr.tsv?raw=true",
46
+ "train": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-train-fr.tsv?raw=true",
47
+ },
48
+ }
49
+
50
+
51
class HIPE2020Config(datasets.BuilderConfig):
    """BuilderConfig for HIPE2020.

    Args:
        data_urls: mapping of split name ("train" / "dev") to the download
            URL of that split's TSV file for one language.
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
            (e.g. ``name``, ``version``, ``description``).
    """

    def __init__(self, data_urls, **kwargs):
        super().__init__(**kwargs)
        # Split-name -> URL mapping consumed by ``_split_generators``.
        self.data_urls = data_urls
61
+
62
+
63
class HIPE2020(datasets.GeneratorBasedBuilder):
    """HIPE2020 dataset builder (sentence-split variant).

    Parses the CLEF-HIPE-2020 tab-separated files and yields one example
    per sentence, using the ``PySBDSegment`` flag in the MISC column as
    the sentence boundary.
    """

    BUILDER_CONFIGS = [
        HIPE2020Config(
            name="en",
            data_urls=_URLs["EN"],
            version=datasets.Version("1.0.0"),
            description="HIPE dataset covering English",
        ),
        HIPE2020Config(
            name="de",
            data_urls=_URLs["DE"],
            version=datasets.Version("1.0.0"),
            description="HIPE dataset covering German",
        ),
        HIPE2020Config(
            name="fr",
            data_urls=_URLs["FR"],
            version=datasets.Version("1.0.0"),
            description="HIPE dataset covering French",
        ),
    ]

    def _info(self):
        """Declare the example schema of the dataset."""

        def tag_feature(names):
            # Sequence of per-token class labels.
            return datasets.Sequence(datasets.features.ClassLabel(names=names))

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # NOTE: this key intentionally has no "_tags" suffix to stay
                    # consistent with the keys yielded by _generate_examples.
                    "NE_COARSE_LIT": tag_feature(
                        [
                            "O",
                            "B-comp",
                            "B-loc",
                            "B-org",
                            "B-pers",
                            "B-prod",
                            "B-time",
                            "I-loc",
                            "I-org",
                            "I-pers",
                            "I-prod",
                            "I-time",
                            "_",
                        ]
                    ),
                    "NE_COARSE_METO_tags": tag_feature(
                        [
                            "O",
                            "B-loc",
                            "B-org",
                            "B-pers",
                            "B-prod",
                            "I-loc",
                            "I-org",
                            "I-pers",
                            "_",
                        ]
                    ),
                    "NE_FINE_LIT_tags": tag_feature(
                        [
                            "O",
                            "B-comp.name",
                            "B-loc",
                            "B-loc.add.elec",
                            "B-loc.add.phys",
                            "B-loc.adm.nat",
                            "B-loc.adm.reg",
                            "B-loc.adm.sup",
                            "B-loc.adm.town",
                            "B-loc.fac",
                            "B-loc.oro",
                            "B-loc.phys.astro",
                            "B-loc.phys.geo",
                            "B-loc.phys.hydro",
                            "B-loc.unk",
                            "B-org",
                            "B-org.adm",
                            "B-org.ent",
                            "B-org.ent.pressagency",
                            "B-pers",
                            "B-pers.coll",
                            "B-pers.ind",
                            "B-pers.ind.articleauthor",
                            "B-prod",
                            "B-prod.doctr",
                            "B-prod.media",
                            "B-time",
                            "B-time.date.abs",
                            "I-loc",
                            "I-loc.add.elec",
                            "I-loc.add.phys",
                            "I-loc.adm.nat",
                            "I-loc.adm.reg",
                            "I-loc.adm.sup",
                            "I-loc.adm.town",
                            "I-loc.fac",
                            "I-loc.oro",
                            "I-loc.phys.geo",
                            "I-loc.phys.hydro",
                            "I-loc.unk",
                            "I-org",
                            "I-org.adm",
                            "I-org.ent",
                            "I-org.ent.pressagency",
                            "I-pers",
                            "I-pers.coll",
                            "I-pers.ind",
                            "I-pers.ind.articleauthor",
                            "I-prod",
                            "I-prod.doctr",
                            "I-prod.media",
                            "I-time",
                            "I-time.date.abs",
                            "_",
                        ]
                    ),
                    "NE_FINE_METO_tags": tag_feature(
                        [
                            "O",
                            "B-loc",
                            "B-loc.adm.reg",
                            "B-loc.adm.town",
                            "B-loc.fac",
                            "B-loc.oro",
                            "B-org",
                            "B-org.adm",
                            "B-org.ent",
                            "B-pers.coll",
                            "B-pers.ind",
                            "B-prod.media",
                            "I-loc",
                            "I-loc.adm.reg",
                            "I-loc.fac",
                            "I-loc.oro",
                            "I-org",
                            "I-org.adm",
                            "I-org.ent",
                            "I-pers",
                            "I-pers.ind",
                            "_",
                        ]
                    ),
                    "NE_FINE_COMP_tags": tag_feature(
                        [
                            "O",
                            "B-comp.demonym",
                            "B-comp.function",
                            "B-comp.name",
                            "B-comp.qualifier",
                            "B-comp.title",
                            "I-comp.demonym",
                            "I-comp.function",
                            "I-comp.name",
                            "I-comp.qualifier",
                            "I-comp.title",
                            "_",
                        ]
                    ),
                    "NE_NESTED_tags": tag_feature(
                        [
                            "O",
                            "B-loc",
                            "B-loc.adm.nat",
                            "B-loc.adm.reg",
                            "B-loc.adm.sup",
                            "B-loc.adm.town",
                            "B-loc.fac",
                            "B-loc.oro",
                            "B-loc.phys.geo",
                            "B-loc.phys.hydro",
                            "B-org",
                            "B-org.adm",
                            "B-org.ent",
                            "B-pers.coll",
                            "B-pers.ind",
                            "B-prod.media",
                            "B-time.date.abs",
                            "I-loc",
                            "I-loc.adm.nat",
                            "I-loc.adm.reg",
                            "I-loc.adm.town",
                            "I-loc.fac",
                            "I-loc.oro",
                            "I-loc.phys.geo",
                            "I-loc.phys.hydro",
                            "I-org",
                            "I-org.adm",
                            "I-org.ent",
                            "I-pers.ind",
                            "_",
                        ]
                    ),
                    "NEL_LIT_ID": datasets.Sequence(datasets.Value("string")),
                    "NEL_METO_ID": datasets.Sequence(datasets.Value("string")),
                    "no_space_after": datasets.Sequence(datasets.Value("bool")),
                    "end_of_line": datasets.Sequence(datasets.Value("bool")),
                    # FIX: was declared as "PySBDSegment", but _generate_examples
                    # yields this field under "end_of_sentence"; the mismatch made
                    # every example fail schema encoding.
                    "end_of_sentence": datasets.Sequence(datasets.Value("bool")),
                    "date": datasets.Value("timestamp[s]"),
                    "title": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="TODO",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators. English only has a dev (validation) split."""
        downloaded_files = dl_manager.download_and_extract(self.config.data_urls)
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
            # TODO: add test splits once they are available.
        ]
        if self.config.name != "en":
            # Keep TRAIN first, matching the conventional split order.
            splits.insert(
                0,
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": downloaded_files["train"]},
                ),
            )
        return splits

    def _generate_examples(self, filepath):
        """Yield one ``(guid, example)`` pair per sentence from a HIPE-2020 TSV file.

        Sentence boundaries are taken from the ``PySBDSegment`` flag in the
        MISC column; document boundaries are the ``#``-prefixed metadata
        lines, from which date / newspaper title / document id are parsed.
        """
        # FIX: was r"\d{4}-\d{2}-\d{02}" — {02} is a sloppy (if accidentally
        # working) repeat count.
        date_re = re.compile(r"# date = (\d{4}-\d{2}-\d{2})")
        title_re = re.compile(r"newspaper = (\w{3})")
        document_id_re = re.compile(r"document_id = (.*)")

        # Per-token columns accumulated for the current sentence; the first
        # nine are read from the TSV columns, the last three from MISC flags.
        token_keys = (
            "tokens",
            "NE_COARSE_LIT",
            "NE_COARSE_METO_tags",
            "NE_FINE_LIT_tags",
            "NE_FINE_METO_tags",
            "NE_FINE_COMP_tags",
            "NE_NESTED_tags",
            "NEL_LIT_ID",
            "NEL_METO_ID",
            "no_space_after",
            "end_of_line",
            "end_of_sentence",
        )

        with open(filepath, encoding="utf-8") as f:
            guid = 0
            buffers = {key: [] for key in token_keys}
            # Document-level metadata; FIX: initialized so the first yield
            # cannot hit an unbound local if a metadata line is missing.
            date = None
            title = None
            document_id = None
            new_sentence = False

            def example(buf):
                # Assemble one complete example from the current buffers.
                # FIX: the original mid-document yield omitted date/title/
                # document_id even though the schema requires them.
                ex = {"id": str(guid)}
                ex.update(buf)
                ex["date"] = date
                ex["title"] = title
                ex["document_id"] = document_id
                return ex

            for line in f:
                # Skip the column-header row
                # (TOKEN NE-COARSE-LIT ... NEL-LIT NEL-METO MISC).
                if line.startswith("TOKEN"):
                    continue
                if line.startswith("#") or line == "\n":
                    # Metadata / blank line: harvest document metadata and
                    # flush any sentence still in the buffers.
                    date_match = date_re.search(line)
                    if date_match:
                        date = datetime.strptime(date_match.group(1), "%Y-%m-%d")
                    title_match = title_re.search(line)
                    if title_match:
                        title = title_match.group(1)
                    document_id_match = document_id_re.search(line)
                    if document_id_match:
                        document_id = document_id_match.group(1)
                    if buffers["tokens"]:
                        yield guid, example(buffers)
                        guid += 1
                        buffers = {key: [] for key in token_keys}
                    # FIX: reset the flag; the original left it True across a
                    # document boundary, yielding a spurious empty example on
                    # the next token line.
                    new_sentence = False
                else:
                    # Flush the previous sentence before starting a new one.
                    if new_sentence and buffers["tokens"]:
                        yield guid, example(buffers)
                        guid += 1
                        buffers = {key: [] for key in token_keys}

                    # HIPE 2020 columns are tab separated:
                    # TOKEN NE-COARSE-LIT NE-COARSE-METO NE-FINE-LIT
                    # NE-FINE-METO NE-FINE-COMP NE-NESTED NEL-LIT NEL-METO MISC
                    splits = line.split("\t")
                    buffers["tokens"].append(splits[0])
                    buffers["NE_COARSE_LIT"].append(splits[1])
                    buffers["NE_COARSE_METO_tags"].append(splits[2])
                    buffers["NE_FINE_LIT_tags"].append(splits[3])
                    buffers["NE_FINE_METO_tags"].append(splits[4])
                    buffers["NE_FINE_COMP_tags"].append(splits[5])
                    buffers["NE_NESTED_tags"].append(splits[6])
                    buffers["NEL_LIT_ID"].append(splits[7])
                    buffers["NEL_METO_ID"].append(splits[8])
                    misc = splits[-1]
                    is_no_space = "NoSpaceAfter" in misc
                    is_end_of_line = "EndOfLine" in misc
                    is_end_of_sentence = "PySBDSegment" in misc
                    buffers["no_space_after"].append(is_no_space)
                    buffers["end_of_line"].append(is_end_of_line)
                    buffers["end_of_sentence"].append(is_end_of_sentence)

                    new_sentence = is_end_of_sentence

            # Last example; FIX: guarded so a trailing blank line does not
            # produce an empty example (the original yielded unconditionally).
            if buffers["tokens"]:
                yield guid, example(buffers)