dfki-nlp committed
Commit 5e55699 · 1 Parent(s): 82b44d1

Delete multilingual_tacred.py

Files changed (1)
multilingual_tacred.py +0 -334
multilingual_tacred.py DELETED
@@ -1,334 +0,0 @@
- # coding=utf-8
- # Copyright 2022 The current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """The TACRED Relation Classification dataset in various languages, DFKI format."""
- import itertools
- import json
- import os
-
- import datasets
-
- _CITATION = """\
- @inproceedings{zhang-etal-2017-position,
-     title = "Position-aware Attention and Supervised Data Improve Slot Filling",
-     author = "Zhang, Yuhao and
-       Zhong, Victor and
-       Chen, Danqi and
-       Angeli, Gabor and
-       Manning, Christopher D.",
-     booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
-     month = sep,
-     year = "2017",
-     address = "Copenhagen, Denmark",
-     publisher = "Association for Computational Linguistics",
-     url = "https://www.aclweb.org/anthology/D17-1004",
-     doi = "10.18653/v1/D17-1004",
-     pages = "35--45",
- }
-
- @inproceedings{alt-etal-2020-tacred,
-     title = "{TACRED} Revisited: A Thorough Evaluation of the {TACRED} Relation Extraction Task",
-     author = "Alt, Christoph and
-       Gabryszak, Aleksandra and
-       Hennig, Leonhard",
-     booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
-     month = jul,
-     year = "2020",
-     address = "Online",
-     publisher = "Association for Computational Linguistics",
-     url = "https://www.aclweb.org/anthology/2020.acl-main.142",
-     doi = "10.18653/v1/2020.acl-main.142",
-     pages = "1558--1569",
- }
- """
-
- _DESCRIPTION = """\
- TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
- and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges.
- Examples in TACRED cover 41 relation types as used in the TAC KBP challenges (e.g., per:schools_attended
- and org:members) or are labeled as no_relation if no defined relation is held. These examples are created
- by combining available human annotations from the TAC KBP challenges and crowdsourcing.
-
- Please see our EMNLP paper or our EMNLP slides for full details.
-
- Note: There is currently a label-corrected version of the TACRED dataset, which you should consider using instead of
- the original version released in 2017. For more details on this new version, see the TACRED Revisited paper
- published at ACL 2020.
-
- NOTE: This dataset reader supports a reduced version of the original TACRED JSON format with the following changes:
- - Removed fields: stanford_pos, stanford_ner, stanford_head, stanford_deprel, docid
- The motivation for this is to support additional languages, for which these fields were not required
- or available. The reader expects a configuration name specifying the variant (original or revised)
- and the language (as a two-letter ISO code). The default config is 'original-en'.
-
- The dataset reader changes the offsets of the following fields to conform with standard Python usage (see
- _generate_examples()):
- - subj_end to subj_end + 1 (make end offset exclusive)
- - obj_end to obj_end + 1 (make end offset exclusive)
- """
-
- _HOMEPAGE = "https://nlp.stanford.edu/projects/tacred/"
-
- _LICENSE = "LDC"
-
- _URL = "https://catalog.ldc.upenn.edu/LDC2018T24"
-
- # The HuggingFace datasets library doesn't host the datasets but only points to the original files
- # This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
- _PATCH_URLs = {
-     "dev": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/dev_patch.json",
-     "test": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/test_patch.json",
- }
-
- _VERSION = datasets.Version("1.0.0")
-
- _LANGS = [
-     "ar",
-     "de",
-     "en",
-     "es",
-     # "eu",
-     "fi",
-     "fr",
-     "hi",
-     "hu",
-     "ja",
-     "pl",
-     "ru",
-     "tr",
-     "zh",
- ]
-
- _CLASS_LABELS = [
-     "no_relation",
-     "org:alternate_names",
-     "org:city_of_headquarters",
-     "org:country_of_headquarters",
-     "org:dissolved",
-     "org:founded",
-     "org:founded_by",
-     "org:member_of",
-     "org:members",
-     "org:number_of_employees/members",
-     "org:parents",
-     "org:political/religious_affiliation",
-     "org:shareholders",
-     "org:stateorprovince_of_headquarters",
-     "org:subsidiaries",
-     "org:top_members/employees",
-     "org:website",
-     "per:age",
-     "per:alternate_names",
-     "per:cause_of_death",
-     "per:charges",
-     "per:children",
-     "per:cities_of_residence",
-     "per:city_of_birth",
-     "per:city_of_death",
-     "per:countries_of_residence",
-     "per:country_of_birth",
-     "per:country_of_death",
-     "per:date_of_birth",
-     "per:date_of_death",
-     "per:employee_of",
-     "per:origin",
-     "per:other_family",
-     "per:parents",
-     "per:religion",
-     "per:schools_attended",
-     "per:siblings",
-     "per:spouse",
-     "per:stateorprovince_of_birth",
-     "per:stateorprovince_of_death",
-     "per:stateorprovinces_of_residence",
-     "per:title",
- ]
-
-
- _NER_CLASS_LABELS = [
-     "LOCATION",
-     "ORGANIZATION",
-     "PERSON",
-     "DATE",
-     "MONEY",
-     "PERCENT",
-     "TIME",
-     "CAUSE_OF_DEATH",
-     "CITY",
-     "COUNTRY",
-     "CRIMINAL_CHARGE",
-     "EMAIL",
-     "HANDLE",
-     "IDEOLOGY",
-     "NATIONALITY",
-     "RELIGION",
-     "STATE_OR_PROVINCE",
-     "TITLE",
-     "URL",
-     "NUMBER",
-     "ORDINAL",
-     "MISC",
-     "DURATION",
-     "O",
- ]
-
-
- def convert_ptb_token(token: str) -> str:
-     """Convert PTB tokens to normal tokens"""
-     return {
-         "-lrb-": "(",
-         "-rrb-": ")",
-         "-lsb-": "[",
-         "-rsb-": "]",
-         "-lcb-": "{",
-         "-rcb-": "}",
-     }.get(token.lower(), token)
-
-
- class TacredDfkiConfig(datasets.BuilderConfig):
-     def __init__(self, **kwargs):
-         super(TacredDfkiConfig, self).__init__(version=_VERSION, **kwargs)
-
-
- class TacredDfki(datasets.GeneratorBasedBuilder):
-     """TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
-     and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges."""
-
-     BUILDER_CONFIGS = [
-         TacredDfkiConfig(
-             name=f"{variant}-{lang}",
-             description=f"{'The revised TACRED (corrected labels in dev and test split)' if variant == 'revised' else 'The original TACRED'} examples in language '{lang}'.",
-         )
-         for (lang, variant) in itertools.product(_LANGS, ["original", "revised"])
-     ]
-
-     DEFAULT_CONFIG_NAME = "original-en"  # type: ignore
-
-     @property
-     def manual_download_instructions(self):
-         return (
-             "To use TACRED you have to download it manually. "
-             "It is available via the LDC at https://catalog.ldc.upenn.edu/LDC2018T24. "
-             "Please extract all files into one folder and load the dataset with: "
-             "`datasets.load_dataset('tacred', data_dir='path/to/folder/folder_name')`. "
-             "Language-specific versions must be requested from git.nlp@dfki.de."
-         )
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "token": datasets.Sequence(datasets.Value("string")),
-                 "subj_start": datasets.Value("int32"),
-                 "subj_end": datasets.Value("int32"),
-                 "subj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
-                 "obj_start": datasets.Value("int32"),
-                 "obj_end": datasets.Value("int32"),
-                 "obj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
-                 "relation": datasets.ClassLabel(names=_CLASS_LABELS),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types (defined above)
-             features=features,
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
-         # By default the archives will be extracted, and a path to a cached folder where they are extracted is returned instead of the archive
-         patch_files = {}
-         variant, lang = self.config.name.split("-")
-         if variant == "revised":
-             patch_files = dl_manager.download_and_extract(_PATCH_URLs)
-
-         data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
-
-         if not os.path.exists(data_dir):
-             raise FileNotFoundError(
-                 "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('DFKI-SLT/tacred_dfki', data_dir=...)` that includes the unzipped files from the TACRED_LDC zip. Manual download instructions: {}".format(
-                     data_dir, self.manual_download_instructions
-                 )
-             )
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, lang, "train.json"),
-                     "patch_filepath": None,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, lang, "test.json"),
-                     "patch_filepath": patch_files.get("test"),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, lang, "dev.json"),
-                     "patch_filepath": patch_files.get("dev"),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, patch_filepath):
-         """Yields examples."""
-         # This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
-         # It is in charge of opening the given file and yielding (key, example) tuples from the dataset.
-         # The key is not important, it's more here for legacy reasons (legacy from tfds)
-         patch_examples = {}
-         if patch_filepath is not None:
-             with open(patch_filepath, encoding="utf-8") as f:
-                 patch_examples = {example["id"]: example for example in json.load(f)}
-
-         with open(filepath, encoding="utf-8") as f:
-             data = json.load(f)
-             for example in data:
-                 id_ = example["id"]
-
-                 if id_ in patch_examples:
-                     example.update(patch_examples[id_])
-
-                 yield id_, {
-                     "id": example["id"],
-                     "token": [convert_ptb_token(token) for token in example["token"]],
-                     "subj_start": example["subj_start"],
-                     "subj_end": example["subj_end"] + 1,  # make end offset exclusive
-                     "subj_type": example["subj_type"],
-                     "obj_start": example["obj_start"],
-                     "obj_end": example["obj_end"] + 1,  # make end offset exclusive
-                     "obj_type": example["obj_type"],
-                     "relation": example["relation"],
-                 }
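
For reference, the script deleted above was a manual-download loader: the LDC-licensed TACRED files had to be obtained separately and passed in via `data_dir`. A minimal usage sketch, assuming the repository id `DFKI-SLT/tacred_dfki` (taken from the script's own error message) and a hypothetical local path to the extracted per-language JSON files:

import datasets

# Hypothetical path to the extracted TACRED files; the script expects
# per-language subfolders, e.g. <data_dir>/en/train.json.
data_dir = "path/to/tacred"

# Config names follow "<variant>-<lang>"; "original-en" is the default.
# "revised-en" additionally downloads and applies the TACRED-Revisited
# dev/test label patches from the DFKI-NLP/tacrev repository.
ds = datasets.load_dataset("DFKI-SLT/tacred_dfki", name="revised-en", data_dir=data_dir)

# The reader shifts subj_end/obj_end by +1 to make end offsets exclusive,
# so standard Python slicing recovers the entity tokens directly:
ex = ds["train"][0]
subj_tokens = ex["token"][ex["subj_start"]:ex["subj_end"]]

Recent versions of the datasets library may additionally require passing `trust_remote_code=True` to run a script-based loader like this one.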