Datasets:

ArXiv:
License:
dfki-nlp commited on
Commit
7dff620
·
1 Parent(s): 108e493

Upload tacred_dfki.py

Browse files
Files changed (1) hide show
  1. tacred_dfki.py +302 -0
tacred_dfki.py ADDED
@@ -0,0 +1,302 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """The TACRED Relation Classification dataset in various languages, DFKI format."""
17
+ import itertools
18
+ import json
19
+ import os
20
+
21
+ import datasets
22
+
23
# BibTeX citations for the original TACRED paper and the TACRED Revisited paper.
_CITATION = """\
@inproceedings{zhang-etal-2017-position,
    title = "Position-aware Attention and Supervised Data Improve Slot Filling",
    author = "Zhang, Yuhao  and
      Zhong, Victor  and
      Chen, Danqi  and
      Angeli, Gabor  and
      Manning, Christopher D.",
    booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/D17-1004",
    doi = "10.18653/v1/D17-1004",
    pages = "35--45",
}

@inproceedings{alt-etal-2020-tacred,
    title = "{TACRED} Revisited: A Thorough Evaluation of the {TACRED} Relation Extraction Task",
    author = "Alt, Christoph  and
      Gabryszak, Aleksandra  and
      Hennig, Leonhard",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.142",
    doi = "10.18653/v1/2020.acl-main.142",
    pages = "1558--1569",
}
"""

# NOTE: fixed the field-name typo in the offset note below ("oj_end" -> "obj_end");
# the actual feature emitted by _generate_examples() is "obj_end".
_DESCRIPTION = """\
TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges.
Examples in TACRED cover 41 relation types as used in the TAC KBP challenges (e.g., per:schools_attended
and org:members) or are labeled as no_relation if no defined relation is held. These examples are created
by combining available human annotations from the TAC KBP challenges and crowdsourcing.

Please see our EMNLP paper, or our EMNLP slides for full details.

Note: There is currently a label-corrected version of the TACRED dataset, which you should consider using instead of
the original version released in 2017. For more details on this new version, see the TACRED Revisited paper
published at ACL 2020.

NOTE: This Datasetreader does not support the original TACRED JSON format, but instead the DFKI version, with
the following changes:
- Removed fields: stanford_pos, stanford_ner, stanford_head, stanford_deprel, docid

NOTE 2: This Datasetreader changes the offsets of the following fields, to conform with standard Python usage (see
#_generate_examples()):
- subj_end to subj_end + 1 (make end offset exclusive)
- obj_end to obj_end + 1 (make end offset exclusive)
"""

_HOMEPAGE = "https://nlp.stanford.edu/projects/tacred/"

# TACRED is distributed by the LDC and cannot be downloaded automatically.
_LICENSE = "LDC"

_URL = "https://catalog.ldc.upenn.edu/LDC2018T24"

# The HuggingFace dataset library doesn't host the datasets but only points to the original files.
# These patch files (TACRED Revisited) correct labels in the dev and test splits;
# they are only downloaded for "revised-*" configurations (see _split_generators()).
_PATCH_URLs = {
    "dev": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/dev_patch.json",
    "test": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/test_patch.json",
}

_VERSION = datasets.Version("1.0.0")

# Languages for which a translated TACRED variant is available; one builder
# configuration is generated per (variant, language) pair.
_LANGS = [
    "ar",
    "de",
    "en",
    "es",
    # "eu",
    "fi",
    "fr",
    "hi",
    "hu",
    "ja",
    "pl",
    "ru",
    "tr",
    "zh",
]

# The 41 TAC KBP relation types plus "no_relation"; order defines the
# integer ids of the ClassLabel feature.
_CLASS_LABELS = [
    "no_relation",
    "org:alternate_names",
    "org:city_of_headquarters",
    "org:country_of_headquarters",
    "org:dissolved",
    "org:founded",
    "org:founded_by",
    "org:member_of",
    "org:members",
    "org:number_of_employees/members",
    "org:parents",
    "org:political/religious_affiliation",
    "org:shareholders",
    "org:stateorprovince_of_headquarters",
    "org:subsidiaries",
    "org:top_members/employees",
    "org:website",
    "per:age",
    "per:alternate_names",
    "per:cause_of_death",
    "per:charges",
    "per:children",
    "per:cities_of_residence",
    "per:city_of_birth",
    "per:city_of_death",
    "per:countries_of_residence",
    "per:country_of_birth",
    "per:country_of_death",
    "per:date_of_birth",
    "per:date_of_death",
    "per:employee_of",
    "per:origin",
    "per:other_family",
    "per:parents",
    "per:religion",
    "per:schools_attended",
    "per:siblings",
    "per:spouse",
    "per:stateorprovince_of_birth",
    "per:stateorprovince_of_death",
    "per:stateorprovinces_of_residence",
    "per:title",
]
156
+
157
+
158
# Penn-Treebank escape tokens and the literal bracket characters they stand for.
_PTB_BRACKETS = (
    ("-lrb-", "("),
    ("-rrb-", ")"),
    ("-lsb-", "["),
    ("-rsb-", "]"),
    ("-lcb-", "{"),
    ("-rcb-", "}"),
)


def convert_ptb_token(token: str) -> str:
    """Convert PTB tokens to normal tokens.

    PTB bracket escapes such as ``-LRB-`` are mapped (case-insensitively) to
    their literal characters, e.g. ``(``. Any other token is returned
    unchanged.
    """
    lowered = token.lower()
    for escape, literal in _PTB_BRACKETS:
        if lowered == escape:
            return literal
    return token
168
+
169
+
170
class TacredDfkiConfig(datasets.BuilderConfig):
    """BuilderConfig for the TACRED DFKI dataset.

    Every configuration shares the same dataset version (``_VERSION``);
    callers supply the remaining ``BuilderConfig`` fields (name,
    description, ...) as keyword arguments.
    """

    def __init__(self, **kwargs):
        # Pin the version for all configs; forward everything else untouched.
        super().__init__(version=_VERSION, **kwargs)
173
+
174
+
175
class TacredDfki(datasets.GeneratorBasedBuilder):
    """TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
    and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges."""

    # One config per (variant, language) pair, e.g. "original-en" or "revised-de".
    # The "revised" variants apply the TACRED Revisited label patches to dev/test.
    BUILDER_CONFIGS = [
        TacredDfkiConfig(
            name=f"{variant}-{lang}",
            description=f"{'The revised TACRED (corrected labels in dev and test split)' if variant == 'revised' else 'The original TACRED'} examples in language '{lang}'.",
        )
        for (lang, variant) in itertools.product(_LANGS, ["original", "revised"])
    ]

    DEFAULT_CONFIG_NAME = "original-en"  # type: ignore

    @property
    def manual_download_instructions(self):
        # NOTE: fixed the fragment concatenation (missing separators between the
        # string literals) and the dataset id ('tacred' -> 'DFKI-SLT/tacred_dfki')
        # so the instructions agree with the error raised in _split_generators().
        return (
            "To use TACRED you have to download it manually. "
            "It is available via the LDC at https://catalog.ldc.upenn.edu/LDC2018T24. "
            "Please extract all files in one folder and load the dataset with: "
            "`datasets.load_dataset('DFKI-SLT/tacred_dfki', data_dir='path/to/folder/folder_name')`. "
            "TODO: Language-specific versions must be downloaded from URL and extracted to PATH..."
        )

    def _info(self):
        """Return the DatasetInfo describing the features of every example."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "subj_start": datasets.Value("int32"),
                "subj_end": datasets.Value("int32"),
                "subj_type": datasets.Value("string"),
                "obj_start": datasets.Value("int32"),
                "obj_end": datasets.Value("int32"),
                "obj_type": datasets.Value("string"),
                "relation": datasets.ClassLabel(names=_CLASS_LABELS),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # There is no canonical (input, target) pair for relation extraction.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Locates the manually downloaded LDC data under
        ``<manual_dir>/<lang>/{train,dev,test}.json`` and, for "revised-*"
        configs, downloads the TACRED Revisited patch files for dev/test.
        """
        # Only the revised variants need the label-correction patches;
        # the train split is never patched.
        patch_files = {}
        if self.config.name.startswith("revised-"):
            patch_files = dl_manager.download_and_extract(_PATCH_URLs)

        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        # Config names have the shape "<variant>-<lang>"; the language selects
        # the data sub-directory.
        lang = self.config.name.split('-')[1]

        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('DFKI-SLT/tacred_dfki', data_dir=...)` that includes the unzipped files from the TACRED_LDC zip. Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang, "train.json"),
                    "patch_filepath": None,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang, "test.json"),
                    # None for "original-*" configs (patch_files stays empty).
                    "patch_filepath": patch_files.get("test"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang, "dev.json"),
                    "patch_filepath": patch_files.get("dev"),
                },
            ),
        ]

    def _generate_examples(self, filepath, patch_filepath):
        """Yields (id, example) tuples read from a TACRED DFKI JSON file.

        Args:
            filepath: Path to a split file (a JSON list of example dicts).
            patch_filepath: Optional path to a TACRED Revisited patch file;
                when given, patched fields overwrite the originals by id.

        End offsets are shifted by +1 so they are exclusive, per standard
        Python slicing convention (see _DESCRIPTION).
        """
        patch_examples = {}
        if patch_filepath is not None:
            with open(patch_filepath, encoding="utf-8") as f:
                patch_examples = {example["id"]: example for example in json.load(f)}

        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for example in data:
                id_ = example["id"]

                # Apply the label-correction patch for this example, if any.
                if id_ in patch_examples:
                    example.update(patch_examples[id_])

                yield id_, {
                    "id": example["id"],
                    # De-escape PTB bracket tokens (e.g. "-LRB-" -> "(").
                    "tokens": [convert_ptb_token(token) for token in example["token"]],
                    "subj_start": example["subj_start"],
                    "subj_end": example["subj_end"] + 1,  # make end offset exclusive
                    "subj_type": example["subj_type"],
                    "obj_start": example["obj_start"],
                    "obj_end": example["obj_end"] + 1,  # make end offset exclusive
                    "obj_type": example["obj_type"],
                    "relation": example["relation"],
                }