Datasets:

ArXiv:
License:
dfki-nlp committed on
Commit
fc96438
·
1 Parent(s): 89476c2

Delete tacred_dfki.py

Browse files
Files changed (1) hide show
  1. tacred_dfki.py +0 -304
tacred_dfki.py DELETED
@@ -1,304 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 The current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- """The TACRED Relation Classification dataset in various languages, DFKI format."""
17
- import itertools
18
- import json
19
- import os
20
-
21
- import datasets
22
-
23
# BibTeX entries for the original TACRED paper (Zhang et al., EMNLP 2017) and
# the label-corrected "TACRED Revisited" paper (Alt et al., ACL 2020).
_CITATION = """\
@inproceedings{zhang-etal-2017-position,
    title = "Position-aware Attention and Supervised Data Improve Slot Filling",
    author = "Zhang, Yuhao and
      Zhong, Victor and
      Chen, Danqi and
      Angeli, Gabor and
      Manning, Christopher D.",
    booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/D17-1004",
    doi = "10.18653/v1/D17-1004",
    pages = "35--45",
}

@inproceedings{alt-etal-2020-tacred,
    title = "{TACRED} Revisited: A Thorough Evaluation of the {TACRED} Relation Extraction Task",
    author = "Alt, Christoph and
      Gabryszak, Aleksandra and
      Hennig, Leonhard",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.142",
    doi = "10.18653/v1/2020.acl-main.142",
    pages = "1558--1569",
}
"""
56
-
57
- _DESCRIPTION = """\
58
- TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
59
- and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges.
60
- Examples in TACRED cover 41 relation types as used in the TAC KBP challenges (e.g., per:schools_attended
61
- and org:members) or are labeled as no_relation if no defined relation is held. These examples are created
62
- by combining available human annotations from the TAC KBP challenges and crowdsourcing.
63
-
64
- Please see our EMNLP paper, or our EMNLP slides for full details.
65
-
66
- Note: There is currently a label-corrected version of the TACRED dataset, which you should consider using instead of
67
- the original version released in 2017. For more details on this new version, see the TACRED Revisited paper
68
- published at ACL 2020.
69
-
70
- NOTE: This Datasetreader does not support the original TACRED JSON format, but instead the DFKI version, with
71
- the following changes:
72
- - Removed fields: stanford_pos, stanford_ner, stanford_head, stanford_deprel, docid
73
-
74
- NOTE 2: This Datasetreader changes the offsets of the following fields, to conform with standard Python usage (see
75
- #_generate_examples()):
76
- - subj_end to subj_end + 1 (make end offset exclusive)
77
- - oj_end to oj_end + 1 (make end offset exclusive)
78
- """
79
-
80
# Project page of the original (English) TACRED release.
_HOMEPAGE = "https://nlp.stanford.edu/projects/tacred/"

# Distributed through the Linguistic Data Consortium; not freely redistributable.
_LICENSE = "LDC"

# LDC catalog entry from which the data must be obtained manually.
_URL = "https://catalog.ldc.upenn.edu/LDC2018T24"

# The HuggingFace datasets library does not host the data itself; these URLs
# point to the TACRED-Revisited label patches for the dev and test splits,
# downloaded in `_split_generators` only for "revised-*" configs.
_PATCH_URLs = {
    "dev": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/dev_patch.json",
    "test": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/test_patch.json",
}

# Shared version for every builder config (see TacredDfkiConfig).
_VERSION = datasets.Version("1.0.0")
94
-
95
# Language codes for which split files are expected under `<data_dir>/<lang>/`
# (see `_split_generators`). NOTE(review): presumably translated versions of
# TACRED — confirm how the non-English data was produced.
_LANGS = [
    "ar",
    "de",
    "en",
    "es",
    # "eu",  # Basque deliberately disabled in the original script
    "fi",
    "fr",
    "hi",
    "hu",
    "ja",
    "pl",
    "ru",
    "tr",
    "zh",
]
111
-
112
# ClassLabel names: "no_relation" plus the 41 TAC KBP relation types
# (42 entries total). Order matters — it fixes the integer label ids.
_CLASS_LABELS = [
    "no_relation",
    "org:alternate_names",
    "org:city_of_headquarters",
    "org:country_of_headquarters",
    "org:dissolved",
    "org:founded",
    "org:founded_by",
    "org:member_of",
    "org:members",
    "org:number_of_employees/members",
    "org:parents",
    "org:political/religious_affiliation",
    "org:shareholders",
    "org:stateorprovince_of_headquarters",
    "org:subsidiaries",
    "org:top_members/employees",
    "org:website",
    "per:age",
    "per:alternate_names",
    "per:cause_of_death",
    "per:charges",
    "per:children",
    "per:cities_of_residence",
    "per:city_of_birth",
    "per:city_of_death",
    "per:countries_of_residence",
    "per:country_of_birth",
    "per:country_of_death",
    "per:date_of_birth",
    "per:date_of_death",
    "per:employee_of",
    "per:origin",
    "per:other_family",
    "per:parents",
    "per:religion",
    "per:schools_attended",
    "per:siblings",
    "per:spouse",
    "per:stateorprovince_of_birth",
    "per:stateorprovince_of_death",
    "per:stateorprovinces_of_residence",
    "per:title",
]
156
-
157
-
158
def convert_ptb_token(token: str) -> str:
    """Map a Penn Treebank bracket escape (e.g. ``-LRB-``) back to its literal
    bracket character; any other token is returned unchanged."""
    lowered = token.lower()
    for escape, bracket in zip(
        ("-lrb-", "-rrb-", "-lsb-", "-rsb-", "-lcb-", "-rcb-"), "()[]{}"
    ):
        if lowered == escape:
            return bracket
    return token
168
-
169
-
170
class TacredDfkiConfig(datasets.BuilderConfig):
    """BuilderConfig for the DFKI TACRED variants; pins the shared `_VERSION`."""

    def __init__(self, **kwargs):
        # Force the module-wide dataset version; all other settings pass through.
        super().__init__(version=_VERSION, **kwargs)
173
-
174
-
175
class TacredDfki(datasets.GeneratorBasedBuilder):
    """TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
    and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges."""

    # One config per (variant, language) combination, e.g. "original-en", "revised-de".
    BUILDER_CONFIGS = [
        TacredDfkiConfig(
            name=f"{variant}-{lang}",
            description=f"{'The revised TACRED (corrected labels in dev and test split)' if variant == 'revised' else 'The original TACRED'} examples in language '{lang}'.",
        )
        for (lang, variant) in itertools.product(_LANGS, ["original", "revised"])
    ]

    DEFAULT_CONFIG_NAME = "original-en"  # type: ignore

    @property
    def manual_download_instructions(self):
        """Message shown to users when `data_dir` is missing; TACRED is LDC-licensed
        and must be obtained manually."""
        # FIX: the original concatenation was missing a sentence separator after
        # the LDC URL (rendering "...LDC2018T24Please extract...") and referred
        # to the dataset as 'tacred' while the FileNotFoundError message in
        # `_split_generators` uses 'DFKI-SLT/tacred_dfki'.
        return (
            "To use TACRED you have to download it manually. "
            "It is available via the LDC at https://catalog.ldc.upenn.edu/LDC2018T24. "
            "Please extract all files in one folder and load the dataset with: "
            "`datasets.load_dataset('DFKI-SLT/tacred_dfki', data_dir='path/to/folder/folder_name')`. "
            "TODO: Language-specific versions must be downloaded from URL and extracted to PATH..."
        )

    def _info(self):
        """Declare the feature schema, homepage, license and citation."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "subj_start": datasets.Value("int32"),
                # End offsets are exclusive: shifted by +1 in `_generate_examples`.
                "subj_end": datasets.Value("int32"),
                "subj_type": datasets.Value("string"),
                "obj_start": datasets.Value("int32"),
                "obj_end": datasets.Value("int32"),
                "obj_type": datasets.Value("string"),
                "relation": datasets.ClassLabel(names=_CLASS_LABELS),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No canonical (input, target) pair for `as_supervised=True`.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return SplitGenerators for train/validation/test.

        For "revised-*" configs the TACRED-Revisited label patches for the dev
        and test splits are downloaded; the train split is never patched.
        Raises FileNotFoundError when the manually supplied `data_dir` is absent.
        """
        patch_files = {}
        if self.config.name.startswith("revised-"):
            patch_files = dl_manager.download_and_extract(_PATCH_URLs)

        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        # Config names look like "<variant>-<lang>", e.g. "original-en".
        lang = self.config.name.split("-")[1]

        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('DFKI-SLT/tacred_dfki', data_dir=...)` that includes the unzipped files from the TACRED_LDC zip. Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang, "train.json"),
                    "patch_filepath": None,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang, "test.json"),
                    "patch_filepath": patch_files.get("test"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang, "dev.json"),
                    "patch_filepath": patch_files.get("dev"),
                },
            ),
        ]

    def _generate_examples(self, filepath, patch_filepath):
        """Yield (id, example) pairs from one split file.

        If `patch_filepath` is given, patched fields (keyed by example id)
        overwrite the original ones. PTB bracket escapes in the tokens are
        converted back to literal brackets, and both span end offsets are
        shifted by +1 so they are exclusive, per standard Python slicing.
        """
        patch_examples = {}
        if patch_filepath is not None:
            with open(patch_filepath, encoding="utf-8") as f:
                patch_examples = {example["id"]: example for example in json.load(f)}

        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for example in data:
                id_ = example["id"]

                if id_ in patch_examples:
                    example.update(patch_examples[id_])

                yield id_, {
                    "id": example["id"],
                    # Source files store tokens under "token" (singular).
                    "tokens": [convert_ptb_token(token) for token in example["token"]],
                    "subj_start": example["subj_start"],
                    "subj_end": example["subj_end"] + 1,  # make end offset exclusive
                    "subj_type": example["subj_type"],
                    "obj_start": example["obj_start"],
                    "obj_end": example["obj_end"] + 1,  # make end offset exclusive
                    "obj_type": example["obj_type"],
                    "relation": example["relation"],
                }