Datasets:

Modalities:
Text
ArXiv:
Libraries:
Datasets
License:
jilee committed on
Commit
42f48a1
·
1 Parent(s): 7f29376

Delete texprax_dataset.py

Browse files
Files changed (1) hide show
  1. texprax_dataset.py +0 -260
texprax_dataset.py DELETED
@@ -1,260 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """TexPrax: Data collected during the project https://texprax.de/ """
15
-
16
-
17
- import csv
18
- import json
19
- import os
20
- import ast
21
-
22
- import datasets
23
-
24
-
25
# BibTeX entry for the TexPrax paper (arXiv:2208.07846); attached to the DatasetInfo.
_CITATION = """\
@article{stangier2022texprax,
title={TexPrax: A Messaging Application for Ethical, Real-time Data Collection and Annotation},
author={Stangier, Lorenz and Lee, Ji-Ung and Wang, Yuxi and M{\"u}ller, Marvin and Frick, Nicholas and Metternich, Joachim and Gurevych, Iryna},
journal={arXiv preprint arXiv:2208.07846},
year={2022}
}
"""

# Human-readable summary shown on the dataset page.
_DESCRIPTION = """\
This dataset was collected in the [TexPrax](https://texprax.de/) project and contains named entities annotated by three researchers as well as annotated sentences (problem/P, cause/C, solution/S, and other/O).

"""

_HOMEPAGE = "https://texprax.de/"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0"

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)

# Direct download URL (TUdatalib) for the sentence-classification zip archive.
_SENTENCE_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-sentences.zip?sequence=8&isAllowed=y"

# Direct download URL (TUdatalib) for the named-entity-recognition zip archive.
_ENTITY_URL = "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/3534/texprax-ner.zip?sequence=9&isAllowed=y"
49
-
50
class TexPraxConfig(datasets.BuilderConfig):
    """BuilderConfig for the TexPrax dataset."""

    def __init__(self, features, data_url, citation=_CITATION, url=_HOMEPAGE, label_classes=("False", "True"), **kwargs):
        """BuilderConfig for TexPrax.

        Args:
            features: *list[string]*, list of the features that will appear in the
                feature dict. Should not include "label".
            data_url: *string*, url to download the zip file from.
            citation: *string*, citation for the data set. Defaults to the
                module-level _CITATION so callers may omit it.
            url: *string*, url for information about the data set. Defaults to
                the module-level _HOMEPAGE so callers may omit it.
            label_classes: *list[string]*, the list of classes for the label if the
                label is present as a string. Non-string labels will be cast to either
                'False' or 'True'.
            **kwargs: keyword arguments forwarded to super.
        """
        super(TexPraxConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        # Bug fix: the `citation` argument used to be ignored (the module-level
        # _CITATION was always assigned); now the parameter is honored while the
        # default keeps the previous behavior.
        self.citation = citation
        self.url = url
80
class TexPraxDataset(datasets.GeneratorBasedBuilder):
    """German dialogues that occurred between workers in a factory.

    The dataset contains token-level entity annotations as well as
    sentence-level annotations (problem/P, cause/C, solution/S, other/O),
    exposed through two configurations.
    """

    VERSION = datasets.Version("1.1.0")

    # Two configurations: sentence-level classification and token-level NER.
    # Load with e.g. datasets.load_dataset('texprax_dataset', 'sentence_classification').
    BUILDER_CONFIGS = [
        TexPraxConfig(
            name="sentence_classification",
            description="Sentence level annotations of the TexPrax dataset.",
            features=["sentence"],
            # Bug fix: was `_SENTENCE_URLS`, an undefined name (NameError).
            data_url=_SENTENCE_URL,
        ),
        TexPraxConfig(
            name="named_entity_recognition",
            description="BIO-tagged named entities of the TexPrax dataset.",
            features=["tokens"],
            data_url=_ENTITY_URL,
        ),
    ]

    DEFAULT_CONFIG_NAME = "sentence_classification"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def _info(self):
        """Build the DatasetInfo whose features match `_generate_examples` exactly.

        Bug fix: the feature keys used to be "id"/"class" while the generator
        yields "idx"/"label", which would fail when examples are encoded.
        """
        if self.config.name == "sentence_classification":
            features = datasets.Features(
                {
                    # Note: idx consists of <dialog-id_sentence-id_turn-id>
                    "idx": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    # One of problem/P, cause/C, solution/S, other/O.
                    "label": datasets.Value("string"),
                }
            )
        else:  # named_entity_recognition
            features = datasets.Features(
                {
                    # Note: idx consists of <dialog-id_turn-id>
                    "idx": datasets.Value("string"),
                    # Bug fix: "list(string)" is not a valid Value dtype;
                    # variable-length lists are declared with datasets.Sequence.
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "labels": datasets.Sequence(datasets.Value("string")),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the configured archive and define the splits.

        Returns four splits: two training splits (batch 1, one per factory),
        a validation split (batch 2) and a test split (batch 3).
        """
        # Bug fix: the `if` keyword was missing here (SyntaxError).
        if self.config.name == "sentence_classification":
            data_dir = dl_manager.download_and_extract(_SENTENCE_URL)
            prefix = "sents"
        else:
            data_dir = dl_manager.download_and_extract(_ENTITY_URL)
            prefix = "entities"
        # Bug fix: datasets.Split.TRAIN1 / TRAIN2 do not exist; custom split
        # names are given as plain strings (NamedSplit).
        return [
            datasets.SplitGenerator(
                name="train_industrie",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "industrie_" + prefix + "_batch_1.csv"),
                    "split": "batch-1-industrie",
                },
            ),
            datasets.SplitGenerator(
                name="train_zerspanung",
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "zerspanung_" + prefix + "_batch_1.csv"),
                    "split": "batch-1-zerspanung",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, prefix + "_batch_2.csv"),
                    "split": "batch-2",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, prefix + "_batch_3.csv"),
                    "split": "batch-3",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one semicolon-delimited CSV file.

        The `key` is for legacy reasons (tfds) and only needs to be unique
        per example within the split.
        """
        with open(filepath, encoding="utf-8") as f:
            creader = csv.reader(f, delimiter=";", quotechar='"')
            for key, row in enumerate(creader):
                idx, sentence, labels = row
                if self.config.name == "sentence_classification":
                    yield key, {
                        "idx": idx,
                        "sentence": sentence,
                        "label": labels,
                    }
                else:
                    # Token/label columns are stored as Python list literals;
                    # ast.literal_eval parses them without executing code.
                    yield key, {
                        "idx": idx,
                        "tokens": [t.strip() for t in ast.literal_eval(sentence)],
                        "labels": [l.strip() for l in ast.literal_eval(labels)],
                    }
258
-
259
-
260
-