thomasyu888 commited on
Commit
97afcd5
·
1 Parent(s): 569616c

Create testdataset.py

Browse files
Files changed (1) hide show
  1. testdataset.py +172 -0
testdataset.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ This is inspired by the mednli implementation: https://huggingface.co/datasets/bigbio/mednli/blob/main/mednli.py
18
+
19
+ The files comprising this dataset must be on the users local machine in a single directory that is
20
+ passed to `datasets.load_dataset` via the `data_dir` kwarg. This loader script will read the archive
21
+ files directly (i.e. the user should not uncompress, untar or unzip any of the files). For example,
22
+ if `data_dir` is `"testdataset"` it should contain the following files:
23
+ testdataset
24
+ ├── testdataset.zip
25
+ """
26
+
27
+ import json
28
+ import os
29
+ from typing import Dict, List, Tuple
30
+
31
+ import datasets
32
+
33
+ # from .bigbiohub import entailment_features
34
+ # from .bigbiohub import BigBioConfig
35
+ # from .bigbiohub import Tasks
36
+
37
+
38
+ _LANGUAGES = ["English"]
39
+ _PUBMED = False
40
+ _LOCAL = True
41
+
42
+
43
+ _DATASETNAME = "testdataset"
44
+ _DISPLAYNAME = "TESTDATASET"
45
+
46
+ _DESCRIPTION = """\
47
+ Test Dataset
48
+ """
49
+
50
+
51
+ _HOMEPAGE = "https://physionet.org/content/mednli/1.0.0/"
52
+
53
+ _LICENSE = "PHYSIONET_LICENSE_1p5"
54
+
55
+ _URLS = {}
56
+
57
+ # _SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]
58
+
59
+ _SOURCE_VERSION = "1.0.0"
60
+ _BIGBIO_VERSION = "1.0.0"
61
+
62
+
63
class MedNLIDataset(datasets.GeneratorBasedBuilder):
    """MedNLI textual-entailment dataset loader.

    Local-only dataset: the user must pass ``data_dir`` to
    ``datasets.load_dataset`` pointing at a directory containing the
    PhysioNet MedNLI zip archive. Supports two schemas: the raw
    ``source`` schema (all fields from the original JSONL) and the
    BigBio ``bigbio_te`` entailment schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="mednli_source",
            version=SOURCE_VERSION,
            description="MedNLI source schema",
            schema="source",
            subset_id="mednli",
        ),
        BigBioConfig(
            name="mednli_bigbio_te",
            version=BIGBIO_VERSION,
            description="MedNLI BigBio schema",
            schema="bigbio_te",
            subset_id="mednli",
        ),
    ]

    DEFAULT_CONFIG_NAME = "mednli_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the feature schema for the active config.

        Raises:
            ValueError: if the config's schema is neither ``source`` nor
                ``bigbio_te``.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "pairID": datasets.Value("string"),
                    "gold_label": datasets.Value("string"),
                    "sentence1": datasets.Value("string"),
                    "sentence2": datasets.Value("string"),
                    "sentence1_parse": datasets.Value("string"),
                    "sentence2_parse": datasets.Value("string"),
                    "sentence1_binary_parse": datasets.Value("string"),
                    "sentence2_binary_parse": datasets.Value("string"),
                }
            )
        elif self.config.schema == "bigbio_te":
            features = entailment_features
        else:
            # Fix: the original fell through here leaving `features` unbound,
            # which surfaced as an opaque UnboundLocalError. Fail loudly with
            # a clear message instead.
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Extract the local zip archive and declare train/test/dev splits.

        Raises:
            ValueError: if ``data_dir`` was not supplied (this is a local
                dataset; nothing is downloaded).
        """
        # Guard clause: this dataset cannot be downloaded automatically.
        if self.config.data_dir is None:
            raise ValueError(
                "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
            )

        extract_dir = dl_manager.extract(
            os.path.join(
                self.config.data_dir,
                "mednli-a-natural-language-inference-dataset-for-the-clinical-domain-1.0.0.zip",
            )
        )
        # The archive unpacks into a directory of the same (versioned) name.
        data_dir = os.path.join(
            extract_dir,
            "mednli-a-natural-language-inference-dataset-for-the-clinical-domain-1.0.0",
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "mli_train_v1.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "mli_test_v1.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "mli_dev_v1.jsonl"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from one JSONL split file.

        Keys are the per-pair ``pairID`` field. The ``source`` schema yields
        the parsed JSON record unchanged; ``bigbio_te`` maps it onto the
        BigBio entailment fields (id/premise/hypothesis/label).
        """
        # Fix: open with an explicit encoding so the JSONL parses identically
        # regardless of the platform's locale default.
        with open(filepath, "r", encoding="utf-8") as f:
            if self.config.schema == "source":
                for line in f:
                    json_line = json.loads(line)
                    yield json_line["pairID"], json_line

            elif self.config.schema == "bigbio_te":
                for line in f:
                    json_line = json.loads(line)
                    entailment_example = {
                        "id": json_line["pairID"],
                        "premise": json_line["sentence1"],
                        "hypothesis": json_line["sentence2"],
                        "label": json_line["gold_label"],
                    }
                    yield json_line["pairID"], entailment_example