thomasyu888 committed on
Commit
9266c56
·
1 Parent(s): 83b8603

Update testdataset.py

Browse files
Files changed (1) hide show
  1. testdataset.py +31 -30
testdataset.py CHANGED
@@ -24,7 +24,7 @@ testdataset
24
  ├── testdataset.zip
25
  """
26
 
27
- import json
28
  import os
29
  from dataclasses import dataclass
30
  from typing import Dict, List, Tuple
@@ -125,41 +125,42 @@ class MedNLIDataset(datasets.GeneratorBasedBuilder):
125
  )
126
 
127
  def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
128
- if self.config.data_dir is None:
 
129
  raise ValueError(
130
  "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
131
  )
132
- else:
133
- extract_dir = dl_manager.extract(
134
- os.path.join(
135
- self.config.data_dir,
136
- "mednli-a-natural-language-inference-dataset-for-the-clinical-domain-1.0.0.zip",
137
- )
138
- )
139
- data_dir = os.path.join(
140
- extract_dir,
141
- "mednli-a-natural-language-inference-dataset-for-the-clinical-domain-1.0.0",
142
- )
143
 
144
  return [
145
  datasets.SplitGenerator(
146
  name=datasets.Split.TRAIN,
147
  gen_kwargs={
148
- "filepath": os.path.join(data_dir, "mli_train_v1.jsonl"),
149
  "split": "train",
150
  },
151
  ),
152
  datasets.SplitGenerator(
153
  name=datasets.Split.TEST,
154
  gen_kwargs={
155
- "filepath": os.path.join(data_dir, "mli_test_v1.jsonl"),
156
  "split": "test",
157
  },
158
  ),
159
  datasets.SplitGenerator(
160
  name=datasets.Split.VALIDATION,
161
  gen_kwargs={
162
- "filepath": os.path.join(data_dir, "mli_dev_v1.jsonl"),
163
  "split": "dev",
164
  },
165
  ),
@@ -168,17 +169,17 @@ class MedNLIDataset(datasets.GeneratorBasedBuilder):
168
  def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
169
  with open(filepath, "r") as f:
170
  if self.config.schema == "source":
171
- for line in f:
172
- json_line = json.loads(line)
173
- yield json_line["pairID"], json_line
174
-
175
- elif self.config.schema == "bigbio_te":
176
- for line in f:
177
- json_line = json.loads(line)
178
- entailment_example = {
179
- "id": json_line["pairID"],
180
- "premise": json_line["sentence1"],
181
- "hypothesis": json_line["sentence2"],
182
- "label": json_line["gold_label"],
183
- }
184
- yield json_line["pairID"], entailment_example
 
24
  ├── testdataset.zip
25
  """
26
 
27
+ import csv
28
  import os
29
  from dataclasses import dataclass
30
  from typing import Dict, List, Tuple
 
125
  )
126
 
127
  def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
128
+ data_dir = self.config.data_dir
129
+ if data_dir is None:
130
  raise ValueError(
131
  "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
132
  )
133
+ # else:
134
+ # extract_dir = dl_manager.extract(
135
+ # os.path.join(
136
+ # self.config.data_dir,
137
+ # "mednli-a-natural-language-inference-dataset-for-the-clinical-domain-1.0.0.zip",
138
+ # )
139
+ # )
140
+ # data_dir = os.path.join(
141
+ # extract_dir,
142
+ # "mednli-a-natural-language-inference-dataset-for-the-clinical-domain-1.0.0",
143
+ # )
144
 
145
  return [
146
  datasets.SplitGenerator(
147
  name=datasets.Split.TRAIN,
148
  gen_kwargs={
149
+ "filepath": os.path.join(data_dir, "train.csv"),
150
  "split": "train",
151
  },
152
  ),
153
  datasets.SplitGenerator(
154
  name=datasets.Split.TEST,
155
  gen_kwargs={
156
+ "filepath": os.path.join(data_dir, "test.csv"),
157
  "split": "test",
158
  },
159
  ),
160
  datasets.SplitGenerator(
161
  name=datasets.Split.VALIDATION,
162
  gen_kwargs={
163
+ "filepath": os.path.join(data_dir, "dev.csv"),
164
  "split": "dev",
165
  },
166
  ),
 
169
  def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
170
  with open(filepath, "r") as f:
171
  if self.config.schema == "source":
172
+ test = csv.reader(f)
173
+ for row in test:
174
+ yield row
175
+
176
+ # elif self.config.schema == "bigbio_te":
177
+ # for line in f:
178
+ # json_line = json.loads(line)
179
+ # entailment_example = {
180
+ # "id": json_line["pairID"],
181
+ # "premise": json_line["sentence1"],
182
+ # "hypothesis": json_line["sentence2"],
183
+ # "label": json_line["gold_label"],
184
+ # }
185
+ # yield json_line["pairID"], entailment_example