Datasets:

wzkariampuzha committed on
Commit
1912dc9
·
1 Parent(s): adc5455

Update EpiClassify4GARD.py

Browse files
Files changed (1) hide show
  1. EpiClassify4GARD.py +76 -70
EpiClassify4GARD.py CHANGED
@@ -1,5 +1,5 @@
1
  # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
@@ -14,94 +14,81 @@
14
  # limitations under the License.
15
 
16
  # Lint as: python3
 
17
 
18
- import csv
19
- import os
20
- import textwrap
21
  import datasets
22
- from datasets.tasks import TextClassification
23
 
24
- _CITATION = """
25
- John, J. N., Sid, E., & Zhu, Q. (2021). Recurrent Neural Networks to Automatically Identify Rare Disease Epidemiologic Studies from PubMed. AMIA Joint Summits on Translational Science proceedings. AMIA Joint Summits on Translational Science, 2021, 325–334.
26
- """
27
 
28
- _DESCRIPTION = """\
 
29
 
30
- [fix description]
31
 
32
- Prepare positive dataset.ipynb: Generates orphanet_epi_mesh.csv, the final positive dataset (articles that are all epidemiology studies). First, PubMed IDs are extracted from a collection of epidemiology sources provided by Orphanet. The final positive set consists of the PubMed IDs that have epidemiology, incidence, or prevalence MeSH terms. The notebook includes code to optionally expand the dataset by including articles with epidemiology-related MeSH terms beyond those included in the Orphanet file, although this was shown to have worse performance.
33
- Prepare negative dataset.ipynb: Generates negative_dataset.csv, the final negative dataset (articles that are not epidemiology studies). Using the EBI API, the top 5 PubMed search results for each of the 6,000+ rare diseases included in the GARD database are retrieved. Articles that have epidemiology MeSH terms or keywords in the abstract or that are also in the Orphanet file are removed.
34
 
35
- negative_dataset.csv: Negative dataset assembled by Prepare negative dataset.ipynb. Columns: PubMed ID, abstract text. 25,015 rows.
36
- orphanet_epi_mesh.csv: Positive dataset assembled by Prepare positive dataset.ipynb. Columns: PubMed ID, abstract text. 1,145 rows.
37
  """
38
- _HOMEPAGE = "https://github.com/ncats/epi4GARD/tree/master#epi4gard"
39
- _LICENSE = "https://raw.githubusercontent.com/ncats/epi4GARD/master/license.txt"
40
 
41
  _URL = "https://huggingface.co/datasets/wzkariampuzha/EpiClassifySet/raw/main/"
42
  _TRAINING_FILE = "epi_classify_train.tsv"
43
  _VAL_FILE = "epi_classify_val.tsv"
44
  _TEST_FILE = "epi_classify_test.tsv"
45
 
46
- class EpiClassifyConfig(datasets.BuilderConfig):
47
- """BuilderConfig for EpiClassify."""
 
48
 
49
  def __init__(self, **kwargs):
50
- """BuilderConfig for EpiClassify.
51
-
52
  Args:
53
  **kwargs: keyword arguments forwarded to super.
54
  """
55
- super(EpiClassifyConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
56
 
57
- class EpiClassify(datasets.GeneratorBasedBuilder):
58
- """The General Language Understanding Evaluation (GLUE) benchmark."""
 
59
 
60
  BUILDER_CONFIGS = [
61
- EpiClassifyConfig(
62
- name="EpiClassify",
63
- version=VERSION,
64
- description=textwrap.dedent(
65
- """\
66
- The EpiClassify Dataset [REDO DESCRIPTION The task is to predict the sentiment of a
67
- given sentence. We use the two-way (positive/negative) class split, and use only
68
- sentence-level labels.]"""
69
- ),
70
- text_features={"abstract": "abstract"},
71
- label_classes=["negative", "positive"],
72
- label_column="label",
73
- #data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
74
- #data_dir="SST-2",
75
- )
76
  ]
77
 
78
  def _info(self):
79
- #features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
80
-
81
- features = datasets.Features(
82
- {
83
- "text": datasets.Value("string"),
84
- "label": datasets.features.ClassLabel(
85
- names=[
86
- "1 = Epi Abstract",
87
- "2 = Not Epi Abstract",
88
- ]
89
- ),
90
- }
91
- )
92
-
93
- '''
94
- if self.config.label_classes:
95
- features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
96
- else:
97
- features["label"] = datasets.Value("float32")
98
- features["idx"] = datasets.Value("int32")
99
- '''
100
-
101
  return datasets.DatasetInfo(
102
  description=_DESCRIPTION,
103
- features=datasets.Features(features),
104
- task_templates=[TextClassification(text_column="text", label_column="label")],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  )
106
 
107
  def _split_generators(self, dl_manager):
@@ -118,14 +105,33 @@ class EpiClassify(datasets.GeneratorBasedBuilder):
118
  datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
119
  datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
120
  ]
121
-
122
- def _generate_examples(self, filepath, split):
123
- """Yields examples."""
124
 
 
 
125
  with open(filepath, encoding="utf-8") as f:
126
- data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONNUMERIC)
127
- for id_, row in enumerate(data):
128
- yield id_, {
129
- "text": row[0],
130
- "label": row[1],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
  }
 
1
  # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
 
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
+ """INSERT TITLE"""
18
 
19
+ import logging
 
 
20
  import datasets
 
21
 
 
 
 
22
 
23
+ _CITATION = """\
24
+ *REDO*
25
 
26
+ """
27
 
28
+ _DESCRIPTION = """\
29
+ **REWRITE*
30
 
 
 
31
  """
 
 
32
 
33
  _URL = "https://huggingface.co/datasets/wzkariampuzha/EpiClassifySet/raw/main/"
34
  _TRAINING_FILE = "epi_classify_train.tsv"
35
  _VAL_FILE = "epi_classify_val.tsv"
36
  _TEST_FILE = "epi_classify_test.tsv"
37
 
38
+
39
class EpiSetConfig(datasets.BuilderConfig):
    """BuilderConfig for EpiSet (fixes copy-pasted "Conll2003" docstrings)."""

    def __init__(self, **kwargs):
        """BuilderConfig for EpiSet.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(EpiSetConfig, self).__init__(**kwargs)
48
 
49
+
50
+ class EpiSet(datasets.GeneratorBasedBuilder):
51
+ """EpiSet4NER by GARD."""
52
 
53
  BUILDER_CONFIGS = [
54
+ EpiSetConfig(name="EpiSet4NER", version=datasets.Version("1.0.0"), description="EpiSet4NER by NIH NCATS GARD"),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  ]
56
 
57
  def _info(self):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  return datasets.DatasetInfo(
59
  description=_DESCRIPTION,
60
+ features=datasets.Features(
61
+ {
62
+ "idx": datasets.Value("string"),
63
+ #"abstracts": datasets.Value("string"),
64
+ "abstracts": datasets.Sequence(datasets.Value("string")),
65
+ '''
66
+ "labels": datasets.Sequence(
67
+ datasets.features.ClassLabel(
68
+ names=[
69
+ "O", #(0)
70
+ "B-LOC", #(1)
71
+ "I-LOC", #(2)
72
+ "B-EPI", #(3)
73
+ "I-EPI", #(4)
74
+ "B-STAT", #(5)
75
+ "I-STAT", #(6)
76
+ ]
77
+ )
78
+ ),
79
+ '''
80
+ "labels": datasets.features.ClassLabel(
81
+ names=[
82
+ "1 = Epi Abstract",
83
+ "2 = Not Epi Abstract",
84
+ ]
85
+ ),
86
+
87
+ }
88
+ ),
89
+ supervised_keys=None,
90
+ homepage="https://github.com/ncats/epi4GARD/tree/master/Epi4GARD#epi4gard",
91
+ citation=_CITATION,
92
  )
93
 
94
  def _split_generators(self, dl_manager):
 
105
  datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
106
  datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
107
  ]
 
 
 
108
 
109
+ def _generate_examples(self, filepath):
110
+ logging.info("⏳ Generating examples from = %s", filepath)
111
  with open(filepath, encoding="utf-8") as f:
112
+ guid = 0
113
+ abstracts = []
114
+ labels = []
115
+ for line in f:
116
+ if line.startswith("-DOCSTART-") or line == "" or line == "\n" or line == "abstract\tlabel\n":
117
+ if abstracts:
118
+ yield guid, {
119
+ "idx": str(guid),
120
+ "abstracts": abstracts,
121
+ "labels": labels,
122
+ }
123
+ guid += 1
124
+ abstracts = []
125
+ labels = []
126
+ else:
127
+ # EpiSet abstracts are space separated
128
+ splits = line.split("\t")
129
+ abstracts.append(splits[0])
130
+ labels.append(splits[1].rstrip())
131
+ # last example
132
+ if tokens:
133
+ yield guid, {
134
+ "idx": str(guid),
135
+ "abstracts": abstracts,
136
+ "labels": labels,
137
  }