Fix TOO MANY REQUESTS error (#2)
Opened by albertvillanova (HF Staff).
File changed: ask_a_patient.py (+24 −35)
ask_a_patient.py — CHANGED
|
@@ -13,20 +13,16 @@
|
|
| 13 |
# See the License for the specific language governing permissions and
|
| 14 |
# limitations under the License.
|
| 15 |
|
| 16 |
-
import glob
|
| 17 |
import os
|
| 18 |
-
import re
|
| 19 |
|
| 20 |
import datasets
|
| 21 |
|
| 22 |
-
from .bigbiohub import kb_features
|
| 23 |
-
from .bigbiohub import BigBioConfig
|
| 24 |
-
from .bigbiohub import Tasks
|
| 25 |
|
| 26 |
_DATASETNAME = "ask_a_patient"
|
| 27 |
_DISPLAYNAME = "AskAPatient"
|
| 28 |
|
| 29 |
-
_LANGUAGES = [
|
| 30 |
_PUBMED = True
|
| 31 |
_LOCAL = False
|
| 32 |
_CITATION = """
|
|
@@ -52,7 +48,7 @@ mapped to how they are formally written in medical ontologies (SNOMED-CT and AMT
|
|
| 52 |
|
| 53 |
_HOMEPAGE = "https://zenodo.org/record/55013"
|
| 54 |
|
| 55 |
-
_LICENSE =
|
| 56 |
|
| 57 |
_URLs = "https://zenodo.org/record/55013/files/datasets.zip"
|
| 58 |
|
|
@@ -109,32 +105,27 @@ class AskAPatient(datasets.GeneratorBasedBuilder):
|
|
| 109 |
dl_dir = dl_manager.download_and_extract(_URLs)
|
| 110 |
dataset_dir = os.path.join(dl_dir, "datasets", "AskAPatient")
|
| 111 |
# dataset supports k-folds
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
datasets.
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
name=split_id,
|
| 126 |
-
gen_kwargs={"filepath": fold_filepath, "split_id": split_id},
|
| 127 |
-
)
|
| 128 |
-
)
|
| 129 |
-
return splits
|
| 130 |
|
| 131 |
def _generate_examples(self, filepath, split_id):
|
| 132 |
with open(filepath, "r", encoding="latin-1") as f:
|
| 133 |
for i, line in enumerate(f):
|
| 134 |
-
|
| 135 |
cui, medical_concept, social_media_text = line.strip().split("\t")
|
| 136 |
if self.config.schema == "source":
|
| 137 |
-
yield
|
| 138 |
"cui": cui,
|
| 139 |
"medical_concept": medical_concept,
|
| 140 |
"social_media_text": social_media_text,
|
|
@@ -142,12 +133,12 @@ class AskAPatient(datasets.GeneratorBasedBuilder):
|
|
| 142 |
elif self.config.schema == "bigbio_kb":
|
| 143 |
text_type = "social_media_text"
|
| 144 |
offset = (0, len(social_media_text))
|
| 145 |
-
yield
|
| 146 |
-
"id":
|
| 147 |
-
"document_id":
|
| 148 |
"passages": [
|
| 149 |
{
|
| 150 |
-
"id": f"{
|
| 151 |
"type": text_type,
|
| 152 |
"text": [social_media_text],
|
| 153 |
"offsets": [offset],
|
|
@@ -155,13 +146,11 @@ class AskAPatient(datasets.GeneratorBasedBuilder):
|
|
| 155 |
],
|
| 156 |
"entities": [
|
| 157 |
{
|
| 158 |
-
"id": f"{
|
| 159 |
"type": text_type,
|
| 160 |
"text": [social_media_text],
|
| 161 |
"offsets": [offset],
|
| 162 |
-
"normalized": [
|
| 163 |
-
{"db_name": "SNOMED-CT|AMT", "db_id": cui}
|
| 164 |
-
],
|
| 165 |
}
|
| 166 |
],
|
| 167 |
"events": [],
|
|
|
|
| 13 |
# See the License for the specific language governing permissions and
|
| 14 |
# limitations under the License.
|
| 15 |
|
|
|
|
| 16 |
import os
|
|
|
|
| 17 |
|
| 18 |
import datasets
|
| 19 |
|
| 20 |
+
from .bigbiohub import BigBioConfig, Tasks, kb_features
|
|
|
|
|
|
|
| 21 |
|
| 22 |
_DATASETNAME = "ask_a_patient"
|
| 23 |
_DISPLAYNAME = "AskAPatient"
|
| 24 |
|
| 25 |
+
_LANGUAGES = ["English"]
|
| 26 |
_PUBMED = True
|
| 27 |
_LOCAL = False
|
| 28 |
_CITATION = """
|
|
|
|
| 48 |
|
| 49 |
_HOMEPAGE = "https://zenodo.org/record/55013"
|
| 50 |
|
| 51 |
+
_LICENSE = "Creative Commons Attribution 4.0 International"
|
| 52 |
|
| 53 |
_URLs = "https://zenodo.org/record/55013/files/datasets.zip"
|
| 54 |
|
|
|
|
| 105 |
dl_dir = dl_manager.download_and_extract(_URLs)
|
| 106 |
dataset_dir = os.path.join(dl_dir, "datasets", "AskAPatient")
|
| 107 |
# dataset supports k-folds
|
| 108 |
+
splits_names = ["train", "validation", "test"]
|
| 109 |
+
fold_ids = range(10)
|
| 110 |
+
return [
|
| 111 |
+
datasets.SplitGenerator(
|
| 112 |
+
name=f"{split_name}_{fold_id}",
|
| 113 |
+
gen_kwargs={
|
| 114 |
+
"filepath": os.path.join(dataset_dir, f"AskAPatient.fold-{fold_id}.{split_name}.txt"),
|
| 115 |
+
"split_id": f"{split_name}_{fold_id}",
|
| 116 |
+
},
|
| 117 |
+
)
|
| 118 |
+
for split_name in splits_names
|
| 119 |
+
for fold_id in fold_ids
|
| 120 |
+
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
|
| 122 |
def _generate_examples(self, filepath, split_id):
|
| 123 |
with open(filepath, "r", encoding="latin-1") as f:
|
| 124 |
for i, line in enumerate(f):
|
| 125 |
+
uid = f"{split_id}_{i}"
|
| 126 |
cui, medical_concept, social_media_text = line.strip().split("\t")
|
| 127 |
if self.config.schema == "source":
|
| 128 |
+
yield uid, {
|
| 129 |
"cui": cui,
|
| 130 |
"medical_concept": medical_concept,
|
| 131 |
"social_media_text": social_media_text,
|
|
|
|
| 133 |
elif self.config.schema == "bigbio_kb":
|
| 134 |
text_type = "social_media_text"
|
| 135 |
offset = (0, len(social_media_text))
|
| 136 |
+
yield uid, {
|
| 137 |
+
"id": uid,
|
| 138 |
+
"document_id": uid,
|
| 139 |
"passages": [
|
| 140 |
{
|
| 141 |
+
"id": f"{uid}_passage",
|
| 142 |
"type": text_type,
|
| 143 |
"text": [social_media_text],
|
| 144 |
"offsets": [offset],
|
|
|
|
| 146 |
],
|
| 147 |
"entities": [
|
| 148 |
{
|
| 149 |
+
"id": f"{uid}_entity",
|
| 150 |
"type": text_type,
|
| 151 |
"text": [social_media_text],
|
| 152 |
"offsets": [offset],
|
| 153 |
+
"normalized": [{"db_name": "SNOMED-CT|AMT", "db_id": cui}],
|
|
|
|
|
|
|
| 154 |
}
|
| 155 |
],
|
| 156 |
"events": [],
|