Fix FileNotFoundError #2
opened by albertvillanova (HF Staff)

Changed files:
- data/crossvalidation_data.zip  +3 -0
- progene.py  +21 -22
data/crossvalidation_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8745d6b112b5c02510331ae598451edceb014d76edeef63d6fe420ed4d6de5c0
+size 21690976
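The three added lines are a Git LFS pointer, not the archive itself: the 21,690,976-byte crossvalidation_data.zip is stored via Git LFS, and the repository tracks only its SHA-256 digest and size. Below is a minimal sketch (not part of the PR) of checking a locally fetched copy against the pointer fields; the local path is an assumption.

import hashlib
import os

# Values copied from the LFS pointer added in this PR.
EXPECTED_OID = "8745d6b112b5c02510331ae598451edceb014d76edeef63d6fe420ed4d6de5c0"
EXPECTED_SIZE = 21690976

# Assumed local path to the fetched archive (not defined by the PR).
path = "data/crossvalidation_data.zip"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("archive matches the LFS pointer")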
progene.py
CHANGED
@@ -24,7 +24,7 @@ from .bigbiohub import kb_features
 from .bigbiohub import BigBioConfig
 from .bigbiohub import Tasks
 
-_LANGUAGES = [
+_LANGUAGES = ["English"]
 _PUBMED = True
 _LOCAL = False
 _CITATION = """\

@@ -59,10 +59,10 @@ The corpus was developed in the context of the StemNet project (http://www.stemn
 
 _HOMEPAGE = "https://zenodo.org/record/3698568#.YlVHqdNBxeg"
 
-_LICENSE =
+_LICENSE = "Creative Commons Attribution 4.0 International"
 
 # using custom url: original distribution includes trained models (>25GB) and original dataset license allow for redistribution
-_URLS = "
+_URLS = "data/crossvalidation_data.zip"
 
 _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

@@ -128,25 +128,24 @@ class ProgeneDataset(datasets.GeneratorBasedBuilder):
         urls = _URLS
         dl_dir = dl_manager.download_and_extract(urls)
         dataset_dir = os.path.join(dl_dir, "crossvalidation_data")
-        [removed split-generation lines not rendered in the diff view]
-        return splits
+        split_filenames = {
+            "train": "train.txt",
+            "validation": "dev.txt",
+            "test": "test.txt",
+        }
+        return [
+            datasets.SplitGenerator(
+                name=f"split_{split_num}_{split}",
+                gen_kwargs={
+                    "filepath": os.path.join(
+                        dataset_dir, f"flairSplit{split_num}", filename
+                    ),
+                    "split_id": f"split_{split_num}_{split}",
+                },
+            )
+            for split_num in range(0, 10)
+            for split, filename in split_filenames.items()
+        ]
 
     def _generate_examples(self, filepath, split_id: str) -> Tuple[int, Dict]:
         """Yields examples as (key, example) tuples."""
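For reference: _URLS now points at the archive bundled in this repository, and relative paths handed to dl_manager.download_and_extract are resolved against the dataset repo, so loading no longer depends on the original external distribution. The rewritten _split_generators returns one datasets.SplitGenerator per cross-validation fold and file, i.e. 30 splits named split_0_train through split_9_test. A minimal usage sketch follows, assuming the dataset repo id is bigbio/progene and that the default config is wanted (neither is stated in this PR; a BigBio config may need to be selected with name=).

from datasets import load_dataset

# Split names produced by the new _split_generators:
# split_{fold}_{part} for fold in 0..9 and part in {train, validation, test}.
split_filenames = {"train": "train.txt", "validation": "dev.txt", "test": "test.txt"}
split_names = [f"split_{fold}_{part}" for fold in range(10) for part in split_filenames]

# Assumed repo id; trust_remote_code is required for script-based datasets
# in recent versions of the `datasets` library.
ds = load_dataset("bigbio/progene", split=split_names[0], trust_remote_code=True)
print(split_names[:3], "...", split_names[-1])
print(ds)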