Commit: a7fa5c2
Parent(s): 1c5de89
Commit message: refactor
Browse files
Files changed:
- README.md +1 -1
- amazonian_fish_classifier_data.py +14 -9
README.md
CHANGED
|
@@ -44,6 +44,6 @@ dataset_info:
|
|
| 44 |
- name: train
|
| 45 |
num_bytes: 578234
|
| 46 |
num_examples: 3068
|
| 47 |
-
download_size:
|
| 48 |
dataset_size: 578234
|
| 49 |
---
|
|
|
|
| 44 |
- name: train
|
| 45 |
num_bytes: 578234
|
| 46 |
num_examples: 3068
|
| 47 |
+
download_size: 330476983
|
| 48 |
dataset_size: 578234
|
| 49 |
---
|
amazonian_fish_classifier_data.py
CHANGED
|
@@ -13,8 +13,9 @@
|
|
| 13 |
# limitations under the License.
|
| 14 |
"""TODO."""
|
| 15 |
|
|
|
|
| 16 |
import os
|
| 17 |
-
|
| 18 |
import datasets
|
| 19 |
|
| 20 |
_CITATION = """TODO"""
|
|
@@ -24,13 +25,14 @@ _DESCRIPTION = """\
|
|
| 24 |
TODO
|
| 25 |
"""
|
| 26 |
|
| 27 |
-
_HOMEPAGE = "https://doi.org/10.25573/data.17314730.v1"
|
| 28 |
|
| 29 |
_LICENSE = "CC BY 4.0"
|
| 30 |
|
| 31 |
|
| 32 |
_URLS = {
|
| 33 |
"images": "https://smithsonian.figshare.com/ndownloader/files/31975544",
|
|
|
|
| 34 |
}
|
| 35 |
|
| 36 |
|
|
@@ -93,19 +95,22 @@ class AmazonianFish(datasets.GeneratorBasedBuilder):
|
|
| 93 |
|
| 94 |
def _split_generators(self, dl_manager):
|
| 95 |
images = dl_manager.download_and_extract(_URLS["images"])
|
|
|
|
|
|
|
|
|
|
| 96 |
return [
|
| 97 |
datasets.SplitGenerator(
|
| 98 |
name=datasets.Split.TRAIN,
|
| 99 |
gen_kwargs={
|
| 100 |
"images": os.path.join(images, "training_images"),
|
|
|
|
| 101 |
},
|
| 102 |
),
|
| 103 |
]
|
| 104 |
|
| 105 |
-
def _generate_examples(self, images):
|
| 106 |
-
id_
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
yield id_, {"image": str(example), "label": example.parent.name}
|
|
|
|
| 13 |
# limitations under the License.
|
| 14 |
"""TODO."""
|
| 15 |
|
| 16 |
+
|
| 17 |
import os
|
| 18 |
+
import pandas as pd
|
| 19 |
import datasets
|
| 20 |
|
| 21 |
_CITATION = """TODO"""
|
|
|
|
| 25 |
TODO
|
| 26 |
"""
|
| 27 |
|
| 28 |
+
_HOMEPAGE = "https://doi.org/10.25573/data.17314730.v1"
|
| 29 |
|
| 30 |
_LICENSE = "CC BY 4.0"
|
| 31 |
|
| 32 |
|
| 33 |
_URLS = {
|
| 34 |
"images": "https://smithsonian.figshare.com/ndownloader/files/31975544",
|
| 35 |
+
"labels": "https://smithsonian.figshare.com/ndownloader/files/31975646",
|
| 36 |
}
|
| 37 |
|
| 38 |
|
|
|
|
| 95 |
|
| 96 |
def _split_generators(self, dl_manager):
|
| 97 |
images = dl_manager.download_and_extract(_URLS["images"])
|
| 98 |
+
labels = dl_manager.download(_URLS["labels"])
|
| 99 |
+
df = pd.read_csv(labels)
|
| 100 |
+
labels = df.to_dict(orient="records")
|
| 101 |
return [
|
| 102 |
datasets.SplitGenerator(
|
| 103 |
name=datasets.Split.TRAIN,
|
| 104 |
gen_kwargs={
|
| 105 |
"images": os.path.join(images, "training_images"),
|
| 106 |
+
"labels": labels,
|
| 107 |
},
|
| 108 |
),
|
| 109 |
]
|
| 110 |
|
| 111 |
+
def _generate_examples(self, images, labels):
|
| 112 |
+
for id_, example in enumerate(labels):
|
| 113 |
+
yield id_, {
|
| 114 |
+
"image": os.path.join(images, example["Genus"], example["Image_name"]),
|
| 115 |
+
"label": example["Genus"],
|
| 116 |
+
}
|
|
|