feat: update code to download data
Browse files
jhtdb.py
CHANGED
|
@@ -11,12 +11,18 @@ import datasets
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
-@
-title
-
-},
-
-}
 """

 # TODO: Add description of the dataset here
|
|
@@ -31,12 +37,8 @@ _HOMEPAGE = ""
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""

-
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    # "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-    # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
     "small_50": {
         "train": (
             "datasets/jhtdb/small_50/metadata_train.csv",
|
|
@@ -93,6 +95,9 @@ class JHTDB(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         urls = _URLS[self.config.name]
         data_dir = dl_manager.download_and_extract(urls)
         named_splits = {
             "train": datasets.Split.TRAIN,
|
|
|
|
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
+@article{Li_2008,
+   title={A public turbulence database cluster and applications to study Lagrangian evolution of velocity increments in turbulence},
+   volume={9},
+   ISSN={1468-5248},
+   url={http://dx.doi.org/10.1080/14685240802376389},
+   DOI={10.1080/14685240802376389},
+   journal={Journal of Turbulence},
+   publisher={Informa UK Limited},
+   author={Li, Yi and Perlman, Eric and Wan, Minping and Yang, Yunke and Meneveau, Charles and Burns, Randal and Chen, Shiyi and Szalay, Alexander and Eyink, Gregory},
+   year={2008},
+   month=jan, pages={N31} }
+
 """

 # TODO: Add description of the dataset here
|
|
|
|
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""

+_BASE_URL = "https://huggingface.co/datasets/dl2-g32/jhtdb/resolve/main"
 _URLS = {
     "small_50": {
         "train": (
             "datasets/jhtdb/small_50/metadata_train.csv",
|
|
|
|
     def _split_generators(self, dl_manager):
         urls = _URLS[self.config.name]
+        urls = {
+            k: (f"{_BASE_URL}/{v[0]}", f"{_BASE_URL}/{v[1]}") for k, v in urls.items()
+        }
         data_dir = dl_manager.download_and_extract(urls)
         named_splits = {
             "train": datasets.Split.TRAIN,
|