"""TODO: Add a description here."""
import csv
import json
import numpy as np
from pathlib import Path
import datasets
_CITATION = """\
@article{Li_2008,
    title={A public turbulence database cluster and applications to study Lagrangian evolution of velocity increments in turbulence},
    volume={9},
    ISSN={1468-5248},
    url={http://dx.doi.org/10.1080/14685240802376389},
    DOI={10.1080/14685240802376389},
    journal={Journal of Turbulence},
    publisher={Informa UK Limited},
    author={Li, Yi and Perlman, Eric and Wan, Minping and Yang, Yunke and Meneveau, Charles and Burns, Randal and Chen, Shiyi and Szalay, Alexander and Eyink, Gregory},
    year={2008},
    month=jan,
    pages={N31}
}
"""
_DESCRIPTION = """\
Paired low- and high-resolution 3D velocity fields sampled from the Johns Hopkins
Turbulence Database (JHTDB) for super-resolution experiments. Each example pairs a
sequence of low-resolution frames ("lrs") with a single high-resolution frame ("hr"),
stored as float32 arrays of shape (channels, x, y, z).
"""
_HOMEPAGE = "https://huggingface.co/datasets/dl2-g32/jhtdb"
# License unknown; left empty.
_LICENSE = ""
_BASE_URL = "https://huggingface.co/datasets/dl2-g32/jhtdb/resolve/main"
_URLS = {
"small_50": {
"train": (
"datasets/jhtdb/small_50/metadata_train.csv",
"datasets/jhtdb/small_50/train.zip",
),
"val": (
"datasets/jhtdb/small_50/metadata_val.csv",
"datasets/jhtdb/small_50/val.zip",
),
"test": (
"datasets/jhtdb/small_50/metadata_test.csv",
"datasets/jhtdb/small_50/test.zip",
),
},
"large_50": {
"train": (
"datasets/jhtdb/large_50/metadata_train.csv",
"datasets/jhtdb/large_50/train.zip",
),
"val": (
"datasets/jhtdb/large_50/metadata_val.csv",
"datasets/jhtdb/large_50/val.zip",
),
"test": (
"datasets/jhtdb/large_50/metadata_test.csv",
"datasets/jhtdb/large_50/test.zip",
),
},
"large_100": {
"train": (
"datasets/jhtdb/large_100/metadata_train.csv",
"datasets/jhtdb/large_100/train.zip",
),
"val": (
"datasets/jhtdb/large_100/metadata_val.csv",
"datasets/jhtdb/large_100/val.zip",
),
"test": (
"datasets/jhtdb/large_100/metadata_test.csv",
"datasets/jhtdb/large_100/test.zip",
),
},
}
class JHTDB(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="small_50", version=VERSION, description=""),
datasets.BuilderConfig(name="large_50", version=VERSION, description=""),
datasets.BuilderConfig(name="large_100", version=VERSION, description=""),
]
DEFAULT_CONFIG_NAME = "large_50"
def _info(self):
if self.config.name.startswith("small"):
features = datasets.Features(
{
"lrs": datasets.Sequence(
datasets.Array4D(shape=(3, 4, 4, 4), dtype="float32"),
),
"hr": datasets.Array4D(shape=(3, 16, 16, 16), dtype="float32"),
}
)
elif self.config.name.startswith("large"):
features = datasets.Features(
{
"lrs": datasets.Sequence(
datasets.Array4D(shape=(3, 16, 16, 16), dtype="float32"),
),
"hr": datasets.Array4D(shape=(3, 64, 64, 64), dtype="float32"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = _URLS[self.config.name]
urls = {
k: (f"{_BASE_URL}/{v[0]}", f"{_BASE_URL}/{v[1]}") for k, v in urls.items()
}
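        # download_and_extract mirrors the input mapping: each split maps to a
        # (local metadata CSV path, extracted archive directory) tuple.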
data_dir = dl_manager.download_and_extract(urls)
named_splits = {
"train": datasets.Split.TRAIN,
"val": datasets.Split.VALIDATION,
"test": datasets.Split.TEST,
}
return [
datasets.SplitGenerator(
name=named_splits[split],
gen_kwargs={
"metadata_path": Path(metadata_path),
"data_path": Path(data_path),
},
)
for split, (metadata_path, data_path) in data_dir.items()
]
def _generate_examples(self, metadata_path: Path, data_path: Path):
with open(metadata_path) as f:
reader = csv.DictReader(f)
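            # Each metadata row holds a JSON-encoded list of low-resolution file
            # paths ("lr_paths") and one high-resolution path ("hr_path"); only
            # the file names are kept and re-rooted under the extracted archive
            # directory.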
for key, data in enumerate(reader):
yield key, {
"lrs": [
np.load(data_path / Path(p).name)
for p in json.loads(data["lr_paths"])
],
"hr": np.load(data_path / Path(data["hr_path"]).name),
}
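
# A minimal usage sketch (assuming this script lives in the `dl2-g32/jhtdb`
# dataset repository, so the repo id, config names, and splits below match
# BUILDER_CONFIGS and _URLS; recent `datasets` versions may also require
# trust_remote_code=True for script-based datasets):
#
#     import datasets
#
#     ds = datasets.load_dataset("dl2-g32/jhtdb", "large_50", split="train")
#     example = ds[0]
#     lrs = example["lrs"]  # sequence of (3, 16, 16, 16) float32 arrays
#     hr = example["hr"]    # one (3, 64, 64, 64) float32 array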