| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| """Collection of datasets for the MJP.""" |
|
|
| import pathlib |
| from collections import defaultdict |
| from dataclasses import dataclass |
| from typing import Optional |
|
|
| import datasets |
| import torch |
|
|
| from fim.data.utils import load_file |
| from fim.typing import Path, Paths |
|
|
|
|
| |
| |
# Template citation for the dataset card; replace with a real reference
# once the dataset is published.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# Short description shown on the dataset card.
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: add a link to an official homepage for the dataset here.
_HOMEPAGE = ""

# TODO: add the license for the dataset here if it can be determined.
_LICENSE = ""

# Root directory (relative to the repository) holding the DFR zip archives.
_ROOT_URL = "data/DFR"
|
|
|
|
@dataclass
class MJPDatasetsBuilderConfig(datasets.BuilderConfig):
    """Builder config for the MJP datasets.

    Extends the standard ``datasets.BuilderConfig`` with the name of the
    archive that holds the data for this configuration.
    """

    # Name of the zip archive (located under ``_ROOT_URL``) containing the
    # pre-generated data for this configuration, e.g. ``"6_st_DFR_V=0.zip"``.
    file_name: Optional[str] = None
|
|
|
|
| |
class MJP(datasets.GeneratorBasedBuilder):
    """Markov jump process (MJP) dataset builder.

    Loads pre-generated discrete flashing ratchet (DFR) simulations stored
    as torch tensor files and exposes them as a HuggingFace dataset with
    one config per potential value ``V``.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIG_CLASS = MJPDatasetsBuilderConfig

    # One config per potential value V of the DFR data.
    # NOTE: ``VERSION`` cannot be referenced inside a class-body comprehension
    # (class scope is not visible there), hence the explicit Version() call.
    BUILDER_CONFIGS = [
        MJPDatasetsBuilderConfig(
            name=f"DFR_V={v}",
            file_name=f"6_st_DFR_V={v}.zip",
            version=datasets.Version("1.1.0"),
            description=f"Discrete flashing ratchet data with potential V={v}.",
        )
        for v in range(4)
    ]

    DEFAULT_CONFIG_NAME = "DFR_V=0"

    # Mapping from feature name to the tensor file (inside the extracted
    # archive) that stores it.
    files_to_load = {
        "observation_grid": "fine_grid_grid.pt",
        "observation_values": "fine_grid_noisy_sample_paths.pt",
        "seq_lengths": "fine_grid_mask_seq_lengths.pt",
        "time_normalization_factors": "fine_grid_time_normalization_factors.pt",
        "intensity_matrices": "fine_grid_intensity_matrices.pt",
        "adjacency_matrices": "fine_grid_adjacency_matrices.pt",
        "initial_distributions": "fine_grid_initial_distributions.pt",
    }

    def _info(self):
        """Return the ``DatasetInfo`` describing this dataset's features."""
        features = datasets.Features(
            {
                "observation_grid": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("float32")))),
                "observation_values": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("uint32")))),
                "time_normalization_factors": datasets.Value("float32"),
                "seq_lengths": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
                "intensity_matrices": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "adjacency_matrices": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "initial_distributions": datasets.Sequence(datasets.Value("uint64")),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the configured archive; define the train split.

        Args:
            dl_manager: the ``datasets`` download manager.

        Returns:
            A single-element list with the TRAIN split generator.
        """
        url = f"{_ROOT_URL}/{self.config.file_name}"
        data_dir = dl_manager.download_and_extract(url)
        # The archive extracts into a directory named after the archive stem,
        # e.g. "6_st_DFR_V=0.zip" -> "6_st_DFR_V=0".
        extracted = pathlib.Path(data_dir) / pathlib.Path(self.config.file_name).stem

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datadir": extracted},
            )
        ]

    def __get_files(self, path: Path) -> Paths:
        """Return ``(feature_name, file_path)`` pairs for the files under *path*.

        NOTE(review): despite the ``Paths`` annotation this returns name/path
        pairs, not bare paths — verify against ``fim.typing``.
        """
        return [(key, pathlib.Path(path) / file_name) for key, file_name in self.files_to_load.items()]

    def _generate_examples(self, datadir):
        """Yield ``(key, example)`` pairs loaded from the tensors in *datadir*."""
        data = defaultdict(list)
        for key, file_path in self.__get_files(datadir):
            data[key].append(load_file(file_path))
        # Concatenate the per-file tensors along the first (sample) dimension.
        for key, tensors in data.items():
            data[key] = torch.cat(tensors)

        for idx in range(len(data["observation_grid"])):
            # Emit only features declared in `_info`, converted to plain lists.
            yield idx, {k: v[idx].tolist() for k, v in data.items() if k in self.info.features}
|
|