| import os | |
| import datasets | |
| from huggingface_hub import HfFileSystem | |
| from typing import List | |
# Module-level logger following the datasets library convention.
logger = datasets.logging.get_logger(__name__)

# Filesystem view of the Hub, used once at import time to discover which
# data archives exist in the repository.
fs = HfFileSystem()

_CITATION = """
"""
_DESCRIPTION = """
This dataset contain file about datetime date.
It's created with purpose is practice loading dataset from hugging face hub.
"""
_HOMEPAGE = """
"""
_REPO = "datasets/nguyenminh4099/date-data"
_BRANCH = "main"
_REPO_BRANCH = f"{_REPO}@{_BRANCH}"
_REPO_URL = f"https://huggingface.co/{_REPO}/resolve/{_BRANCH}"
# BUG FIX: the template must contain a ``{filename}`` placeholder because
# ``_split_generators`` fills it via ``_URL.format(filename=...)``; the old
# hard-coded name made that call a no-op.  Also build the URL with an
# f-string instead of ``os.path.join`` (which would use ``\\`` on Windows).
_URL = f"{_REPO_URL}/data/{{filename}}.zip"

# 'all' is a synthetic config that loads every archive; the remaining
# configs are the archive basenames (".zip" stripped) found under data/.
_CONFIGS = ['all']
_CONFIGS.extend(
    os.path.basename(file)[:-4]
    # BUG FIX: a "/" separator was missing between the revision spec and
    # the "data" directory ("...@maindata" never matched anything).
    for file in fs.listdir(_REPO_BRANCH + "/data", detail=False)
    if file.endswith('.zip')
)
class DateDataConfig(datasets.BuilderConfig):
    """BuilderConfig for the date-data dataset.

    Args:
        name: Configuration name — ``'all'`` or one of the archive
            basenames discovered in the repository's ``data/`` folder.
        **kwargs: Forwarded to :class:`datasets.BuilderConfig`.
    """

    def __init__(
        self,
        name: str,
        **kwargs,
    ):
        # BUG FIX: the original accepted **kwargs but silently dropped
        # them; forward them to the base class.
        super().__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
            **kwargs,
        )
class DateDate(datasets.GeneratorBasedBuilder):
    """Builder that parses ``date``-command-style timestamp strings
    (e.g. ``"Wed Oct 16 11:08:00 +07 2024"``) into structured examples."""

    BUILDER_CONFIGS = [DateDataConfig(name=name) for name in _CONFIGS]
    DEFAULT_CONFIG_NAME = 'all'

    def _info(self) -> datasets.DatasetInfo:
        """Return the dataset metadata and feature schema.

        BUG FIX: this hook must be named ``_info`` — the base class calls
        ``_info()``; the original ``info`` shadowed a base-class property,
        so the custom features were never registered.
        """
        features = datasets.Features({
            "id": datasets.Value('string'),
            "dow": datasets.Value('string'),
            "month": datasets.Value('string'),
            "dom": datasets.Value('string'),
            # BUG FIX: 'int' is not a valid Arrow dtype name; use 'int32'.
            "hour": datasets.Value('int32'),
            "min": datasets.Value('int32'),
            "second": datasets.Value('int32'),
            "timezone": datasets.Value('string'),
            "year": datasets.Value('int32'),
            "file_path": datasets.Value('string'),
        })
        return datasets.DatasetInfo(
            features=features,
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
        )

    def _split_generators(
        self,
        dl_manager: datasets.DownloadManager,
    ) -> List[datasets.SplitGenerator]:
        """Download/extract the selected archives and declare one TRAIN split."""
        # 'all' expands to every concrete config (skip the 'all' sentinel itself).
        config_names = _CONFIGS[1:] if self.config.name == 'all' else [self.config.name]
        data_dirs = dl_manager.download_and_extract(
            [_URL.format(filename=zipfile) for zipfile in config_names]
        )
        # BUG FIX: the API requires a *list* of SplitGenerators; the
        # original returned a bare SplitGenerator.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dirs": data_dirs,
                },
            )
        ]

    def _generate_examples(
        self,
        data_dirs: List[str],
    ):
        """Yield ``(key, example)`` pairs, one per extracted archive.

        BUG FIX: the original yielded the return value of ``dict.update``,
        which is always ``None``, and yielded no key — ``GeneratorBasedBuilder``
        expects ``(key, example)`` tuples.
        """
        for key, data_dir in enumerate(data_dirs):
            logger.info("Generating examples from %s", data_dir)
            example = self._extract_datetime("Wed Oct 16 11:08:00 +07 2024")
            example.update({"id": "123", "file_path": "null"})
            yield key, example

    def _read_txt(
        self,
        file_name: str,
    ) -> dict:
        # BUG FIX: annotated ``-> str`` but returns the parsed dict.
        with open(file=file_name, mode='r') as f:
            return self._extract_datetime(f.read())

    def _extract_datetime(
        self,
        datetime_string: str,
    ) -> dict:
        """Parse ``"DOW MON DOM HH:MM:SS TZ YEAR"`` into a field dict.

        BUG FIX: the original indexed ``components[6]``/``[7]``, but the
        string splits into only 6 whitespace tokens — ``"11:08:00"`` is a
        single token and must itself be split on ``':'``.  Numeric fields
        are converted to int to match the declared int32 features.
        """
        datetime_string = datetime_string.strip("./ ")
        components = datetime_string.split(' ')
        hour, minute, second = components[3].split(':')
        return {
            "dow": components[0],
            "month": components[1],
            "dom": components[2],
            "hour": int(hour),
            "min": int(minute),
            "second": int(second),
            "timezone": components[4],
            "year": int(components[5]),
        }