import datasets
from huggingface_hub import HfApi
from datasets import DownloadManager, DatasetInfo
from datasets.data_files import DataFilesDict
import os
import json
from os.path import dirname, basename
from pathlib import Path


_NAME = "mb23/GraySpectrogram"
_EXTENSION = [".png"]
_REVISION = "main"

_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"

_DESCRIPTION = f"""\
{_NAME}: a dataset of spectrogram .png files generated from the Google MusicCaps dataset.
Used for project learning.
"""


class GraySpectrogram2(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="train",
            description=_DESCRIPTION,
        )
    ]

    def _info(self) -> DatasetInfo:
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "caption": datasets.Value("string"),
                    "data_idx": datasets.Value("int32"),
                    "number": datasets.Value("int32"),
                    "label": datasets.ClassLabel(
                        names=[
                            "blues",
                            "classical",
                            "country",
                            "disco",
                            "hiphop",
                            "metal",
                            "pop",
                            "reggae",
                            "rock",
                            "jazz",
                        ]
                    ),
                }
            ),
            supervised_keys=("image", "caption"),
            homepage=_HOMEPAGE,
            citation="",
        )

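    # Expected repository layout (inferred from the folder-name matching in
    # _split_generators; the exact file names are an assumption):
    #   train/<archive>.zip       image archives for the train split
    #   train/<metadata>.jsonl    per-image metadata for the train split
    #   test/<archive>.zip        image archives for the test split
    #   test/<metadata>.jsonl     per-image metadata for the test split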
    def _split_generators(self, dl_manager: DownloadManager):
        # Query the Hub for the repository's file listing.
        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)

        # Resolve the metadata (.jsonl) files in the repo.
        metadata_urls = DataFilesDict.from_hf_repo(
            {datasets.Split.TRAIN: ["**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=["jsonl", ".jsonl"],
        )

        # Map each parent folder name (e.g. "train", "test") to its metadata file.
        metadata_paths = dict()
        for path in metadata_urls["train"]:
            folder = basename(dirname(path))
            metadata_paths[folder] = path

        # Resolve the image archives (.zip) and group them by split folder.
        data_urls = DataFilesDict.from_hf_repo(
            {datasets.Split.TRAIN: ["**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=["zip", ".zip"],
        )

        data_url = {"train": [], "test": []}
        for path in data_urls["train"]:
            folder = basename(dirname(path))
            if folder == "train":
                data_url["train"].append(path)
            elif folder == "test":
                data_url["test"].append(path)

        # Download each archive and materialise (file name, bytes) pairs per
        # split. iter_archive yields (path-in-archive, file handle) pairs
        # lazily, so the bytes are read here while the archive is still open.
        iter_archive = dict()
        for split, files in data_url.items():
            file_name_obj = list()
            for file_ in files:
                downloaded_files_path = dl_manager.download(file_)
                for inner_path, fobj in dl_manager.iter_archive(downloaded_files_path):
                    # Some archives nest files under a leading "content/"
                    # folder; strip it so names match the metadata entries.
                    if inner_path.startswith("content/"):
                        inner_path = basename(inner_path)
                    file_name_obj.append((inner_path, fobj.read()))
            iter_archive[split] = file_name_obj

        # Build one SplitGenerator per split ("train" / "test").
        gs = []
        for split in iter_archive:
            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
            gs.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "images": iter(iter_archive[split]),
                        "metadata_path": metadata_path,
                    },
                )
            )
        return gs

    def _generate_examples(self, images, metadata_path):
        """Generate image and caption examples for a split."""
        file_list = list()
        caption_list = list()
        dataIDX_list = list()
        num_list = list()
        label_list = list()

        # The metadata file is .jsonl: one JSON object per line.
        with open(metadata_path, encoding="utf-8") as fin:
            for line in fin:
                data = json.loads(line)
                file_list.append(data["file_name"])
                caption_list.append(data["caption"])
                dataIDX_list.append(data["data_idx"])
                num_list.append(data["number"])
                label_list.append(data["label"])

        # Pair each (file name, bytes) tuple with the metadata row at the same
        # index; this assumes the archive members appear in the same order as
        # the metadata lines.
        for idx, (file_path, image_bytes) in enumerate(images):
            yield file_path, {
                "image": {
                    "path": file_path,
                    "bytes": image_bytes,
                },
                "caption": caption_list[idx],
                "data_idx": dataIDX_list[idx],
                "number": num_list[idx],
                "label": label_list[idx],
            }
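

# Usage sketch (an assumption, not part of the original script): once this
# loading script lives in the mb23/GraySpectrogram repo, the dataset would
# normally be consumed through datasets.load_dataset. Recent versions of the
# datasets library may additionally require trust_remote_code=True to run a
# repository's loading script.
if __name__ == "__main__":
    ds = datasets.load_dataset(_NAME, split="train")
    example = ds[0]
    print(example["caption"], example["number"], example["label"])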