| | """NBMSet24: Nocturnal Bird Migration Dataset""" |
| |
|
| | import os |
| | import datasets |
| | import pandas as pd |
| | from tqdm.auto import tqdm |
| | import tarfile |
| |
|
| | from . import classes |
| |
|
| | from .classes import BIRD_NAMES_NBM |
| |
|
| | from .descriptions import _NBM_CITATION, _NBM_DESCRIPTION |
| |
|
| |
|
| | |
# BibTeX citation for the dataset (anonymised for review).
_NBMSET24_CITATION = """\
@article{birdset,
title = {NBMSet24},
author={anonymous},
year={2025}
}
"""
# Short description pointing at the accompanying paper.
_NBMSET24_DESCRIPTION = """\
Dataset from https://arxiv.org/abs/2412.03633
"""

# Root URL of the Hugging Face repository hosting the audio shards and
# metadata parquet files; shard file names are appended to this base.
base_url = "https://huggingface.co/datasets/ppeyret/NBMSet24/resolve/main/"
| |
|
| |
|
| | def _extract_all_to_same_folder(tar_path, output_dir): |
| | """custom extraction for tar.gz files, that extracts all files to output_dir without subfolders""" |
| | |
| | if not os.path.isfile(output_dir) and os.path.isdir(output_dir) and os.listdir(output_dir): |
| | return output_dir |
| | os.makedirs(output_dir, exist_ok=True) |
| |
|
| | with tarfile.open(tar_path, "r:gz") as tar: |
| | for member in tar.getmembers(): |
| | if member.isfile(): |
| | member.name = os.path.basename(member.name) |
| | tar.extract(member, path=output_dir) |
| |
|
| | return output_dir |
| |
|
| |
|
def _extract_and_delete(dl_dir: dict) -> dict:
    """Extract every downloaded archive and delete it immediately afterwards.

    Deleting each archive (and its downloader sidecar files) as soon as it is
    extracted keeps peak disk usage at roughly one archive plus its extracted
    content, instead of all archives at once. Progress is shown per split.

    Args:
        dl_dir: Mapping returned by ``dl_manager.download``. Entries whose
            value is a list are treated as lists of archive paths for one
            split; scalar entries (e.g. metadata parquet paths) are ignored.

    Returns:
        Mapping from split name to the list of extraction directories.
    """
    audio_paths = {name: [] for name, data in dl_dir.items() if isinstance(data, list)}
    for name, data in dl_dir.items():
        if not isinstance(data, list):
            # Metadata entry, not an archive list: nothing to extract.
            continue

        for path in tqdm(data, f"Extracting {name} split"):
            head, tail = os.path.split(path)
            output_dir = os.path.join(head, "extracted", tail)
            audio_path = _extract_all_to_same_folder(path, output_dir)
            os.remove(path)
            # Also drop the download manager's bookkeeping files, if present.
            for sidecar in (f"{path}.lock", f"{path}.json"):
                if os.path.exists(sidecar):
                    os.remove(sidecar)
            audio_paths[name].append(audio_path)

    return audio_paths
| |
|
| |
|
class NBMSetConfig(datasets.BuilderConfig):
    """BuilderConfig for NBMSet24.

    Args:
        name: Configuration name (also selects the shard counts downstream).
        citation: BibTeX citation stored on the resulting ``DatasetInfo``.
        class_list: Ordered list of class names for the ``ClassLabel``
            features.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (e.g. ``data_dir``,
            ``description``).
    """

    def __init__(self, name, citation, class_list, **kwargs):
        super().__init__(version=datasets.Version("0.0.1"), name=name, **kwargs)

        # NOTE(review): dropped leftover debug `print` statements that dumped
        # the config name and full class list to stdout on every import.

        # Schema of one example: a single annotated event in a recording.
        features = datasets.Features({
            # Raw (undecoded) audio; decoding is left to the consumer.
            "audio": datasets.Audio(sampling_rate=32_000, mono=True, decode=False),
            "filepath": datasets.Value("string"),
            "start_time": datasets.Value("float64"),
            "end_time": datasets.Value("float64"),
            "low_freq": datasets.Value("int64"),
            "high_freq": datasets.Value("int64"),
            "ebird_code": datasets.ClassLabel(names=class_list),
            "ebird_code_multilabel": datasets.Sequence(datasets.ClassLabel(names=class_list)),
            "ebird_code_secondary": datasets.Sequence(datasets.Value("string")),
            "label": datasets.Value("string"),
            "original_label": datasets.Value("string"),
            "microphone": datasets.Value("string"),
            "source": datasets.Value("string"),
            "xenocanto_id": datasets.Value("int64"),
            # One (start, end) pair per detected event, hence the nested
            # sequence of float64 — TODO confirm against the metadata files.
            "detected_events": datasets.Sequence(datasets.Sequence(datasets.Value("float64"))),
        })

        self.features = features
        self.citation = citation
| |
|
| |
|
class BirdSet(datasets.GeneratorBasedBuilder):
    """Dataset builder for NBMSet24 (nocturnal bird migration recordings).

    Audio is shipped as sharded ``.tar.gz`` archives plus one parquet
    metadata file per split; examples are produced by joining each audio
    file with its metadata rows (one example per annotated event).
    """

    # Keep writer batches small: raw audio examples are large.
    DEFAULT_WRITER_BATCH_SIZE = 500

    BUILDER_CONFIGS = [
        NBMSetConfig(
            name="NBMSet24",
            description=_NBMSET24_DESCRIPTION,
            citation=_NBMSET24_CITATION,
            data_dir=f"{base_url}",
            class_list=BIRD_NAMES_NBM,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_NBMSET24_DESCRIPTION + self.config.description,
            features=self.config.features,
            # Fixed: the description string was previously appended to the
            # citation field; the citation field should carry citations only.
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        ds_name = self.config.name

        # Number of audio shards per split, keyed by config name.
        train_files = {"NBMSet24": 12}
        test_files = {"NBMSet24": 1}

        dl_dir = dl_manager.download({
            "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
            "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
            "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
            "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
        })

        # In non-streaming mode, extract all archives up-front (each archive
        # is deleted right after extraction to limit peak disk usage).
        is_streaming = dl_manager.is_streaming
        audio_paths = None if is_streaming else _extract_and_delete(dl_dir)

        names = [name for name in dl_dir.keys() if not name.startswith("meta_")]

        return [datasets.SplitGenerator(
            name=name,
            gen_kwargs={
                # Exactly one of the two audio sources is populated,
                # depending on streaming mode.
                "audio_archive_iterators": (dl_manager.iter_archive(archive_path) for archive_path in dl_dir[name]) if is_streaming else (),
                "audio_extracted_paths": audio_paths[name] if not is_streaming else (),
                "meta_path": dl_dir[f"meta_{name}"],
                "split": name,
            }
        ) for name in names]

    def _generate_examples(self, audio_archive_iterators, audio_extracted_paths, meta_path, split):
        metadata = pd.read_parquet(meta_path)
        # Index metadata by bare file name so audio files can be joined
        # regardless of which directory they were extracted to.
        if metadata.index.name != "filepath":
            metadata.index = metadata["filepath"].str.split("/").apply(lambda x: x[-1])

        idx = 0
        # Streaming path: read audio bytes straight out of the archives.
        for audio_archive_iterator in audio_archive_iterators:
            for audio_path_in_archive, audio_file in audio_archive_iterator:
                file_name = os.path.split(audio_path_in_archive)[-1]
                # .loc[[name]] always yields a DataFrame, so one audio file
                # can contribute several annotated events.
                rows = metadata.loc[[file_name]]
                audio = audio_file.read()
                for _, row in rows.iterrows():
                    yield idx, self._metadata_from_row(row, split, audio_path=file_name, audio=audio)
                    idx += 1

        # Non-streaming path: audio was extracted to local directories.
        for audio_extracted_path in audio_extracted_paths:
            audio_files = os.listdir(audio_extracted_path)
            current_metadata = metadata.loc[audio_files]
            for audio_file, row in current_metadata.iterrows():
                audio_path = os.path.join(audio_extracted_path, audio_file)
                yield idx, self._metadata_from_row(row, split, audio_path=audio_path)
                idx += 1

    @staticmethod
    def _metadata_from_row(row, split: str, audio_path=None, audio=None) -> dict:
        """Build one example dict from a metadata row.

        When *audio* bytes are given (streaming mode) they are embedded in
        the example; otherwise the example references *audio_path* on disk.
        """
        multilabel = row.get("ebird_code_multilabel", None)
        # "no_call" annotations map to an empty multilabel list. Guarding on
        # None also fixes a crash when the stored value is null (the previous
        # `"no_call" not in None` raised TypeError).
        if multilabel is not None and "no_call" in multilabel:
            multilabel = []
        return {
            # Test `audio is not None` (not truthiness) so zero-length audio
            # bytes are still embedded rather than silently replaced by the
            # bare file name.
            "audio": {"path": None, "bytes": audio} if audio is not None else audio_path,
            "filepath": audio_path,
            "start_time": row["start_time"],
            "end_time": row["end_time"],
            "low_freq": row["low_freq"],
            "high_freq": row["high_freq"],
            "ebird_code": row.get("ebird_code", None),
            "ebird_code_multilabel": multilabel,
            "ebird_code_secondary": row.get("ebird_code_secondary", None),
            "original_label": row["original_label"],
            "label": row["label"],
            "microphone": row["microphone"],
            "source": row["source"],
            "xenocanto_id": row["xenocanto_id"],
            "detected_events": row.get("detected_events", None),
        }
| |
|