# NBMSet24.py — Hugging Face dataset loading script for the NBMSet24 dataset
# (repository: ppeyret/NBMSet24).
"""NBMSet24: Nocturnal Bird Migration Dataset"""
import os
import datasets
import pandas as pd
from tqdm.auto import tqdm
import tarfile
from . import classes
from .classes import BIRD_NAMES_NBM
from .descriptions import _NBM_CITATION, _NBM_DESCRIPTION
#############################################
# BibTeX citation attached to the generated DatasetInfo.
_NBMSET24_CITATION = """\
@article{birdset,
title = {NBMSet24},
author={anonymous},
year={2025}
}
"""
# Short description attached to the generated DatasetInfo.
_NBMSET24_DESCRIPTION = """\
Dataset from https://arxiv.org/abs/2412.03633
"""
# Root URL of the Hugging Face dataset repo hosting the archive shards
# and the per-split parquet metadata files.
base_url = "https://huggingface.co/datasets/ppeyret/NBMSet24/resolve/main/"
def _extract_all_to_same_folder(tar_path, output_dir):
"""custom extraction for tar.gz files, that extracts all files to output_dir without subfolders"""
# check if data already exists
if not os.path.isfile(output_dir) and os.path.isdir(output_dir) and os.listdir(output_dir):
return output_dir
os.makedirs(output_dir, exist_ok=True)
with tarfile.open(tar_path, "r:gz") as tar:
for member in tar.getmembers():
if member.isfile():
member.name = os.path.basename(member.name)
tar.extract(member, path=output_dir)
return output_dir
def _extract_and_delete(dl_dir: dict) -> dict:
    """Extract every downloaded archive and delete it immediately afterwards.

    Only the archive currently being processed and its extracted content
    coexist on disk, which roughly halves peak storage compared to
    extracting everything first and cleaning up at the end.

    Args:
        dl_dir: Mapping returned by ``dl_manager.download``. List values are
            archive paths (one list per split); non-list values (e.g. the
            metadata parquet paths) are passed over untouched.

    Returns:
        Mapping of split name to the list of extracted directory paths.
    """
    audio_paths = {name: [] for name, data in dl_dir.items() if isinstance(data, list)}
    for name, data in dl_dir.items():
        if not isinstance(data, list):
            continue
        # extract and immediately delete archives
        for path in tqdm(data, desc=f"Extracting {name} split"):
            head, tail = os.path.split(path)
            output_dir = os.path.join(head, "extracted", tail)
            # dl_manager.extract(path) would suffice if archives contained no
            # subfolders; flatten explicitly to be safe.
            audio_path = _extract_all_to_same_folder(path, output_dir)
            os.remove(path)
            # datasets >3.0.0 handles its download cache differently and may
            # leave sidecar files next to the archive — remove those too.
            for sidecar in (f"{path}.lock", f"{path}.json"):
                if os.path.exists(sidecar):
                    os.remove(sidecar)
            audio_paths[name].append(audio_path)
    return audio_paths
class NBMSetConfig(datasets.BuilderConfig):
    """BuilderConfig for NBMSet24 dataset variants.

    Args:
        name: Configuration name (selects the dataset variant).
        citation: BibTeX citation attached to the resulting ``DatasetInfo``.
        class_list: Ordered class names backing the ``ClassLabel`` features
            (the primary label and the multilabel target share this
            vocabulary).
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (``description``,
            ``data_dir``, ...).
    """

    def __init__(
            self,
            name,
            citation,
            class_list,
            # genus_list,
            # species_group_list,
            # order_list,
            **kwargs):
        super().__init__(version=datasets.Version("0.0.1"), name=name, **kwargs)
        # NOTE: debug print() calls removed here — they fired at import time
        # because BUILDER_CONFIGS instantiates this config on module load.
        # Audio stays encoded (decode=False): decoding cost is paid lazily by
        # the consumer, not during dataset generation.
        features = datasets.Features({
            "audio": datasets.Audio(sampling_rate=32_000, mono=True, decode=False),
            "filepath": datasets.Value("string"),
            "start_time": datasets.Value("float64"),
            "end_time": datasets.Value("float64"),
            "low_freq": datasets.Value("int64"),
            "high_freq": datasets.Value("int64"),
            "ebird_code": datasets.ClassLabel(names=class_list),
            "ebird_code_multilabel": datasets.Sequence(datasets.ClassLabel(names=class_list)),
            "ebird_code_secondary": datasets.Sequence(datasets.Value("string")),
            "label": datasets.Value("string"),
            "original_label": datasets.Value("string"),
            # "french_label": datasets.Value("string"),
            # "call_type": datasets.Value("string"),
            # "sex": datasets.Value("string"),
            # "lat": datasets.Value("float64"),
            # "long": datasets.Value("float64"),
            # "length": datasets.Value("int64"),
            "microphone": datasets.Value("string"),
            # "license": datasets.Value("string"),
            "source": datasets.Value("string"),
            "xenocanto_id": datasets.Value("int64"),
            "detected_events": datasets.Sequence(datasets.Sequence(datasets.Value("float64"))),
            # "event_cluster": datasets.Sequence(datasets.Value("int64")),
            # "peaks": datasets.Sequence(datasets.Value("float64")),
            # "quality": datasets.Value("string"),
            # "recordist": datasets.Value("string"),
            # "genus": datasets.ClassLabel(names=genus_list),
            # "species_group": datasets.ClassLabel(names=species_group_list),
            # "order": datasets.ClassLabel(names=order_list),
            # "genus_multilabel": datasets.Sequence(datasets.ClassLabel(names=genus_list)),
            # "species_group_multilabel": datasets.Sequence(datasets.ClassLabel(names=species_group_list)),
            # "order_multilabel": datasets.Sequence(datasets.ClassLabel(names=order_list)),
        })
        self.features = features
        self.citation = citation
class BirdSet(datasets.GeneratorBasedBuilder):
    """GeneratorBasedBuilder for the NBMSet24 nocturnal bird migration dataset.

    Downloads sharded ``tar.gz`` audio archives plus per-split parquet
    metadata from the Hub, then yields one example per annotated audio
    event. Supports both streaming mode (archives iterated in place) and
    non-streaming mode (archives extracted and deleted immediately to limit
    peak disk usage).
    """
    # Smaller Arrow writer batches to limit peak RAM during generation.
    DEFAULT_WRITER_BATCH_SIZE = 500
    BUILDER_CONFIGS = [
        NBMSetConfig(
            name="NBMSet24",
            description=_NBMSET24_DESCRIPTION,
            citation=_NBMSET24_CITATION,
            data_dir=f"{base_url}",
            class_list=BIRD_NAMES_NBM,
            # genus_list=classes.GENUS_NBM,
            # species_group_list=classes.SPECIES_GROUP_NBM,
            # order_list=classes.ORDER_NBM,
        ),
        # NBMSetConfig(
        #     name="NBM_xc",
        #     description=_NBMSET24_DESCRIPTION,
        #     citation=_NBMSET24_CITATION,
        #     data_dir=f"{base_url}",
        #     class_list=BIRD_NAMES_NBM,
        #     # genus_list=classes.GENUS_NBM,
        #     # species_group_list=classes.SPECIES_GROUP_NBM,
        #     # order_list=classes.ORDER_NBM,
        # ),
        # NBMSetConfig(
        #     name="NBM_scape",
        #     description=_NBMSET24_DESCRIPTION,
        #     citation=_NBMSET24_CITATION,
        #     data_dir=f"{base_url}",
        #     class_list=BIRD_NAMES_NBM,
        #     # genus_list=classes.GENUS_NBM,
        #     # species_group_list=classes.SPECIES_GROUP_NBM,
        #     # order_list=classes.ORDER_NBM,
        # ),
    ]

    def _info(self):
        """Build the DatasetInfo from the active config."""
        return datasets.DatasetInfo(
            description=_NBMSET24_DESCRIPTION + self.config.description,
            features=self.config.features,
            # NOTE(review): the description constant is appended to the
            # citation here — _NBMSET24_CITATION was presumably intended;
            # confirm before changing.
            citation=self.config.citation + "\n" + _NBMSET24_DESCRIPTION,
        )

    def _split_generators(self, dl_manager):
        """Download archives and metadata, return one SplitGenerator per split."""
        ds_name = self.config.name
        # Number of uploaded archive (tar.gz) shards per dataset, per split.
        train_files = {"NBMSet24": 12
        }
        test_files = {"NBMSet24": 1,
        }
        # test_5s_files = {"NBM",}
        dl_dir = dl_manager.download({
            "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
            "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
            # "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files[ds_name] + 1)],
            "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
            "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
            # "meta_test_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
        })
        # Non-streaming: custom extraction that deletes each archive right
        # after it is extracted (keeps peak disk usage low).
        audio_paths = _extract_and_delete(dl_dir) if not dl_manager.is_streaming else None
        # Construct split generators.
        # Assumes every split key NAME in dl_dir has a matching meta_NAME key.
        names = [name for name in dl_dir.keys() if not name.startswith("meta_")]
        is_streaming = dl_manager.is_streaming
        return [datasets.SplitGenerator(
            name=name,
            gen_kwargs={
                # Streaming reads files straight out of the tar archives...
                "audio_archive_iterators": (dl_manager.iter_archive(archive_path) for archive_path in dl_dir[name]) if is_streaming else (),
                # ...non-streaming reads from the extracted folders instead.
                "audio_extracted_paths": audio_paths[name] if not is_streaming else (),
                "meta_path": dl_dir[f"meta_{name}"],
                "split": name
            }
        ) for name in names]

    def _generate_examples(self, audio_archive_iterators, audio_extracted_paths, meta_path, split):
        """Yield ``(index, example)`` pairs for one split.

        Exactly one of ``audio_archive_iterators`` (streaming mode) or
        ``audio_extracted_paths`` (non-streaming mode) is non-empty; the
        other loop below is then a no-op.
        """
        metadata = pd.read_parquet(meta_path)
        # Re-index the metadata by bare filename so rows can be looked up
        # per audio file.
        if metadata.index.name != "filepath":
            metadata.index = metadata["filepath"].str.split("/").apply(lambda x: x[-1])
        idx = 0
        # In case of streaming: iterate the archives in place.
        for audio_archive_iterator in audio_archive_iterators:
            for audio_path_in_archive, audio_file in audio_archive_iterator:
                file_name = os.path.split(audio_path_in_archive)[-1]
                # One audio file may carry several annotated events (rows).
                rows = metadata.loc[[file_name]]
                audio = audio_file.read()
                for _, row in rows.iterrows():
                    yield idx, self._metadata_from_row(row, split, audio_path=file_name, audio=audio)
                    idx += 1
        # In case of not streaming: walk the extracted folders on disk.
        for audio_extracted_path in audio_extracted_paths:
            audio_files = os.listdir(audio_extracted_path)
            current_metadata = metadata.loc[audio_files]
            for audio_file, row in current_metadata.iterrows():
                audio_path = os.path.join(audio_extracted_path, audio_file)
                yield idx, self._metadata_from_row(row, split, audio_path=audio_path)
                idx += 1

    @staticmethod
    def _metadata_from_row(row, split: str, audio_path=None, audio=None) -> dict:
        """Map one metadata row to an example dict matching the config features.

        When ``audio`` bytes are provided (streaming mode) they are embedded
        directly; otherwise ``audio_path`` references the extracted file on
        disk.
        """
        # NOTE(review): `not audio` also treats zero-length audio bytes as
        # "no audio" — presumably archives never contain empty files; confirm.
        return {"audio": audio_path if not audio else {"path": None, "bytes": audio},
                "filepath": audio_path,
                "start_time": row["start_time"],
                "end_time": row["end_time"],
                "low_freq": row["low_freq"],
                "high_freq": row["high_freq"],
                "ebird_code": row.get("ebird_code", None),
                # Events labelled "no_call" get an empty multilabel list.
                "ebird_code_multilabel": row.get("ebird_code_multilabel", None) if "no_call" not in row.get("ebird_code_multilabel", []) else [],
                "ebird_code_secondary": row.get("ebird_code_secondary", None),
                "original_label": row["original_label"],
                "label": row["label"],
                # "french_label": row["french_label"],
                # "call_type": row["call_type"],
                # "sex": row["sex"],
                # "lat": row["lat"],
                # "long": row["long"],
                # "length": row.get("length", None),
                "microphone": row["microphone"],
                # "license": row.get("license", None),
                "source": row["source"],
                "xenocanto_id": row["xenocanto_id"],
                # "local_time": row["local_time"],
                "detected_events": row.get("detected_events", None),
                # "event_cluster": row.get("event_cluster", None),
                # "peaks": row.get("peaks", None),
                # "quality": row.get("quality", None),
                # "recordist": row.get("recordist", None),
                # "genus": row.get("genus", None) if split != "test_5s" else None,
                # "species_group": row.get("species_group", None) if split != "test_5s" else None,
                # "order": row.get("order", None) if split != "test_5s" else None,
                # "genus_multilabel": row.get("genus_multilabel", [row.get("genus")]),
                # "species_group_multilabel": row.get("species_group_multilabel", [row.get("species_group")]),
                # "order_multilabel": row.get("order_multilabel", [row.get("order")]),
                }