# nanospeech-dataset / nanospeech-dataset.py
# Uploaded by manojkmk — commit 7a816e6 ("Update nanospeech-dataset.py", verified)
import json
import os
import datasets
# Human-readable summary surfaced in DatasetInfo / the dataset card.
_DESCRIPTION = """
NanoSpeech dataset containing speech samples with transcriptions.
"""
# Canonical Hub location of this dataset (also where the shards are hosted).
_HOMEPAGE = "https://huggingface.co/datasets/manojkmk/nanospeech-dataset"
class NanoSpeechDatasetConfig(datasets.BuilderConfig):
    """Configuration object for the NanoSpeech dataset builder."""

    def __init__(self, **kwargs):
        """Create a config, forwarding every keyword argument unchanged.

        Args:
            **kwargs: keyword arguments passed through to ``datasets.BuilderConfig``.
        """
        # No extra config fields are needed; delegate entirely to the base class.
        super().__init__(**kwargs)
class NanoSpeechDataset(datasets.GeneratorBasedBuilder):
    """NanoSpeech dataset: Collection of audio samples with transcriptions."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        NanoSpeechDatasetConfig(
            name="default",
            version=VERSION,
            description="Default configuration for NanoSpeech dataset",
        ),
    ]

    def _info(self):
        """Return the dataset metadata (feature schema, homepage, description)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Audio is decoded lazily from the file path yielded in
                    # _generate_examples; samples are stored at 24 kHz.
                    "audio": datasets.Audio(sampling_rate=24000),
                    "text": datasets.Value("string"),
                    "duration": datasets.Value("float"),
                }
            ),
            supervised_keys=("audio", "text"),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the tar shards; return the single train split."""
        urls = {
            "train": [
                f"https://huggingface.co/datasets/manojkmk/nanospeech-dataset/resolve/main/shard-{i:06d}.tar"
                for i in range(5)  # the dataset is published as 5 fixed shards
            ]
        }
        data_dirs = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": data_dirs["train"]},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs from the extracted shard directories.

        Each example is an ``.mp3``/``.json`` sibling pair sharing a filename
        stem; the JSON file carries the ``"text"`` and ``"duration"`` fields.

        Args:
            filepaths: directories produced by extracting the downloaded tars.

        Yields:
            Tuples of (sequential int id, example dict matching the features
            declared in ``_info``).
        """
        id_ = 0
        for filepath in filepaths:
            for root, dirs, files in os.walk(filepath):
                # Sort traversal order so example ids are deterministic across
                # runs and filesystems (os.walk order is otherwise unspecified).
                dirs.sort()
                # Group files by stem so each audio file is paired with its
                # metadata JSON.
                file_groups = {}
                for file in sorted(files):
                    # Single splitext call (the original computed it twice).
                    stem, ext = os.path.splitext(file)
                    file_groups.setdefault(stem, {})[ext] = os.path.join(root, file)
                # Emit only complete mp3+json pairs, in sorted-stem order.
                for stem in sorted(file_groups):
                    files_dict = file_groups[stem]
                    if ".mp3" in files_dict and ".json" in files_dict:
                        with open(files_dict[".json"], "r", encoding="utf-8") as f:
                            metadata = json.load(f)
                        yield id_, {
                            # Path string; the Audio feature decodes it lazily.
                            "audio": files_dict[".mp3"],
                            "text": metadata["text"],
                            "duration": metadata["duration"],
                        }
                        id_ += 1