# ears_dataset / ears.py
# shannan27's picture
# Update ears.py
# a78d52d verified
import os
import tarfile
import datasets
import soundfile as sf # or any other library that can load audio files
class EarsDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
def _info(self):
    """Return the dataset metadata: features, homepage, and citation."""
    features = datasets.Features(
        {
            # sampling_rate=None lets each file keep its native rate.
            "audio": datasets.Audio(sampling_rate=None),
            # Speaker identifier string (populated for the train split).
            "spk_id": datasets.Value("string"),
        }
    )
    return datasets.DatasetInfo(
        description="EARS dataset containing audio files categorized by speaker IDs.",
        features=features,
        supervised_keys=None,
        homepage="https://huggingface.co/datasets/shannan27/ears_dataset",
        citation="Your citation here",
    )
def _split_generators(self, dl_manager: datasets.DownloadManager):
    """Download and extract the train/test archives and define the splits.

    Uses ``dl_manager.download_and_extract`` so the archives are extracted
    into the datasets cache directory. The previous implementation joined
    extraction paths against ``dl_manager.manual_dir``, which is ``None``
    unless the caller passes ``data_dir=`` to ``load_dataset`` — making
    ``os.path.join`` raise ``TypeError`` in the common case.

    Args:
        dl_manager: Download manager provided by the ``datasets`` runtime.

    Returns:
        A list with the TRAIN and TEST ``SplitGenerator``s; each passes the
        extracted folder(s) to ``_generate_examples`` via ``folder_paths``.
    """
    train_url = "https://huggingface.co/datasets/shannan27/ears_dataset/resolve/main/data/processed_train.tar.gz"
    test_url = "https://huggingface.co/datasets/shannan27/ears_dataset/resolve/main/data/test.tar.gz"
    # download_and_extract handles caching and tar.gz extraction safely.
    extracted_train_path = dl_manager.download_and_extract(train_url)
    extracted_test_path = dl_manager.download_and_extract(test_url)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"folder_paths": [os.path.join(extracted_train_path, "train")]},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"folder_paths": [os.path.join(extracted_test_path, "blind_testset")]},
        ),
    ]
def _extract_archive(self, archive_path, extract_to):
with tarfile.open(archive_path, "r:gz") as tar:
tar.extractall(path=extract_to)
def _generate_examples(self, folder_paths):
    """
    Yields examples from the dataset.

    Args:
        folder_paths: List of directories to scan. A folder whose path
            contains "train" is expected to hold one subdirectory per
            speaker, each containing that speaker's audio files.

    Yields:
        ``(key, example)`` pairs where ``key`` is ``"<spk_id>_<filename>"``
        and ``example`` has the decoded audio array/sampling-rate plus the
        speaker id.

    NOTE(review): only paths containing "train" are processed here, so the
    TEST split ("blind_testset") yields no examples from this code —
    confirm whether a test-set branch is missing or exists elsewhere.
    """
    print(folder_paths)  # debug leftover; consider replacing with logging
    for folder_path in folder_paths:
        if "train" in folder_path.lower():
            # Processing for train set: one subdirectory per speaker id.
            for spk_id in os.listdir(folder_path):
                spk_folder = os.path.join(folder_path, spk_id)
                if os.path.isdir(spk_folder):
                    for audio_file in os.listdir(spk_folder):
                        if audio_file.endswith(('.wav', '.mp3')):  # Add other extensions if needed
                            audio_path = os.path.join(spk_folder, audio_file)
                            # Eagerly decodes with soundfile; the Audio feature
                            # would also accept a bare file path.
                            audio_array, sampling_rate = sf.read(audio_path)  # Load audio file
                            yield f"{spk_id}_{audio_file}", {
                                'audio': {'array': audio_array, 'sampling_rate': sampling_rate},
                                'spk_id': spk_id,  # Include spk_id for train set
                            }