"""Babelbox Voice dataset."""
import csv
import codecs
import json
import os
from io import BytesIO
from typing import List

import datasets
import numpy as np
import torchaudio
from pydub import AudioSegment
from pydub.silence import detect_leading_silence
from tqdm import tqdm

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{babelboxvoice:2022,
  author = {Andersson, O. and Bjelkenhed, M. and Bielsa, M. and others},
  title  = {Babelbox Voice: A Speech Corpus for training Whisper},
  year   = 2022
}
"""

_HF_REPO_PATH = "https://huggingface.co/datasets/babelbox/babelbox_voice/"


class BabelboxVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for BabelboxVoice."""

    def __init__(self, name, version, **kwargs):
        self.features = kwargs.pop("features", None)
        self.data_url = kwargs.pop("data_url", None)
        self.nb_data_shards = kwargs.pop("nb_data_shards", None)
        self.metadata_url = kwargs.pop("metadata_url", None)
        # name, version and description are handled by the base class.
        description = kwargs.pop("description", "Babelbox Voice speech-to-text dataset.")
        super(BabelboxVoiceConfig, self).__init__(
            name=name,
            version=version,
            description=description,
            **kwargs,
        )


class BabelboxVoice(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BabelboxVoiceConfig(
            name="nst",
            version=VERSION,
            description="This part of Babelbox Voice includes data from the National Library of Norway.",
            features=["path", "audio", "text", "speaker_id", "sex", "accent"],
            # Shards are numbered 001..042, e.g. .../nst/nst-data-001.tar.gz.
            data_url=_HF_REPO_PATH + "resolve/main/data/nst/nst-data-{:0>3d}.tar.gz",
            nb_data_shards=42,
            metadata_url=_HF_REPO_PATH + "resolve/main/data/nst/metadata.tar.gz",
        )
    ]

    DEFAULT_CONFIG_NAME = "nst"

    def _info(self):
        description = (
            "Babelbox Voice is an initiative to help teach machines how real people speak."
        )
        # Every current config ("nst" is the only one so far) shares this schema.
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                # JSON-encoded transcript with timestamp offsets, stored as
                # bytes (see _generate_examples).
                "text": datasets.Value("binary"),
                "speaker_id": datasets.Value("string"),
                "sex": datasets.Value("string"),
                "accent": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            version=self.config.version,
            homepage=_HF_REPO_PATH,
            citation=_CITATION,
        )

    def get_metadata(self, dl_manager, metadata_url):
        if metadata_url is None:
            return None
        metadata_path = dl_manager.download(metadata_url)

        def clean_sentence(sentence):
            # Strip the NST punctuation markers (Swedish for comma, period,
            # exclamation mark and question mark) from the transcript.
            return (sentence
                    .replace("\\Komma", "")
                    .replace("\\Punkt", "")
                    .replace("\\Utropstecken", "")
                    .replace("\\Frågetecken", ""))

        metadata_archive = dl_manager.iter_archive(metadata_path)
        metadata = {}
        for path, file in metadata_archive:
            reader = csv.DictReader(codecs.iterdecode(file, "utf-8"))
            for row in tqdm(reader, desc="Reading metadata..."):
                filename = row["filename_channel_1"]
                metadata[filename] = {
                    "sentence": clean_sentence(row["text"]),
                    "speaker_id": row["Speaker_ID"],
                    "sex": row["Sex"],
                    "accent": row["Region_of_Youth"],
                }
        return metadata
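
    # The mapping above is keyed by the wav filename from the CSV's
    # `filename_channel_1` column, which must match the member paths inside
    # the audio tarballs. Hypothetical entry (values invented for illustration):
    #   metadata["r4670013.wav"] -> {"sentence": "...", "speaker_id": "16",
    #                                "sex": "Female", "accent": "..."}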

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        download_urls = [self.config.data_url.format(i) for i in range(1, self.config.nb_data_shards + 1)]
        archive_paths = dl_manager.download(download_urls)
        # In streaming mode the tarballs are never extracted; examples are read
        # directly from the archives via iter_archive below.
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        metadata = self.get_metadata(dl_manager, self.config.metadata_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    "metadata": metadata,
                },
            )
        ]
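
    # Only a TRAIN split is published. If a held-out set is needed it can be
    # carved out downstream, e.g. with Dataset.train_test_split (an assumption
    # about intended usage, not something this script enforces).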

    def _generate_examples(self, local_extracted_archive_paths, archives, metadata):
        sampling_rate = 16000

        def get_audiosegment(array):
            # Convert the float waveform in [-1, 1] to 16-bit PCM for pydub.
            byte_array = np.int16(array * 2 ** 15).tobytes()
            return AudioSegment(byte_array, frame_rate=sampling_rate, sample_width=2, channels=1)

        def get_leading_and_trailing_silence(audio_array):
            # detect_leading_silence returns milliseconds of silence from the
            # start; running it on the reversed segment measures the tail.
            audio_segment = get_audiosegment(audio_array)
            leading = detect_leading_silence(audio_segment)
            trailing = detect_leading_silence(audio_segment.reverse())
            return leading, trailing

        def get_timestamp(audio_array):
            # Start/end of speech in seconds, rounded to the nearest 0.02 s
            # (presumably to match Whisper's timestamp-token resolution).
            leading_silence, trailing_silence = get_leading_and_trailing_silence(audio_array)
            start_len = leading_silence / 1000
            start_time = round(start_len / 2, 2) * 2
            end_len = (len(audio_array) / sampling_rate) - (trailing_silence / 1000)
            end_time = round(end_len / 2, 2) * 2
            return (start_time, end_time)

        def filter_sentence(sentence):
            # NST marks recordings that contain no usable speech with this
            # Swedish placeholder ("... silent during this recording ...").
            if "... tyst under denna inspelning ..." in sentence:
                return False
            return True

        def get_audio(file):
            file_buf = BytesIO(file.read())
            array, audio_sr = torchaudio.load(file_buf, format="wav")
            # torchaudio returns a (channels, samples) float tensor; keep the
            # first channel as a 1-D numpy array.
            return array[0].numpy(), audio_sr
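
        # NOTE: the NST recordings are assumed to already be sampled at 16 kHz;
        # audio_sr is returned but never checked. If a file ever differed, the
        # waveform would need resampling (e.g. torchaudio.functional.resample)
        # before being emitted, since the Audio feature is fixed at 16_000.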

        for i, audio_archive in enumerate(archives):
            for archive_path, file in audio_archive:
                # Metadata is keyed by the path inside the tarball, so look it
                # up before rewriting the path for non-streaming mode.
                metadata_item = metadata[archive_path]
                if not filter_sentence(metadata_item['sentence']):
                    continue
                if local_extracted_archive_paths:
                    path = os.path.join(local_extracted_archive_paths[i], archive_path)
                else:
                    path = archive_path
                audio_array, audio_sr = get_audio(file)
                text = {
                    "text": metadata_item['sentence'],
                    "offsets": [
                        {"text": metadata_item['sentence'], "timestamp": get_timestamp(audio_array)}
                    ]
                }
                result = {
                    'path': path,
                    'audio': {"path": path, "array": audio_array, "sampling_rate": sampling_rate},
                    # The "text" feature is declared as binary, so serialize
                    # the transcript/offset dict to JSON bytes.
                    'text': json.dumps(text, ensure_ascii=False).encode("utf-8"),
                    'speaker_id': metadata_item['speaker_id'],
                    'sex': metadata_item['sex'],
                    'accent': metadata_item['accent']
                }
                yield archive_path, result
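

# Minimal usage sketch (assumes this script lives in the
# `babelbox/babelbox_voice` dataset repo on the Hugging Face Hub):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("babelbox/babelbox_voice", "nst", split="train", streaming=True)
#   sample = next(iter(ds))
#   print(sample["audio"]["sampling_rate"], sample["accent"])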