code
stringlengths
17
6.64M
class FluentSpeechCommands(Corpus):
    """
    Parse the Fluent Speech Command dataset

    Args:
        dataset_root: (str) The dataset root of Fluent Speech Command
    """

    def __init__(self, dataset_root: str, n_jobs: int = 4) -> None:
        self.dataset_root = Path(dataset_root)
        # Each split CSV holds one row per utterance; rows are keyed by the
        # wav filename stem (see _get_unique_name) so the splits can be merged.
        self.train = self.dataframe_to_datapoints(
            pd.read_csv(self.dataset_root / "data" / "train_data.csv"),
            self._get_unique_name,
        )
        self.valid = self.dataframe_to_datapoints(
            pd.read_csv(self.dataset_root / "data" / "valid_data.csv"),
            self._get_unique_name,
        )
        self.test = self.dataframe_to_datapoints(
            pd.read_csv(self.dataset_root / "data" / "test_data.csv"),
            self._get_unique_name,
        )

        data_points = OrderedDict()
        data_points.update(self.train)
        data_points.update(self.valid)
        data_points.update(self.test)
        data_points = {key: self._parse_data(data) for key, data in data_points.items()}
        self._all_data = data_points

    @staticmethod
    def _get_unique_name(data_point):
        # The wav filename stem is unique across all three splits.
        return Path(data_point["path"]).stem

    def _parse_data(self, data):
        # Resolve the relative wav path against the dataset root and keep the
        # SLU labels (action / object / location) next to the transcription.
        return dict(
            path=self.dataset_root / data["path"],
            speakerId=data["speakerId"],
            transcription=data["transcription"],
            action=data["action"],
            object=data["object"],
            location=data["location"],
        )

    @property
    def all_data(self):
        """
        Return all the data points in a dict of the format

        .. code-block:: yaml

            data_id1:
                path: (str) The waveform path
                speakerId: (str) The speaker name
                transcription: (str) The transcription
                action: (str) The action
                object: (str) The action's targeting object
                location: (str) The location where the action happens

            data_id2:
                ...
        """
        return self._all_data

    @property
    def data_split(self):
        """
        Return a list:

        :code:`train_data`, :code:`valid_data`, :code:`test_data`

        each is a dict following the format specified in :obj:`all_data`
        """
        return super().data_split

    @property
    def data_split_ids(self):
        """
        Return a list:

        :code:`train_ids`, :code:`valid_ids`, :code:`test_ids`

        Each is a list containing data_ids. data_ids can be used as the key to access the :obj:`all_data`
        """
        return (
            list(self.train.keys()),
            list(self.valid.keys()),
            list(self.test.keys()),
        )

    @classmethod
    def download_dataset(cls, tgt_dir: str) -> None:
        """
        Download and unzip the dataset to :code:`tgt_dir`/fluent_speech_commands_dataset

        Args:
            tgt_dir (str): The root directory containing many different datasets
        """
        import os
        import tarfile

        import requests

        tgt_dir = Path(tgt_dir)
        # BUGFIX: Path.mkdir's keyword is `exist_ok`, not `exists_ok` — the
        # original raised TypeError every time this method ran.
        tgt_dir.mkdir(exist_ok=True, parents=True)

        def unzip_targz_then_delete(filepath: str):
            # Extract next to the other datasets, then drop the archive.
            with tarfile.open(os.path.abspath(filepath)) as tar:
                tar.extractall(path=os.path.abspath(tgt_dir))
            os.remove(os.path.abspath(filepath))

        def download_from_url(url: str):
            filename = url.split("/")[-1].replace(" ", "_")
            filepath = os.path.join(tgt_dir, filename)

            r = requests.get(url, stream=True)
            if r.ok:
                # BUGFIX: the original passed the path as a stray positional
                # argument with no %-placeholder, which makes logging raise an
                # internal formatting error; interpolate into the message.
                logger.info(f"Saving {filename} to {os.path.abspath(filepath)}")
                with open(filepath, "wb") as f:
                    # Stream in 10 MB chunks so large archives never sit in RAM.
                    for chunk in r.iter_content(chunk_size=1024 * 1024 * 10):
                        if chunk:
                            f.write(chunk)
                            f.flush()
                            os.fsync(f.fileno())
                logger.info(f"{filename} successfully downloaded")
                unzip_targz_then_delete(filepath)
            else:
                logger.info(f"Download failed: status code {r.status_code}\n{r.text}")

        # Only download when the extracted layout is incomplete.
        if not (
            os.path.exists(
                os.path.join(os.path.abspath(tgt_dir), "fluent_speech_commands_dataset/wavs")
            )
            and os.path.exists(
                os.path.join(
                    os.path.abspath(tgt_dir), "fluent_speech_commands_dataset/data/speakers"
                )
            )
        ):
            download_from_url("http://140.112.21.28:9000/fluent.tar.gz")
        logger.info(
            f"Fluent speech commands dataset downloaded. Located at {os.path.abspath(tgt_dir)}/fluent_speech_commands_dataset/"
        )
class IEMOCAP(Corpus):
    """
    Parse the IEMOCAP dataset

    Args:
        dataset_root: (str) The dataset root of IEMOCAP
    """

    def __init__(self, dataset_root: str, n_jobs: int = 4) -> None:
        self.dataset_root = Path(dataset_root)
        self.sessions = [
            self._preprocess_single_session(self.dataset_root, session_id)
            for session_id in range(1, IEMOCAP_SESSION_NUM + 1)
        ]
        self._all_data = dict()
        for session in self.sessions:
            self._all_data.update(session["improvised"])
            self._all_data.update(session["scripted"])

    @staticmethod
    def _preprocess_single_session(dataset_root: Path, session_id: int):
        """Collect every wav of one session, keyed by act (improvised/scripted)."""
        data = dict(improvised={}, scripted={})

        session_dir = dataset_root / f"Session{session_id}"
        label_dir = session_dir / LABEL_DIR_PATH
        wav_root_dir = session_dir / WAV_DIR_PATH
        wav_paths = find_files(wav_root_dir)
        for wav_path in wav_paths:
            wav_path = Path(wav_path)
            # Parent folder name (e.g. "Ses01F_impro01") encodes speaker + act.
            spk_and_act_and_scene = wav_path.parts[-2]
            label_file = label_dir / f"{spk_and_act_and_scene}.txt"
            with label_file.open() as file:
                content = file.read()
            # Find the "<utt-id> <emotion> " entry for this wav in the label file.
            # NOTE(review): assumes every wav has a matching label line; a
            # missing entry leaves `result` None and raises below — confirm.
            result = re.search(f"{str(wav_path.stem)} (.+) ", content)
            speaker = spk_and_act_and_scene.split("_")[0]
            act = "improvised" if "impro" in spk_and_act_and_scene else "scripted"
            emotion = result.groups()[0]
            unique_id = wav_path.stem
            data[act][unique_id] = dict(
                wav_path=str(wav_path),
                speaker=speaker,
                act=act,
                emotion=emotion,
                session_id=session_id,
            )
        return data

    @property
    def all_data(self):
        """
        Return:
            dict

            all the data points of IEMOCAP in the format of

            .. code-block:: yaml

                data_id1:
                    wav_path (str): The waveform path
                    speaker (str): The speaker name
                    act (str): improvised / scripted
                    emotion (str): The emotion label
                    session_id (int): The session

                data_id2:
                    ...
        """
        # deepcopy alone suffices; the original's extra `.copy()` was redundant.
        return deepcopy(self._all_data)

    def get_whole_session(self, session_id: int):
        """
        Args:
            session_id (int): The session index selected from 1, 2, 3, 4, 5

        Return:
            dict

            data points in a single session (containing improvised and scripted recordings) in the
            same format as :obj:`all_data`
        """
        output = dict()
        output.update(self.get_session_with_act(session_id, "improvised"))
        output.update(self.get_session_with_act(session_id, "scripted"))
        return deepcopy(output)

    def get_session_with_act(self, session_id: int, act: str):
        """
        Args:
            session_id (int): The session index selected from 1, 2, 3, 4, 5
            act (str): 'improvised' or 'scripted'

        Return:
            :obj:`s3prl.base.container.Container`

            data points in a single session with a specific act (either improvised or scripted) in the
            same format as :obj:`all_data`
        """
        assert act in ["improvised", "scripted"]
        return deepcopy(self.sessions[session_id - 1][act])

    @classmethod
    def download_dataset(cls, tgt_dir: str) -> None:
        """Download and extract IEMOCAP into :code:`tgt_dir`/IEMOCAP."""
        import os
        import tarfile

        import requests

        assert os.path.exists(os.path.abspath(tgt_dir)), "Target directory does not exist"

        def unzip_targz_then_delete(filepath: str):
            with tarfile.open(os.path.abspath(filepath)) as tar:
                tar.extractall(path=os.path.abspath(tgt_dir))
            os.remove(os.path.abspath(filepath))

        def download_from_url(url: str):
            filename = url.split("/")[-1].replace(" ", "_")
            filepath = os.path.join(tgt_dir, filename)

            r = requests.get(url, stream=True)
            if r.ok:
                # BUGFIX: path was passed as a stray positional argument with no
                # %-placeholder, making logging raise a format error.
                logger.info(f"Saving {filename} to {os.path.abspath(filepath)}")
                with open(filepath, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1024 * 1024 * 10):
                        if chunk:
                            f.write(chunk)
                            f.flush()
                            os.fsync(f.fileno())
                logger.info(f"{filename} successfully downloaded")
                unzip_targz_then_delete(filepath)
            else:
                logger.info(f"Download failed: status code {r.status_code}\n{r.text}")

        if not os.path.exists(os.path.join(os.path.abspath(tgt_dir), "IEMOCAP/")):
            download_from_url("http://140.112.21.28:9000/IEMOCAP.tar.gz")
        logger.info(
            f"IEMOCAP dataset downloaded. Located at {os.path.abspath(tgt_dir)}/IEMOCAP/"
        )
def read_text(file: Path) -> str:
    """Return the transcription of a LibriSpeech-style utterance.

    The transcript sits next to the audio: for ``84-121123-0001.flac`` it is
    ``84-121123.trans.txt``, holding one ``<utt-id> <text>`` pair per line.
    Logs a warning and returns None when no matching line exists.
    """
    trans_path = '-'.join(str(file).split('-')[:-1]) + '.trans.txt'
    utt_id = file.stem.replace('.flac', '')
    with open(trans_path, 'r') as trans_file:
        for line in trans_file:
            head, _, tail = line.partition(' ')
            if head == utt_id:
                # Drop the trailing newline kept by file iteration.
                return tail[:-1]
    logging.warning(f'Transcription of {file} not found!')
def check_no_repeat(splits: List[str]) -> bool:
    """Verify that no split name appears more than once.

    Logs a warning listing every duplicated split and returns False when any
    duplicate exists; returns True otherwise.
    """
    occurrences = {}
    for name in splits:
        occurrences[name] = occurrences.get(name, 0) + 1
    repeated = ''.join(
        f' {name} ({times} times)' for name, times in occurrences.items() if times > 1
    )
    if repeated:
        logging.warning(
            f'Found repeated splits in corpus: {repeated}, which might cause unexpected behaviors.'
        )
        return False
    return True
def _parse_spk_to_gender(speaker_file: Path) -> dict:
    """Parse LibriSpeech's SPEAKERS.TXT into ``{speaker_id: gender}``.

    The file opens with free-form comment lines; the data table starts right
    after the header row that names the SEX/SUBSET/MINUTES/NAME columns.
    """
    with Path(speaker_file).open() as handle:
        lines = [raw.strip() for raw in handle]
    for header_idx, line in enumerate(lines):
        if all(col in line for col in ('SEX', 'SUBSET', 'MINUTES', 'NAME')):
            break
    spk2gender = {}
    for row in lines[header_idx + 1:]:
        # Normalize whitespace before splitting on the column separator.
        row = re.sub('\t+', ' ', row)
        row = re.sub(' +', ' ', row)
        spk_id, sex, _subset, _minutes, _name = row.split('|', maxsplit=4)
        spk2gender[int(spk_id)] = sex.strip()
    return spk2gender
class LibriLight(Corpus):
    """libri-light limited-supervision corpus (librispeech_finetuning).

    Args:
        dataset_root (str): Root of the extracted librispeech_finetuning folder.
        n_jobs (int): Parallel jobs used to read transcription files.
        train_split (str): "10h", "1h", or "10m-foldN" with N in 0-5.
    """

    def __init__(self, dataset_root: str, n_jobs: int = 4, train_split: str = "10m-fold0") -> None:
        self.dataset_root = Path(dataset_root).resolve()
        self.train_split = train_split
        if train_split == "10h":
            # The 10-hour condition is the union of the 1h and 9h folders.
            roots = [self.dataset_root / "1h", self.dataset_root / "9h"]
        elif train_split == "1h":
            roots = [self.dataset_root / "1h"]
        elif train_split.startswith("10m"):
            # "10m-foldN" selects one 10-minute fold stored under 1h/N.
            fold_id = int(train_split.split("-")[-1].split("fold")[-1])
            roots = [self.dataset_root / "1h" / str(fold_id)]
        else:
            raise ValueError(f"Unsupported split: {train_split}")
        self._data = self._collect_data(roots, n_jobs)

    @classmethod
    def download_dataset(cls, dataset_root: str):
        """Download librispeech_finetuning.tgz and extract it into dataset_root."""
        Path(dataset_root).mkdir(parents=True, exist_ok=True)
        archive = Path(dataset_root) / "librispeech_finetuning.tgz"
        subprocess.check_call(
            [
                "wget",
                "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz",
                "-O",
                str(archive),
            ]
        )
        # BUGFIX: give tar the archive's full path. The original passed the
        # bare filename, which only resolved when cwd happened to be
        # dataset_root; tar looks the file up relative to the current cwd.
        subprocess.check_call(["tar", "zxvf", str(archive), "-C", str(Path(dataset_root))])

    @property
    def all_data(self):
        # {utt_id: {wav_path, transcription, speaker, gender}}
        return self._data

    @staticmethod
    def _collect_data(roots: List[Path], n_jobs: int = 4) -> Dict[str, Dict[str, List[Any]]]:
        """Walk each split root and gather wav path/text/speaker/gender per utterance."""
        # Speaker genders come from the official LibriSpeech SPEAKERS.TXT.
        spkr_file = _urls_to_filepaths(LIBRISPEECH_SPKR_INFO)
        spkr2gender = _parse_spk_to_gender(Path(spkr_file).resolve())

        data_dict = {}
        for split_dir in roots:
            if not os.path.exists(split_dir):
                logging.info(f"Split {split_dir} is not downloaded. Skip data collection.")
                continue
            wav_list = list(Path(split_dir).rglob("*.flac"))
            name_list = [file.stem.replace(".flac", "") for file in wav_list]
            text_list = Parallel(n_jobs=n_jobs)(delayed(read_text)(file) for file in wav_list)
            # Utterance ids look like "<speaker>-<chapter>-<utt>".
            spkr_list = [int(name.split("-")[0]) for name in name_list]

            for wav_id in range(len(wav_list)):
                wav = Path(wav_list[wav_id])
                data_dict[wav.stem] = {
                    "wav_path": str(wav.resolve()),
                    "transcription": text_list[wav_id],
                    "speaker": spkr_list[wav_id],
                    "gender": spkr2gender[spkr_list[wav_id]],
                }
        return data_dict
def read_text(file: Path) -> str:
    """Look up the transcription of one utterance in its ``.trans.txt`` file.

    ``<chapter-prefix>.trans.txt`` lives beside the flac and stores one
    ``<utt-id> <text>`` pair per line. Warns and returns None when missing.
    """
    stem_parts = str(file).split('-')
    trans_file = '-'.join(stem_parts[:-1]) + '.trans.txt'
    utt = file.stem.replace('.flac', '')
    with open(trans_file, 'r') as handle:
        for raw in handle:
            if raw.split(' ')[0] == utt:
                # Strip the trailing newline, then split off the utterance id.
                return raw[:-1].split(' ', 1)[1]
    logger.warning(f'Transcription of {file} not found!')
def check_no_repeat(splits: List[str]) -> bool:
    """Return True iff every entry of *splits* is unique; warn otherwise."""
    tally = defaultdict(int)
    for split_name in splits:
        tally[split_name] += 1
    duplicates = [(name, times) for name, times in tally.items() if times > 1]
    if not duplicates:
        return True
    repeated = ''.join(f' {name} ({times} times)' for name, times in duplicates)
    logger.warning(
        f'Found repeated splits in corpus: {repeated}, which might cause unexpected behaviors.'
    )
    return False
def _parse_spk_to_gender(speaker_file: Path) -> dict:
    """Build ``{speaker_id: gender}`` from LibriSpeech's SPEAKERS.TXT.

    Skips everything up to and including the header row (the line naming the
    SEX/SUBSET/MINUTES/NAME columns), then reads one '|'-separated row per
    speaker.
    """
    speaker_path = Path(speaker_file)
    with speaker_path.open() as handle:
        stripped = [entry.strip() for entry in handle.readlines()]
    for header_idx, entry in enumerate(stripped):
        if ('SEX' in entry) and ('SUBSET' in entry) and ('MINUTES' in entry) and ('NAME' in entry):
            break
    spk2gender = {}
    for data_row in stripped[header_idx + 1:]:
        # Collapse tabs and runs of spaces before splitting on '|'.
        data_row = re.sub('\t+', ' ', data_row)
        data_row = re.sub(' +', ' ', data_row)
        fields = data_row.split('|', maxsplit=4)
        speaker_id, sex = fields[0], fields[1]
        spk2gender[int(speaker_id)] = sex.strip()
    return spk2gender
class LibriSpeech(Corpus):
    """LibriSpeech Corpus
    Link: https://www.openslr.org/12

    Args:
        dataset_root (str): Path to LibriSpeech corpus directory.
        n_jobs (int, optional): Number of jobs. Defaults to 4.
        train_split (List[str], optional): Training splits. Defaults to ["train-clean-100"].
        valid_split (List[str], optional): Validation splits. Defaults to ["dev-clean"].
        test_split (List[str], optional): Testing splits. Defaults to ["test-clean"].
    """

    def __init__(
        self,
        dataset_root: str,
        n_jobs: int = 4,
        train_split: List[str] = None,
        valid_split: List[str] = None,
        test_split: List[str] = None,
    ) -> None:
        # FIX: the original used mutable list defaults (shared across calls);
        # None now stands in for the documented default splits.
        train_split = ["train-clean-100"] if train_split is None else train_split
        valid_split = ["dev-clean"] if valid_split is None else valid_split
        test_split = ["test-clean"] if test_split is None else test_split

        self.dataset_root = Path(dataset_root).resolve()
        self.train_split = train_split
        self.valid_split = valid_split
        self.test_split = test_split
        self.all_splits = train_split + valid_split + test_split
        assert check_no_repeat(self.all_splits)

        self.data_dict = self._collect_data(dataset_root, self.all_splits, n_jobs)
        self.train = self._data_to_dict(self.data_dict, train_split)
        self.valid = self._data_to_dict(self.data_dict, valid_split)
        self.test = self._data_to_dict(self.data_dict, test_split)

        self._data = OrderedDict()
        self._data.update(self.train)
        self._data.update(self.valid)
        self._data.update(self.test)

    def get_corpus_splits(self, splits: List[str]):
        """Return the data points belonging to the given corpus splits."""
        return self._data_to_dict(self.data_dict, splits)

    @property
    def all_data(self):
        """
        Return all the data points in a dict of the format

        .. code-block:: yaml

            data_id1:
                wav_path: (str) The waveform path
                transcription: (str) The transcription
                speaker: (str) The speaker name
                gender: (str) The speaker's gender
                corpus_split: (str) The split of corpus this sample belongs to

            data_id2:
                ...
        """
        return self._data

    @property
    def data_split_ids(self):
        return (
            list(self.train.keys()),
            list(self.valid.keys()),
            list(self.test.keys()),
        )

    @staticmethod
    def _collect_data(
        dataset_root: str, splits: List[str], n_jobs: int = 4
    ) -> Dict[str, Dict[str, List[Any]]]:
        """Gather wav/name/text/speaker/gender lists for every requested split."""
        spkr2gender = _parse_spk_to_gender(Path(dataset_root) / "SPEAKERS.TXT")

        data_dict = {}
        for split in splits:
            split_dir = os.path.join(dataset_root, split)
            if not os.path.exists(split_dir):
                logger.info(f"Split {split} is not downloaded. Skip data collection.")
                continue
            wav_list = list(Path(split_dir).rglob("*.flac"))
            name_list = [file.stem.replace(".flac", "") for file in wav_list]
            text_list = Parallel(n_jobs=n_jobs)(delayed(read_text)(file) for file in wav_list)
            # Utterance ids look like "<speaker>-<chapter>-<utt>".
            spkr_list = [int(name.split("-")[0]) for name in name_list]

            # Sort every parallel list by utterance name for deterministic order.
            wav_list, name_list, text_list, spkr_list = zip(
                *[
                    (wav, name, text, spkr)
                    for (wav, name, text, spkr) in sorted(
                        zip(wav_list, name_list, text_list, spkr_list),
                        key=lambda x: x[1],
                    )
                ]
            )
            data_dict[split] = {
                "name_list": list(name_list),
                "wav_list": list(wav_list),
                "text_list": list(text_list),
                "spkr_list": list(spkr_list),
                "gender_list": [spkr2gender[spkr] for spkr in spkr_list],
            }
        return data_dict

    @staticmethod
    def _data_to_dict(
        data_dict: Dict[str, Dict[str, List[Any]]], splits: List[str]
    ) -> dict:
        # FIX: the return annotation was `dict()` (a call evaluating to {});
        # the type itself is intended.
        data = dict(
            {
                name: {
                    "wav_path": data_dict[split]["wav_list"][i],
                    "transcription": data_dict[split]["text_list"][i],
                    "speaker": data_dict[split]["spkr_list"][i],
                    "gender": data_dict[split]["gender_list"][i],
                    "corpus_split": split,
                }
                for split in splits
                for i, name in enumerate(data_dict[split]["name_list"])
            }
        )
        return data

    @classmethod
    def download_dataset(
        cls,
        target_dir: str,
        splits: List[str] = ["train-clean-100", "dev-clean", "test-clean"],
    ) -> None:
        """Download and extract the requested OpenSLR-12 splits into target_dir."""
        import os
        import tarfile

        import requests

        target_dir = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)

        def unzip_targz_then_delete(filepath: str):
            with tarfile.open(os.path.abspath(filepath)) as tar:
                tar.extractall(path=os.path.abspath(target_dir))
            os.remove(os.path.abspath(filepath))

        def download_from_url(url: str):
            filename = url.split("/")[-1].replace(" ", "_")
            filepath = os.path.join(target_dir, filename)

            r = requests.get(url, stream=True)
            if r.ok:
                logger.info(f"Saving {filename} to {os.path.abspath(filepath)}")
                with open(filepath, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1024 * 1024 * 10):
                        if chunk:
                            f.write(chunk)
                            f.flush()
                            os.fsync(f.fileno())
                logger.info(f"{filename} successfully downloaded")
                unzip_targz_then_delete(filepath)
            else:
                logger.info(f"Download failed: status code {r.status_code}\n{r.text}")

        for split in splits:
            # NOTE(review): the OpenSLR archive extracts to "LibriSpeech/"
            # (capital S); this "Librispeech/" check may never match — confirm.
            if not os.path.exists(
                os.path.join(os.path.abspath(target_dir), "Librispeech/" + split)
            ):
                download_from_url("https://www.openslr.org/resources/12/" + split + ".tar.gz")
        # FIX: the original message was missing the space before "downloaded".
        logger.info(
            ", ".join(splits)
            + f" downloaded. Located at {os.path.abspath(target_dir)}/Librispeech/"
        )
class Quesst14:
    """Parse the QUESST 2014 query-by-example dataset (English subset only)."""

    def __init__(self, dataset_root: str):
        dataset_root = Path(dataset_root)
        # The scoring lists enumerate audio paths with a language tag;
        # only the 'nnenglish' entries are kept.
        self.doc_paths = self._english_audio_paths(dataset_root, "language_key_utterances.lst")
        self.dev_query_paths = self._english_audio_paths(dataset_root, "language_key_dev.lst")
        self.eval_query_paths = self._english_audio_paths(dataset_root, "language_key_eval.lst")

        self.n_dev_queries = len(self.dev_query_paths)
        self.n_eval_queries = len(self.eval_query_paths)
        self.n_docs = len(self.doc_paths)

    @staticmethod
    def _english_audio_paths(dataset_root_path, lst_name):
        """Extract English audio paths."""
        audio_paths = []
        with open(dataset_root_path / "scoring" / lst_name) as f:
            for line in f:
                audio_path, lang = tuple(line.strip().split())
                if lang != "nnenglish":
                    continue
                # Strip the leading archive directory so the path resolves
                # against dataset_root.
                audio_path = re.sub("^.*?\\/", "", audio_path)
                audio_paths.append(dataset_root_path / audio_path)
        return audio_paths

    @property
    def valid_queries(self):
        return self.dev_query_paths

    @property
    def test_queries(self):
        return self.eval_query_paths

    @property
    def docs(self):
        """
        Valid and Test share the same document database
        """
        return self.doc_paths

    @classmethod
    def download_dataset(cls, tgt_dir: str) -> None:
        """Download and extract quesst14Database.tgz into :code:`tgt_dir`."""
        import os
        import tarfile

        import requests

        assert os.path.exists(os.path.abspath(tgt_dir)), "Target directory does not exist"

        def unzip_targz_then_delete(filepath: str):
            with tarfile.open(os.path.abspath(filepath)) as tar:
                tar.extractall(path=os.path.abspath(tgt_dir))
            os.remove(os.path.abspath(filepath))

        def download_from_url(url: str):
            filename = url.split("/")[-1].replace(" ", "_")
            filepath = os.path.join(tgt_dir, filename)

            r = requests.get(url, stream=True)
            if r.ok:
                # BUGFIX: the path was passed as a stray positional argument
                # with no %-placeholder, making logging raise a format error.
                logger.info(f"Saving {filename} to {os.path.abspath(filepath)}")
                with open(filepath, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1024 * 1024 * 10):
                        if chunk:
                            f.write(chunk)
                            f.flush()
                            os.fsync(f.fileno())
                logger.info(f"{filename} successfully downloaded")
                unzip_targz_then_delete(filepath)
            else:
                logger.info(f"Download failed: status code {r.status_code}\n{r.text}")

        if not os.path.exists(os.path.join(os.path.abspath(tgt_dir), "quesst14Database/")):
            download_from_url("https://speech.fit.vutbr.cz/files/quesst14Database.tgz")
        logger.info(
            f"Quesst14 dataset downloaded. Located at {os.path.abspath(tgt_dir)}/quesst14Database/"
        )
class SNIPS(Corpus):
    """Parse the audio SNIPS spoken-language-understanding dataset.

    Args:
        dataset_root (str): Root containing all.iob.snips.txt and the
            train/valid/test wav folders.
        train_speakers / valid_speakers / test_speakers: speaker names used to
            filter each split's utterances.
    """

    def __init__(
        self,
        dataset_root: str,
        train_speakers: List[str],
        valid_speakers: List[str],
        test_speakers: List[str],
    ) -> None:
        self.dataset_root = Path(dataset_root)
        self.train_speakers = train_speakers
        self.valid_speakers = valid_speakers
        self.test_speakers = test_speakers
        self.data_dict = self._collect_data(
            self.dataset_root, train_speakers, valid_speakers, test_speakers
        )
        self.train = self._data_to_dict(self.data_dict, ["train"])
        self.valid = self._data_to_dict(self.data_dict, ["valid"])
        self.test = self._data_to_dict(self.data_dict, ["test"])

        self._data = OrderedDict()
        self._data.update(self.train)
        self._data.update(self.valid)
        self._data.update(self.test)

    @property
    def all_data(self):
        # {utt_id: {wav_path, transcription, iob, intent, speaker, corpus_split}}
        return self._data

    @property
    def data_split_ids(self):
        return (
            list(self.train.keys()),
            list(self.valid.keys()),
            list(self.test.keys()),
        )

    @staticmethod
    def _collect_data(
        dataset_root: str,
        train_speakers: List[str],
        valid_speakers: List[str],
        test_speakers: List[str],
    ) -> Dict[str, Dict[str, Any]]:
        """Index every wav that has a transcript and a whitelisted speaker."""
        # all.iob.snips.txt: "<utt-id> <transcription<TAB>iob+intent>" per line.
        transcripts_file = open(dataset_root / "all.iob.snips.txt").readlines()
        transcripts = {}
        for line in transcripts_file:
            line = line.strip().split(" ")
            index = line[0]
            sent = " ".join(line[1:])
            transcripts[index] = sent

        data_dict = {}
        for split, speaker_list in [
            ("train", train_speakers),
            ("valid", valid_speakers),
            ("test", test_speakers),
        ]:
            wav_list = list((dataset_root / split).rglob("*.wav"))
            new_wav_list, name_list, spkr_list = [], [], []
            uf = 0
            for i in trange(len(wav_list), desc="checking files"):
                uid = wav_list[i].stem
                if uid in transcripts:
                    # Utterance id starts with "<speaker>-".
                    spkr = uid.split("-")[0]
                    if spkr in speaker_list:
                        new_wav_list.append(str(wav_list[i]))
                        name_list.append(uid)
                        spkr_list.append(spkr)
                else:
                    # BUGFIX: the original called logging.info(path, 'Not Found'),
                    # i.e. a non-format message plus a stray argument, which makes
                    # logging raise an internal formatting error.
                    logging.info("%s Not Found", wav_list[i])
                    uf += 1
            logging.info("%d wav file with label not found in text file!" % uf)
            wav_list = new_wav_list
            logging.info(
                f"loaded audio from {len(speaker_list)} speakers {str(speaker_list)} with {len(wav_list)} examples."
            )
            assert len(wav_list) > 0, "No data found @ {}".format(dataset_root / split)

            text_list = [transcripts[name] for name in name_list]
            # Sort all parallel lists by utterance name for deterministic order.
            wav_list, name_list, text_list, spkr_list = zip(
                *[
                    (wav, name, text, spkr)
                    for (wav, name, text, spkr) in sorted(
                        zip(wav_list, name_list, text_list, spkr_list),
                        key=lambda x: x[1],
                    )
                ]
            )
            data_dict[split] = {
                "name_list": name_list,
                "wav_list": wav_list,
                "text_list": text_list,
                "spkr_list": spkr_list,
            }
        return data_dict

    @staticmethod
    def _data_to_dict(
        data_dict: Dict[str, Dict[str, List[Any]]], splits: List[str]
    ) -> dict:
        # The text field is "<transcription>\t<iob tags + intent>"; BOS/EOS
        # tokens are trimmed from both halves, and the intent is the final
        # token of the iob half.
        data = dict(
            {
                name: {
                    "wav_path": data_dict[split]["wav_list"][i],
                    "transcription": " ".join(
                        data_dict[split]["text_list"][i].split("\t")[0].strip().split(" ")[1:-1]
                    ),
                    "iob": " ".join(
                        data_dict[split]["text_list"][i].split("\t")[1].strip().split(" ")[1:-1]
                    ),
                    "intent": data_dict[split]["text_list"][i]
                    .split("\t")[1]
                    .strip()
                    .split(" ")[-1],
                    "speaker": data_dict[split]["spkr_list"][i],
                    "corpus_split": split,
                }
                for split in splits
                for i, name in enumerate(data_dict[split]["name_list"])
            }
        )
        return data
class SpeechCommandsV1(Corpus):
    """
    Args:
        dataset_root (str): should contain a 'dev' sub-folder for the training/validation set
            and a 'test' sub-folder for the testing set
    """

    def __init__(self, gsc1: str, gsc1_test: str, n_jobs: int = 4) -> None:
        train_dataset_root = Path(gsc1)
        test_dataset_root = Path(gsc1_test)

        train_list, valid_list = self.split_dataset(train_dataset_root)
        train_list = self.parse_train_valid_data_list(train_list, train_dataset_root)
        valid_list = self.parse_train_valid_data_list(valid_list, train_dataset_root)
        test_list = self.parse_test_data_list(test_dataset_root)

        self.train = self.list_to_dict(train_list)
        self.valid = self.list_to_dict(valid_list)
        self.test = self.list_to_dict(test_list)

        self._data = OrderedDict()
        self._data.update(self.train)
        self._data.update(self.valid)
        self._data.update(self.test)

    @staticmethod
    def split_dataset(
        root_dir: Union[str, Path], max_uttr_per_class=2 ** 27 - 1
    ) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
        """Split Speech Commands into 3 set.

        Args:
            root_dir: speech commands dataset root dir
            max_uttr_per_class: predefined value in the original paper

        Return:
            train_list: [(class_name, audio_path), ...]
            valid_list: as above
        """
        train_list, valid_list = [], []

        for entry in Path(root_dir).iterdir():
            if not entry.is_dir() or entry.name == "_background_noise_":
                continue

            for audio_path in entry.glob("*.wav"):
                # Hash on the speaker id (filename minus the "_nohash_" part) so
                # all recordings of one speaker land in the same split —
                # presumably mirroring the dataset's official which_set()
                # hashing; confirm against the Speech Commands paper.
                speaker_hashed = re.sub(r"_nohash_.*$", "", audio_path.name)
                hashed_again = hashlib.sha1(speaker_hashed.encode("utf-8")).hexdigest()
                percentage_hash = (
                    int(hashed_again, 16) % (max_uttr_per_class + 1)
                ) * (100.0 / max_uttr_per_class)

                if percentage_hash < 10:
                    valid_list.append((entry.name, audio_path))
                elif percentage_hash < 20:
                    # Reserved bucket (the held-out test set ships separately).
                    pass
                else:
                    train_list.append((entry.name, audio_path))

        return train_list, valid_list

    @staticmethod
    def parse_train_valid_data_list(data_list, train_dataset_root: Path):
        # Map classes outside the official keyword set to '_unknown_' and add
        # the background-noise recordings as the '_silence_' class.
        data = [
            (class_name, audio_path) if class_name in CLASSES else ("_unknown_", audio_path)
            for class_name, audio_path in data_list
        ]
        data += [
            ("_silence_", audio_path)
            for audio_path in Path(train_dataset_root, "_background_noise_").glob("*.wav")
        ]
        return data

    @staticmethod
    def parse_test_data_list(test_dataset_root: Path):
        # The test archive is already organized as one folder per class.
        data = [
            (class_dir.name, audio_path)
            for class_dir in Path(test_dataset_root).iterdir()
            if class_dir.is_dir()
            for audio_path in class_dir.glob("*.wav")
        ]
        return data

    @staticmethod
    def path_to_unique_name(path: str):
        # "<class_dir>/<wav_name>" is unique within the corpus.
        return "/".join(Path(path).parts[-2:])

    @classmethod
    def list_to_dict(cls, data_list):
        data = dict(
            {
                cls.path_to_unique_name(audio_path): {
                    "wav_path": audio_path,
                    "class_name": class_name,
                }
                for class_name, audio_path in data_list
            }
        )
        return data

    @property
    def all_data(self):
        """
        Return:
            Container: id (str)
                wav_path (str)
                class_name (str)
        """
        return self._data

    @property
    def data_split_ids(self):
        return (
            list(self.train.keys()),
            list(self.valid.keys()),
            list(self.test.keys()),
        )

    @classmethod
    def download_dataset(cls, tgt_dir: str) -> None:
        """Download and extract both GSC v1 archives under tgt_dir/CORPORA_DIR."""
        import os
        import tarfile

        import requests

        assert os.path.exists(os.path.abspath(tgt_dir)), "Target directory does not exist"

        def unzip_targz_then_delete(filepath: str, filename: str):
            file_path = os.path.join(
                os.path.abspath(tgt_dir), "CORPORA_DIR", filename.replace(".tar.gz", "")
            )
            # exist_ok so a rerun after a failed extraction does not crash.
            os.makedirs(file_path, exist_ok=True)
            with tarfile.open(os.path.abspath(filepath)) as tar:
                tar.extractall(path=os.path.abspath(file_path))
            os.remove(os.path.abspath(filepath))

        def download_from_url(url: str):
            filename = url.split("/")[-1].replace(" ", "_")
            filepath = os.path.join(tgt_dir, filename)

            r = requests.get(url, stream=True)
            if r.ok:
                # BUGFIX: the path was passed as a stray positional argument
                # with no %-placeholder, making logging raise a format error.
                logger.info(f"Saving {filename} to {os.path.abspath(filepath)}")
                with open(filepath, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1024 * 1024 * 10):
                        if chunk:
                            f.write(chunk)
                            f.flush()
                            os.fsync(f.fileno())
                logger.info(f"{filename} successfully downloaded")
                unzip_targz_then_delete(filepath, filename)
            else:
                logger.info(f"Download failed: status code {r.status_code}\n{r.text}")

        if not os.path.exists(
            os.path.join(os.path.abspath(tgt_dir), "CORPORA_DIR/speech_commands_v0.01")
        ):
            download_from_url("http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz")
        if not os.path.exists(
            os.path.join(os.path.abspath(tgt_dir), "CORPORA_DIR/speech_commands_test_set_v0.01")
        ):
            download_from_url(
                "http://download.tensorflow.org/data/speech_commands_test_set_v0.01.tar.gz"
            )
        logger.info(
            f"Speech commands dataset downloaded. Located at {os.path.abspath(tgt_dir)}/CORPORA_DIR/"
        )
class VoxCeleb1SID(Corpus):
    """VoxCeleb1 parsed for speaker identification.

    Uses the official iden_split file (SPLIT_FILE_URL) to assign each
    utterance to train/valid/test.
    """

    def __init__(self, dataset_root: str, n_jobs: int = 4, cache_root: str = CACHE_ROOT) -> None:
        self.dataset_root = Path(dataset_root).resolve()
        uid2split = self._get_standard_usage(self.dataset_root, cache_root)
        self._split2uids = defaultdict(list)
        for uid, split in uid2split.items():
            # Flatten "id/video/utt.wav" into a filesystem-safe unique id.
            self._split2uids[split].append(Path(uid.replace("/", "-")).stem)
        uid2wavpath = self._find_wavs_with_uids(
            self.dataset_root, sorted(uid2split.keys()), n_jobs=n_jobs
        )
        self._data = {
            Path(uid.replace("/", "-")).stem: {
                "wav_path": uid2wavpath[uid],
                "label": self._build_label(uid),
            }
            for uid in uid2split.keys()
        }

    @property
    def all_data(self):
        # {uid: {wav_path, label}}
        return self._data

    @property
    def data_split_ids(self):
        return (
            self._split2uids["train"],
            self._split2uids["valid"],
            self._split2uids["test"],
        )

    @staticmethod
    def _get_standard_usage(dataset_root: Path, cache_root: Path):
        """Download (once) and parse the official split file into {uid: split}."""
        split_filename = SPLIT_FILE_URL.split("/")[-1]
        split_filepath = Path(cache_root) / split_filename
        if not split_filepath.is_file():
            # Lock so concurrent jobs do not race on the same cache file.
            with FileLock(str(split_filepath) + ".lock"):
                os.system(f"wget {SPLIT_FILE_URL} -O {str(split_filepath)}")
        standard_usage = [
            line.strip().split(" ") for line in open(split_filepath, "r").readlines()
        ]

        def code2split(code: int):
            # Split codes in the file are 1-based: 1=train, 2=valid, 3=test.
            splits = ["train", "valid", "test"]
            return splits[code - 1]

        standard_usage = {uid: code2split(int(split)) for split, uid in standard_usage}
        return standard_usage

    @staticmethod
    def _find_wavs_with_uids(dataset_root, uids, n_jobs=4):
        """Locate each uid's wav under <part>/wav/ in parallel."""

        def find_wav_with_uid(uid):
            found_wavs = list(dataset_root.glob(f"*/wav/{uid}"))
            assert len(found_wavs) == 1
            return uid, found_wavs[0]

        uids_with_wavs = Parallel(n_jobs=n_jobs)(
            delayed(find_wav_with_uid)(uid) for uid in tqdm(uids, desc="Search wavs")
        )
        uids2wav = {uid: wav for uid, wav in uids_with_wavs}
        return uids2wav

    @staticmethod
    def _build_label(uid):
        # "id10001/..." -> "speaker_0": VoxCeleb1 speaker ids start at 10001.
        id_string = uid.split("/")[0]
        label = f"speaker_{int(id_string[2:]) - 10001}"
        return label

    @classmethod
    def download_dataset(cls, target_dir: str, splits: List[str] = ["dev", "test"]) -> None:
        """Download VoxCeleb1 audio into target_dir/Voxceleb1/{dev,test}."""
        tgt_dir = os.path.abspath(target_dir)
        assert os.path.exists(tgt_dir), "Target directory does not exist"

        from zipfile import ZipFile

        import requests

        def unzip_then_delete(filepath: str, split: str):
            assert os.path.exists(filepath), "File not found!"
            with ZipFile(filepath) as zipf:
                zipf.extractall(path=os.path.join(tgt_dir, "Voxceleb1", split))
            os.remove(os.path.abspath(filepath))

        def download_from_url(url: str, split: str):
            filename = url.split("/")[-1].replace(" ", "_")
            filepath = os.path.join(tgt_dir, filename)

            r = requests.get(url, stream=True)
            if r.ok:
                # BUGFIX: the path was passed as a stray positional argument
                # with no %-placeholder, making logging raise a format error.
                logger.info(f"Saving {filename} to {filepath}")
                with open(filepath, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1024 * 1024 * 10):
                        if chunk:
                            f.write(chunk)
                            f.flush()
                            os.fsync(f.fileno())
                logger.info(f"{filename} successfully downloaded")
            else:
                logger.info(f"Download failed: status code {r.status_code}\n{r.text}")
            return filepath

        def download_dev():
            # The dev set ships in four zip parts that must be concatenated.
            partpaths = []
            for part in ["a", "b", "c", "d"]:
                if os.path.exists(os.path.join(tgt_dir, f"vox1_dev_wav_parta{part}")):
                    # FIX: corrected "donwload" typo in the log message.
                    logger.info(f"vox1_dev_wav_parta{part} exists, skip download")
                    partpaths.append(os.path.join(tgt_dir, f"vox1_dev_wav_parta{part}"))
                    continue
                fp = download_from_url(
                    f"https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_parta{part}",
                    "dev",
                )
                partpaths.append(fp)
            zippath = os.path.join(tgt_dir, "vox1_dev_wav.zip")
            with open(zippath, "wb") as outfile:
                for f in partpaths:
                    with open(f, "rb") as infile:
                        for line in infile:
                            outfile.write(line)
            for f in partpaths:
                os.remove(f)
            unzip_then_delete(zippath, "dev")

        for split in splits:
            if not os.path.exists(os.path.join(tgt_dir, "Voxceleb1/" + split + "/wav")):
                if split == "dev":
                    download_dev()
                else:
                    filepath = download_from_url(
                        "https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_test_wav.zip",
                        "test",
                    )
                    unzip_then_delete(filepath, "test")
        logger.info(f"Voxceleb1 dataset downloaded. Located at {tgt_dir}/Voxceleb1/")
class VoxCeleb1SV(Corpus):
    """VoxCeleb1 corpus parser for Speaker Verification (SV).

    Splits the official identification list into train/valid (dev speakers)
    and test (speakers id10270-id10309), and parses the verification trials.
    """

    def __init__(self, dataset_root: str, download_dir: str, force_download: bool=True) -> None:
        self.dataset_root = Path(dataset_root).resolve()
        (train_path, valid_path, test_path, speakerid2label) = self.format_path(self.dataset_root, download_dir, force_download)
        self.categories = speakerid2label
        self.train_data = self.path2data(train_path, speakerid2label)
        self.valid_data = self.path2data(valid_path, speakerid2label)
        # Test utterances carry no speaker label: SV is evaluated on trials.
        self.test_data = {self.path2uid(path): {'wav_path': path, 'label': None} for path in test_path}
        self.test_trials = self.format_test_trials(download_dir, force_download)

    @classmethod
    def path2uid(cls, path):
        # Last three path parts (speaker/clip/file) joined into a unique id.
        return '-'.join(Path(path).parts[(- 3):])

    @classmethod
    def path2data(cls, paths, speakerid2label):
        """Build uid -> {wav_path, label} using the speaker directory as label key."""
        data = {cls.path2uid(path): {'wav_path': path, 'label': speakerid2label[Path(path).parts[(- 3)]]} for path in paths}
        return data

    @staticmethod
    def format_path(dataset_root, download_dir, force_download: bool):
        """Resolve wav paths for each split and assign integer labels to dev speakers."""
        split_filename = SPLIT_FILE_URL.split('/')[(- 1)]
        split_filepath = (Path(download_dir) / split_filename)
        _download(split_filepath, SPLIT_FILE_URL, refresh=force_download)
        usage_list = open(split_filepath, 'r').readlines()
        (train, valid, test) = ([], [], [])
        # Speakers id10270..id10309 form the standard verification test set.
        test_list = [item for item in usage_list if (int(item.split(' ')[1].split('/')[0][2:]) in range(10270, 10310))]
        usage_list = list(set(usage_list).difference(set(test_list)))
        test_list = [item.split(' ')[1] for item in test_list]
        logging.info('search specified wav name for each split')
        speakerids = []
        for string in tqdm(usage_list, desc='Search train, dev wavs'):
            pair = string.split()
            index = pair[0]
            x = list(dataset_root.glob(('dev/wav/' + pair[1])))
            speakerStr = pair[1].split('/')[0]
            if (speakerStr not in speakerids):
                speakerids.append(speakerStr)
            # Split codes: 1/3 -> train, 2 -> valid (test speakers were removed above).
            if ((int(index) == 1) or (int(index) == 3)):
                train.append(str(x[0]))
            elif (int(index) == 2):
                valid.append(str(x[0]))
            else:
                raise ValueError
        # Sorted speaker ids give deterministic integer labels.
        speakerids = sorted(speakerids)
        speakerid2label = {}
        for (idx, spk) in enumerate(speakerids):
            speakerid2label[spk] = idx
        for string in tqdm(test_list, desc='Search test wavs'):
            x = list(dataset_root.glob(('test/wav/' + string.strip())))
            test.append(str(x[0]))
        logging.info(f'finish searching wav: train {len(train)}; valid {len(valid)}; test {len(test)} files found')
        return (train, valid, test, speakerid2label)

    @classmethod
    def format_test_trials(cls, download_dir: str, force_download: bool):
        """Parse the trial file into (label, uid1, uid2) tuples; label is 1 (same) or 0 (different)."""
        trial_filename = TRIAL_FILE_URL.split('/')[(- 1)]
        trial_filepath = (Path(download_dir) / trial_filename)
        _download(trial_filepath, TRIAL_FILE_URL, refresh=force_download)
        trial_list = open(trial_filepath, 'r').readlines()
        test_trials = []
        for string in tqdm(trial_list, desc='Prepare testing trials'):
            pair = string.split()
            test_trials.append((int(pair[0]), cls.path2uid(pair[1]), cls.path2uid(pair[2])))
        return test_trials

    @property
    def all_data(self):
        # (train_data, valid_data, test_data, test_trials)
        return (self.train_data, self.valid_data, self.test_data, self.test_trials)

    @property
    def data_split_ids(self):
        # Not applicable for the SV protocol; splits are exposed via all_data.
        return None
class Dataset(data.Dataset): def __len__(self) -> int: raise NotImplementedError def __getitem__(self, index: int): raise NotImplementedError def getinfo(self, index: int): raise NotImplementedError
class EncodeCategory(Dataset): def __init__(self, labels: List[str], encoder: CategoryEncoder) -> None: super().__init__() self.labels = labels self.encoder = encoder def __len__(self): return len(self.labels) def __getitem__(self, index: int): label = self.labels[index] return {'label': label, 'class_id': self.encoder.encode(label)}
class EncodeCategories(Dataset): def __init__(self, labels: List[List[str]], encoders: CategoryEncoders) -> None: super().__init__() self.labels = labels self.encoders = encoders def __len__(self): return len(self.labels) def __getitem__(self, index: int): labels = self.labels[index] return {'labels': labels, 'class_ids': torch.LongTensor(self.encoders.encode(labels))}
class EncodeMultiLabel(Dataset): def __init__(self, labels: List[List[str]], encoder: CategoryEncoder) -> None: super().__init__() self.labels = labels self.encoder = encoder def __len__(self): return len(self.labels) @staticmethod def label_to_binary_vector(label_ids: List[int], num_labels: int) -> torch.Tensor: if (len(label_ids) == 0): binary_labels = torch.zeros((num_labels,), dtype=torch.float) else: binary_labels = torch.zeros((num_labels,)).scatter(0, torch.tensor(label_ids), 1.0) assert (set(torch.where((binary_labels == 1.0))[0].numpy()) == set(label_ids)) return binary_labels def __getitem__(self, index: int): labels = self.labels[index] label_ids = [self.encoder.encode(label) for label in labels] binary_labels = self.label_to_binary_vector(label_ids, len(self.encoder)) return {'labels': labels, 'binary_labels': binary_labels}
class EncodeText(Dataset): def __init__(self, text: List[str], tokenizer: Tokenizer, iob: List[str]=None) -> None: super().__init__() self.text = text self.iob = iob if (iob is not None): assert (len(text) == len(iob)) self.tokenizer = tokenizer def __len__(self): return len(self.text) def __getitem__(self, index: int): text = self.text[index] if (self.iob is not None): iob = self.iob[index] tokenized_ids = self.tokenizer.encode(text, iob) text = self.tokenizer.decode(tokenized_ids) else: tokenized_ids = self.tokenizer.encode(text) return {'labels': text, 'class_ids': torch.LongTensor(tokenized_ids)}
def get_info(dataset, names: List[str], cache_dir: str=None, n_jobs: int=6): logger.info(f"Getting info from dataset {dataset.__class__.__qualname__}: {' '.join(names)}") if isinstance(cache_dir, (str, Path)): logger.info(f'Using cached info in {cache_dir}') cache_dir: Path = Path(cache_dir) cache_dir.mkdir(parents=True, exist_ok=True) try: data = dataset.getinfo(0) for name in names: assert (name in data) except: fn = dataset.__getitem__ else: fn = dataset.getinfo def _get(idx): if isinstance(cache_dir, (str, Path)): cache_path: Path = (Path(cache_dir) / f'{idx}.json') if cache_path.is_file(): with cache_path.open() as f: cached = json.load(f) all_presented = True for name in names: if (name not in cached): all_presented = False if all_presented: return cached data = fn(idx) info = {} for name in names: info[name] = data[name] if isinstance(cache_dir, (str, Path)): cache_path: Path = (Path(cache_dir) / f'{idx}.json') with cache_path.open('w') as f: json.dump(info, f) return info infos = Parallel(n_jobs=n_jobs, backend='threading')((delayed(_get)(idx) for idx in tqdm(range(len(dataset))))) organized_info = defaultdict(list) for info in infos: for (k, v) in info.items(): organized_info[k].append(v) output = [] for name in names: output.append(organized_info[name]) if (len(output) == 1): return output[0] else: return output
class CategoryEncoder(): def __init__(self, category: List[str]) -> None: self.category = list(sorted(set(category))) def __len__(self) -> int: return len(self.category) def encode(self, label: str) -> int: return self.category.index(label) def decode(self, index: int) -> str: return self.category[index]
class CategoryEncoders(): def __init__(self, categories: List[List[str]]) -> None: self.categories = [CategoryEncoder(c) for c in categories] def __len__(self) -> int: return sum([len(c) for c in self.categories]) def __iter__(self): for category in self.categories: (yield category) def encode(self, labels: List[str]) -> List[int]: assert (len(labels) == len(self.categories)) return [encoder.encode(label) for (label, encoder) in zip(labels, self.categories)] def decode(self, indices: List[int]) -> List[str]: return [encoder.decode(index) for (index, encoder) in zip(indices, self.categories)]
def parse_lexicon(line: str) -> Tuple[(str, List[str])]: line.replace('\t', ' ') (word, *phonemes) = line.split() return (word, phonemes)
def read_lexicon_files(file_list: List[str]) -> Dict[(str, List[str])]: w2p_dict = defaultdict(list) for file in file_list: with open(file, 'r') as fp: lines = [line.strip() for line in fp] for line in lines: (word, phonemes) = parse_lexicon(line) w2p_dict[word].append(phonemes) w2p = {} for (word, phonemes_all) in w2p_dict.items(): if (len(phonemes_all) > 1): logging.info(f'{len(phonemes_all)} phoneme sequences found for {word}.') for (i, phonemes) in enumerate(phonemes_all): logging.info(f'{i}. {phonemes}') w2p[word] = phonemes_all[0] logging.info('Taking the first phoneme sequences for a deterministic behavior.') return w2p
class G2P(): 'Grapheme-to-phoneme\n\n Args:\n file_list (List[str], optional): List of lexicon files. Defaults to None.\n allow_unk (bool): If false, raise Error when a word can not be recognized by this basic G2P\n ' def __init__(self, file_list: List[str]=None, allow_unk: bool=False): self.allow_unk = allow_unk if (file_list is None): file_list = _urls_to_filepaths(*DEFAULT_LEXICON_URL) self.word2phone = read_lexicon_files(file_list) def encode(self, text: str) -> str: 'Converts grapheme-based sentences to phonemes\n\n Args:\n text (str): Sentence\n\n Returns:\n str: Phonemized sentence\n ' word_list = text.strip().upper().split(' ') phonemes = [] for word in word_list: if (not self.allow_unk): assert (word in self.word2phone) phonemes += self.word2phone.get(word, ['<UNK>']) return ' '.join(phonemes)
class Tokenizer(): def __init__(self): super().__init__() @abc.abstractmethod def encode(self, text: str, iob: str=None) -> List[int]: raise NotImplementedError @abc.abstractmethod def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str: raise NotImplementedError def __len__(self): return self.vocab_size @abc.abstractproperty def vocab_size(self) -> int: raise NotImplementedError @abc.abstractproperty def token_type(self) -> str: raise NotImplementedError @abc.abstractclassmethod def load_from_file(cls, vocab_file: str): raise NotImplementedError @property def pad_idx(self) -> int: return 0 @property def eos_idx(self) -> int: return 1 @property def unk_idx(self) -> int: return 2 def __repr__(self) -> str: return '<{} vocab_size={}>'.format(type(self).__name__, self.vocab_size)
class CharacterTokenizer(Tokenizer): 'Character tokenizer.' def __init__(self, vocab_list: List[str]=None): super().__init__() if (vocab_list is None): vocab_list = CHARACTER_VOCAB for tok in ['<pad>', '<eos>', '<unk>']: assert (tok not in vocab_list) self._vocab_list = (['<pad>', '<eos>', '<unk>'] + vocab_list) self._vocab2idx = {v: idx for (idx, v) in enumerate(self._vocab_list)} def encode(self, s: str) -> List[int]: s = s.strip('\r\n ') return ([self.vocab_to_idx(v) for v in s] + [self.eos_idx]) def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str: vocabs = [] for (t, idx) in enumerate(idxs): v = self.idx_to_vocab(idx) if ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))): continue elif (idx == self.eos_idx): break else: vocabs.append(v) return ''.join(vocabs) @classmethod def load_from_file(cls, vocab_file: str=None, vocab_list: List[str]=None): if (vocab_file is not None): with open(vocab_file, 'r') as f: vocab_list = [line.strip('\r\n') for line in f] elif (vocab_list is not None): pass else: raise ValueError('No vocabulary information give, please specify either vocab_file or vocab_list.') return cls(vocab_list) @property def vocab_size(self) -> int: return len(self._vocab_list) @property def token_type(self) -> str: return 'character' def vocab_to_idx(self, vocab): return self._vocab2idx.get(vocab, self.unk_idx) def idx_to_vocab(self, idx): return self._vocab_list[idx]
class CharacterSlotTokenizer(Tokenizer):
    """Character tokenizer with slots."""

    def __init__(self, vocab_list: List[str], slots: List[str]):
        super().__init__()
        for tok in ['<pad>', '<eos>', '<unk>']:
            assert (tok not in vocab_list)
        self._vocab_list = (['<pad>', '<eos>', '<unk>'] + vocab_list)
        self._vocab2idx = {v: idx for (idx, v) in enumerate(self._vocab_list)}
        self.space_idx = self.vocab_to_idx(' ')
        self.slots = slots
        # Slot ids occupy a separate range right after the character vocabulary.
        self.slot2id = {self.slots[i]: (i + len(self._vocab_list)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self._vocab_list)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, sent: str, iobs: str) -> List[int]:
        """Encode a sentence character-by-character, inserting B-/E- slot ids around tagged spans.

        ``iobs`` carries one tag per space-separated word of ``sent``.
        """
        sent = sent.strip('\r\n ')
        iobs = iobs.strip('\r\n ')
        # Collapse repeated spaces so words and tags stay aligned.
        sent = re.sub(' +', ' ', sent).strip(' ')
        sent = sent.split(' ')
        iobs = iobs.split(' ')
        assert (len(sent) == len(iobs)), f'transcription and iobs should have same number of words (split by space)'
        # Drop BOS/EOS sentinel words if present (ATIS-style transcripts).
        if (sent[0] == 'BOS'):
            sent = sent[1:]
            iobs = iobs[1:]
        if (sent[(- 1)] == 'EOS'):
            sent = sent[:(- 1)]
            iobs = iobs[:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            # Open a slot span when the tag starts or changes.
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            tokens += [self.vocab_to_idx(v) for v in wrd]
            # Close the span when the tag ends or changes.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
            if (i == (len(sent) - 1)):
                tokens.append(self.eos_idx)
            elif ((len(tokens) > 0) and (tokens[(- 1)] != self.space_idx)):
                tokens.append(self.space_idx)
        assert (tokens[(- 1)] == self.eos_idx)
        return tokens

    def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
        """Decode ids to text; skips <pad> (and repeats when requested), stops at <eos>."""
        vocabs = []
        for (t, idx) in enumerate(idxs):
            v = self.idx_to_vocab(idx)
            if ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            elif (idx == self.eos_idx):
                break
            else:
                vocabs.append(v)
        return ''.join(vocabs)

    @classmethod
    def load_from_file(cls, vocab_file: str, slots_file: str):
        """Build from a vocab file (one char per line) and a slots file (one slot name per line)."""
        with open(vocab_file, 'r') as f:
            vocab_list = [line.strip('\r\n') for line in f]
        org_slots = open(slots_file).read().split('\n')
        slots = []
        # Each slot (except 'O') yields a begin and an end marker.
        for slot in [slot for slot in org_slots if (slot != 'O')]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(vocab_list, slots)

    @property
    def vocab_size(self) -> int:
        return (len(self._vocab_list) + len(self.slots))

    @property
    def token_type(self) -> str:
        return 'character-slot'

    def vocab_to_idx(self, vocab):
        # Unknown characters fall back to <unk>.
        return self._vocab2idx.get(vocab, self.unk_idx)

    def idx_to_vocab(self, idx):
        idx = int(idx)
        if (idx < len(self._vocab_list)):
            return self._vocab_list[idx]
        else:
            # Slot markers are rendered with a surrounding space so decoded
            # text keeps word boundaries.
            token = self.id2slot[idx]
            if (token[0] == 'B'):
                return (token + ' ')
            elif (token[0] == 'E'):
                return (' ' + token)
            else:
                raise ValueError('id2slot get:', token)
class SubwordTokenizer(Tokenizer): 'Subword tokenizer using sentencepiece.' def __init__(self, spm): super().__init__() if ((spm.pad_id() != 0) or (spm.eos_id() != 1) or (spm.unk_id() != 2)): raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>') self.spm = spm def encode(self, s: str) -> List[int]: tokens = self.spm.encode_as_ids(s) assert (tokens[(- 1)] == self.eos_idx) return tokens def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str: crop_idx = [] for (t, idx) in enumerate(idxs): if (idx == self.eos_idx): break elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))): continue else: crop_idx.append(idx) return self.spm.decode_ids(crop_idx) @classmethod def load_from_file(cls, filepath: str): import sentencepiece as splib spm = splib.SentencePieceProcessor() spm.load(filepath) spm.set_encode_extra_options('eos') return cls(spm) def __setstate__(self, state): self.__dict__.update(state) self.spm.set_encode_extra_options('eos') @property def vocab_size(self) -> int: return len(self.spm) @property def token_type(self) -> str: return 'subword'
class SubwordSlotTokenizer(Tokenizer):
    """Subword tokenizer with slots."""

    def __init__(self, spm, slots):
        super().__init__()
        # The fixed id layout (<pad>=0, <eos>=1, <unk>=2) must match how the
        # sentencepiece model was trained.
        if ((spm.pad_id() != 0) or (spm.eos_id() != 1) or (spm.unk_id() != 2)):
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm
        self.slots = slots
        # Slot ids occupy a separate range right after the sentencepiece vocab.
        self.slot2id = {self.slots[i]: (i + len(self.spm)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self.spm)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, sent: str, iobs: str) -> List[int]:
        """Encode a sentence into subword ids, wrapping tagged spans with B-/E- slot ids.

        ``iobs`` carries one tag per space-separated word of ``sent``.
        """
        sent = sent.strip('\r\n ')
        iobs = iobs.strip('\r\n ')
        # Collapse repeated spaces so words and tags stay aligned.
        sent = re.sub(' +', ' ', sent).strip(' ')
        sent = sent.split(' ')
        iobs = iobs.split(' ')
        assert (len(sent) == len(iobs)), f'transcription and iobs should have same number of words (split by space)'
        # Drop BOS/EOS sentinel words if present (ATIS-style transcripts).
        if (sent[0] == 'BOS'):
            sent = sent[1:]
            iobs = iobs[1:]
        if (sent[(- 1)] == 'EOS'):
            sent = sent[:(- 1)]
            iobs = iobs[:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            # Open a slot span when the tag starts or changes.
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            encoded = self.spm.encode_as_ids(wrd)
            # Per-word encoding ends with <eos>; strip it and re-append once at
            # the very end of the sentence.
            assert (encoded[(- 1)] == self.eos_idx)
            tokens += encoded[:(- 1)]
            # Close the span when the tag ends or changes.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
        assert (tokens[(- 1)] != self.eos_idx)
        tokens.append(self.eos_idx)
        return tokens

    def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
        """Decode ids to text; slot markers are emitted after the subwords they follow.

        NOTE(review): subword ids after the last slot marker appear to be
        dropped (``sent`` is never flushed after the loop) — confirm whether
        trailing text outside any slot is expected here.
        """
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                crop_idx.append(idx)
        (sent, ret) = ([], [])
        for (i, x) in enumerate(crop_idx):
            if (x >= len(self.spm)):
                # Slot id: emit it, flushing any accumulated subwords before it.
                slot = self.id2slot[x]
                ret.append(slot)
                if (len(sent) > 0):
                    decoded = self.spm.decode_ids(sent)
                    ret.insert((- 1), decoded)
                    sent = []
            else:
                sent.append(x)
        return ' '.join(ret)

    @classmethod
    def load_from_file(cls, filepath: str, slots_file: str):
        """Load a sentencepiece model plus a slots file (one slot name per line)."""
        import sentencepiece as splib
        spm = splib.SentencePieceProcessor()
        spm.load(filepath)
        spm.set_encode_extra_options(':eos')
        org_slots = open(slots_file).read().split('\n')
        slots = []
        # Each slot (except 'O') yields a begin and an end marker.
        for slot in [slot for slot in org_slots if (slot != 'O')]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(spm, slots)

    def __setstate__(self, state):
        self.__dict__.update(state)
        # NOTE(review): uses 'eos' here but ':eos' in load_from_file — confirm
        # whether the difference is intentional.
        self.spm.set_encode_extra_options('eos')

    @property
    def vocab_size(self) -> int:
        return (len(self.spm) + len(self.slots))

    @property
    def token_type(self) -> str:
        return 'subword-slot'
class WordTokenizer(CharacterTokenizer): 'Word tokenizer.' def encode(self, s: str) -> List[int]: s = s.strip('\r\n ') words = s.split(' ') return ([self.vocab_to_idx(v) for v in words] + [self.eos_idx]) def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str: vocabs = [] for (t, idx) in enumerate(idxs): v = self.idx_to_vocab(idx) if (idx == self.eos_idx): break elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))): continue else: vocabs.append(v) return ' '.join(vocabs) @property def token_type(self) -> str: return 'word'
class PhonemeTokenizer(WordTokenizer): 'Phoneme tokenizer.' @property def token_type(self) -> str: return 'phoneme'
class BertTokenizer(Tokenizer): 'Bert Tokenizer.\n\n https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/tokenization_bert.py\n ' def __init__(self, tokenizer): super().__init__() self._tokenizer = tokenizer self._tokenizer.pad_token = '<pad>' self._tokenizer.eos_token = '<eos>' self._tokenizer.unk_token = '<unk>' def encode(self, s: str) -> List[int]: reduced_idx = [] for idx in self._tokenizer.encode(s): try: r_idx = (idx - BERT_FIRST_IDX) assert (r_idx > 0) reduced_idx.append(r_idx) except AssertionError: reduced_idx.append(self.unk_idx) reduced_idx.append(self.eos_idx) return reduced_idx def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str: crop_idx = [] for (t, idx) in enumerate(idxs): if (idx == self.eos_idx): break elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))): continue else: crop_idx.append((idx + BERT_FIRST_IDX)) return self._tokenizer.decode(crop_idx) @classmethod def load_from_file(cls, vocab_file: str): from pytorch_transformers import BertTokenizer as bert_tokenizer return cls(bert_tokenizer.from_pretrained(vocab_file)) @property def vocab_size(self) -> int: return ((BERT_LAST_IDX - BERT_FIRST_IDX) + 1) @property def token_type(self) -> str: return 'bert'
def load_tokenizer(mode: str, vocab_file: str=None, vocab_list: List[str]=None, slots_file: str=None) -> Tokenizer: 'Load a text tokenizer.\n\n Args:\n mode (str): Mode ("character", "character-slot", "subword", "subword-slot", "word", "bert-...")\n vocab_file (str, optional): Path to vocabularies. Defaults to None.\n vocab_list (List[str], optional): List of vocabularies. Defaults to None.\n slots_file (str, optional): Path to slots. Defaults to None.\n\n Raises:\n NotImplementedError: If mode is not implemented.\n\n Returns:\n Tokenizer: Text tokenizer.\n ' assert ((int((vocab_file is not None)) + int((vocab_list is not None))) <= 1), "For 'vocab_file' and 'vocab_list', at most one argument can be presented" with tempfile.NamedTemporaryFile('w') as f: if (vocab_list is not None): f.writelines([f'''{vocab} ''' for vocab in vocab_list]) f.flush() vocab_file = f.name if ((slots_file is not None) and (not mode.endswith('slot'))): mode = f'{mode}-slot' if (mode == 'character'): return CharacterTokenizer.load_from_file(vocab_file) elif (mode == 'character-slot'): return CharacterSlotTokenizer.load_from_file(vocab_file, slots_file) elif (mode == 'subword'): return SubwordTokenizer.load_from_file(vocab_file) elif (mode == 'subword-slot'): return SubwordSlotTokenizer.load_from_file(vocab_file, slots_file) elif (mode == 'word'): return WordTokenizer.load_from_file(vocab_file) elif (mode == 'phoneme'): return PhonemeTokenizer.load_from_file(vocab_file) elif mode.startswith('bert-'): return BertTokenizer.load_from_file(mode) else: raise NotImplementedError('`{}` is not yet supported.'.format(mode))
def default_phoneme_tokenizer() -> PhonemeTokenizer: 'Returns a default LibriSpeech phoneme tokenizer.\n\n Returns:\n PhonemeTokenizer: Vocabs include 71 phonemes\n ' return PhonemeTokenizer.load_from_file(vocab_list=PHONEME_VOCAB)
def generate_basic_vocab(mode: str, text_list: List[str], vocab_size: int=(- 1), coverage: float=1.0, sort_vocab: bool=True) -> List[str]: 'Generates basic vocabularies, including character and word-based vocabularies.\n\n Args:\n mode (str): Vocabulary type (character or word).\n text_list (List[str]): List of text data.\n vocab_size (int, optional):\n Vocabulary size, if not specified, vocab_size would be `coverage * actual vocab size`. Defaults to -1.\n coverage (float, optional): Vocabulary coverage. Defaults to 1.0.\n sort_vocab (bool, optional): Sort vocabularies alphabetically. Defaults to True.\n\n Returns:\n List[str]: A list of vocabularies.\n ' assert (mode in {'character', 'word'}), mode assert ((vocab_size == (- 1)) or (vocab_size > 0)), vocab_size assert ((coverage > 0.0) and (coverage <= 1.0)), coverage logger.info(f'Generating vocab (type = {mode}, coverage = {coverage}) from {len(text_list)} sentences.') counter = Counter() for text in text_list: if (mode == 'character'): counter.update(text) if (mode == 'word'): counter.update(text.split()) if (vocab_size < 0): vocab_size = int((len(counter) * coverage)) else: vocab_size = min(vocab_size, len(counter)) if (vocab_size < len(counter)): vocab_list = sorted(counter.keys(), key=(lambda k: counter[k]), reverse=True) vocab_list = vocab_list[:vocab_size] else: vocab_list = list(counter.keys()) if sort_vocab: vocab_list = sorted(vocab_list) logger.info(f'Generated {vocab_size} {mode} vocabularies.') return vocab_list
def generate_subword_vocab(text_list: List[str]=None, text_file: str=None, output_file: str=None, vocab_size: int=1000, character_coverage: float=1.0) -> str: 'Generates subword vocabularies based on `sentencepiece`.\n\n Args:\n text_list (List[str], optional): List of text data. Defaults to None.\n text_file (str, optional): Path to text data. Defaults to None.\n output_file (str, optional): Path to save trained subword vocabularies. Defaults to "".\n vocab_size (int, optional): Vocabulary size. Defaults to 8000.\n character_coverage (float, optional): Coverage of characters in text data. Defaults to 1.0.\n\n Raises:\n ImportError: If `sentencepiece` is not installed.\n\n Returns:\n str: Path to `${output_file}.model`.\n ' try: import sentencepiece as splib except ImportError: raise ImportError('`sentencepiece` cannot be imported, please run `pip install sentencepiece` first') assert (output_file is not None) output_file = str(output_file) assert (vocab_size > 0), vocab_size cmd = '--input={} --model_prefix={} --model_type=unigram --vocab_size={} --character_coverage={} --pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --eos_piece=<eos> --remove_extra_whitespaces=true ' if (text_list is not None): assert isinstance(text_list, list) assert isinstance(text_list[0], str) logger.info(f'Generating vocab (type = subword, coverage = {character_coverage}) from {len(text_list)} sentences.') with tempfile.TemporaryDirectory() as directory: input_file = os.path.join(directory, 'text.txt') with open(input_file, 'w') as fp: for text in text_list: fp.write((text + '\n')) cmd = cmd.format(input_file, output_file, vocab_size, character_coverage) splib.SentencePieceTrainer.Train(cmd) if (text_file is not None): logger.info(f'Generating vocab (type = subword, coverage = {character_coverage}) from {text_file}') cmd = cmd.format(text_file, output_file, vocab_size, character_coverage) splib.SentencePieceTrainer.Train(cmd) return (output_file + '.model')
def generate_vocab(mode: str, text_list: List[str]=None, text_file: str=None, read_lines: int=10000000, **vocab_args) -> Union[(List[str], str)]: 'Generates vocabularies given text data.\n\n Args:\n mode (str): Vocabulary type\n text_list (List[str], optional): List of text data. Defaults to None.\n text_file (str, optional): Path to text data. Defaults to None.\n read_lines (int, optional): Maximum lines to read from `text_file`. Defaults to 10000000.\n vocab_args:\n if :code:`mode != subword`, arguments for :obj:`generate_basic_vocab`\n if :code:`mode == subword`, arguments for :obj:`generate_subword_vocab`\n\n Returns:\n Union[List[str], str]: A list of vocabularies or a path to `.vocab` file.\n ' if ((text_list is None) and (mode in {'character', 'word', 'phoneme'})): assert isinstance(text_file, str) with open(text_file, 'r', encoding='UTF-8') as fp: text_list = [line.strip('\r\n ') for (i, line) in enumerate(fp) if (i < read_lines)] if (mode == 'character'): return generate_basic_vocab('character', text_list, **vocab_args) if (mode in {'word', 'phoneme'}): return generate_basic_vocab('word', text_list, **vocab_args) if (mode == 'subword'): return generate_subword_vocab(text_list=text_list, text_file=text_file, **vocab_args) else: raise ValueError(f'Unsupported mode (vocabulary type): {mode}')
class BalancedWeightedSampler(): '\n This batch sampler is always randomized, hence cannot be used for testing\n ' def __init__(self, labels: List[str], batch_size: int, duplicate: int=1, seed: int=12345678) -> None: self.epoch = 0 self.seed = seed self.batch_size = batch_size self.duplicate = duplicate class2weight = Counter() for label in labels: class2weight.update([label]) weights = [] for label in labels: weights.append((len(labels) / class2weight[label])) self.weights = weights def set_epoch(self, epoch: int): self.epoch = epoch def __iter__(self) -> Iterator[T_co]: generator = torch.Generator() generator.manual_seed((self.epoch + self.seed)) sampler = WeightedRandomSampler(self.weights, (len(self.weights) * self.duplicate), generator=generator) indices = list(sampler) batch = [] for indice in indices: batch.append(indice) if (len(batch) == self.batch_size): (yield batch) batch = [] if (len(batch) > 0): (yield batch) def __len__(self): return len(list(iter(self)))
class DistributedBatchSamplerWrapper():
    """Shard the batches of an existing batch sampler across DDP processes.

    Each rank receives every ``num_replicas``-th batch. When the batch count is
    not divisible by the world size, batches are split in half (or duplicated /
    left uneven, depending on the flags) so every rank sees the same number.
    """

    def __init__(self, batch_sampler: BatchSampler, num_replicas: Optional[int]=None, rank: Optional[int]=None, allow_duplicates: bool=False, allow_uneven: bool=False) -> None:
        # Default world size / rank from the torch.distributed runtime.
        if (num_replicas is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if (rank is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        if ((rank >= num_replicas) or (rank < 0)):
            raise ValueError('Invalid rank {}, rank should be in the interval [0, {}]'.format(rank, (num_replicas - 1)))
        self.batch_sampler = batch_sampler
        self.num_replicas = num_replicas
        self.rank = rank
        self.allow_duplicates = allow_duplicates
        self.allow_uneven = allow_uneven

    def __iter__(self) -> Iterator[T_co]:
        logger.info(f'Building distributed batch sampler for rank={self.rank}, world_size={self.num_replicas}')
        all_rank_batch_indices = list(iter(self.batch_sampler))
        if ((len(all_rank_batch_indices) % self.num_replicas) == 0):
            # Already divisible: shard directly.
            target_batch_indices = all_rank_batch_indices
        else:
            # Not divisible: split `num_to_halve` batches into two halves each,
            # raising the batch count to a multiple of num_replicas without
            # dropping any index.
            num_to_halve = (self.num_replicas - (len(all_rank_batch_indices) % self.num_replicas))
            flatten_batch_indices = deepcopy(all_rank_batch_indices)
            while (num_to_halve > 0):
                newly_flatten = []
                all_cant_be_halved = True
                for indices in flatten_batch_indices:
                    if ((num_to_halve > 0) and (len(indices) > 1)):
                        (indices1, indices2) = (indices[:(len(indices) // 2)], indices[(len(indices) // 2):])
                        newly_flatten += [indices1, indices2]
                        num_to_halve -= 1
                        all_cant_be_halved = False
                    else:
                        newly_flatten.append(indices)
                flatten_batch_indices = deepcopy(newly_flatten)
                if all_cant_be_halved:
                    # Every remaining batch holds a single index; fall back to
                    # the duplicate/uneven policies.
                    # NOTE(review): in the allow_duplicates branch num_to_halve
                    # is not decremented and no break is visible here — confirm
                    # upstream that this loop terminates when the appended
                    # duplicate batches are also single-index.
                    if self.allow_duplicates:
                        logger.warning('To ensure all the dataloaders in different processes get the same number of batches. Some batches are duplicated. This must not happen during the evaluation stage.')
                        flatten_batch_indices = (flatten_batch_indices + all_rank_batch_indices[:num_to_halve])
                    elif self.allow_uneven:
                        logger.warning('Total batches will not be evenly distributed across the dataloaders in different processes. This must not happen during the training stage and can lead to hanging, while might be okay during the evaluation stage.')
                    else:
                        raise ValueError('The provided batch sampler cannot be safely wrapped for distributed training. Please try increase the number of indices in each batch. Or, allowing duplicated batches or uneven number of batches across dataloaders.')
            target_batch_indices = flatten_batch_indices
        if (not self.allow_uneven):
            assert ((len(target_batch_indices) % self.num_replicas) == 0)
        # Round-robin sharding: this rank takes every num_replicas-th batch.
        batch_indices = target_batch_indices[self.rank::self.num_replicas]
        return iter(batch_indices)

    def __len__(self) -> int:
        return len(list(iter(self)))

    def set_epoch(self, epoch: int) -> None:
        # Forward the epoch to the wrapped sampler when it supports reseeding.
        if hasattr(self.batch_sampler, 'set_epoch'):
            self.batch_sampler.set_epoch(epoch)
class FixedBatchSizeBatchSampler(): '\n The reduced timestamps for a batch should not exceed the max_timestamp.\n If shuffled, each indices are first shuffled before aggregated into batches\n\n Args:\n data_source: __len__ is implemented\n ' def __init__(self, data_source, batch_size: int, shuffle: bool=False, seed: int=12345678) -> None: self.batch_size = batch_size self.seed = seed self.shuffle = shuffle if shuffle: self.generator = torch.Generator() self.sampler = RandomSampler(data_source, generator=self.generator) else: self.sampler = SequentialSampler(data_source) def set_epoch(self, epoch: int) -> None: if self.shuffle: self.generator.manual_seed((self.seed + epoch)) def _evaluate_reduced_timestamps(self, batch_indices): return self.reduce_func([self.timestamps[indice] for indice in batch_indices]) def __iter__(self): batch_sampler = BatchSampler(self.sampler, batch_size=self.batch_size, drop_last=False) return iter(batch_sampler) def __len__(self): return len(list(iter(self)))
class GroupSameItemSampler(): def __init__(self, items: List[str]) -> None: self.indices = defaultdict(list) for (idx, item) in enumerate(items): self.indices[item].append(idx) self.epoch = 0 def set_epoch(self, epoch: int): self.epoch = epoch def __iter__(self): for batch_indices in self.indices.values(): (yield batch_indices) def __len__(self): return len(list(iter(self)))
class MaxTimestampBatchSampler(): '\n The reduced timestamps for a batch should not exceed the max_timestamp.\n If shuffled, each indices are first shuffled before aggregated into batches\n ' def __init__(self, lengths: List[int], max_length: int, shuffle: bool=False, seed: int=12345678, reduce_func: callable=None) -> None: self.lengths = lengths self.max_length = max_length self.shuffle = shuffle self.seed = seed self.epoch = 0 self.reduce_func = (reduce_func or self._default_reduce_func) @staticmethod def _default_reduce_func(timestamps): return (max(timestamps) * len(timestamps)) def set_epoch(self, epoch: int): self.epoch = epoch def _evaluate_reduced_timestamps(self, batch_indices): return self.reduce_func([self.lengths[indice] for indice in batch_indices]) def __iter__(self): if self.shuffle: generator = torch.Generator() generator.manual_seed((self.epoch + self.seed)) indices = torch.randperm(len(self.lengths), generator=generator).tolist() else: indices = list(range(len(self.lengths))) batch = [] for indice in indices: try_new_batch = (batch + [indice]) if (self._evaluate_reduced_timestamps(try_new_batch) <= self.max_length): batch = try_new_batch elif (len(batch) == 0): raise ValueError(f'There is a single length {self.lengths[indice]} larger than max_length {self.max_length}. Please increase the max_length.') else: (yield batch) batch = [indice] if (len(batch) > 0): (yield batch) def __len__(self): return len(list(iter(self)))
class SortedSliceSampler:
    """
    Training-only sampler (always randomized). Each step picks a random anchor
    index, locates it in the length-sorted (descending) order, and batches it
    with the following (shorter-or-equal) ids.

    Args:
        lengths (List[int])
        batch_size (int): the default batch size
        max_length (int): anchors longer than this get half the batch size
        seed (int): base seed combined with the epoch number
        in_batch_shuffle (bool): if False, batches are sorted by length from long to short
    """

    def __init__(self, lengths: List[int], batch_size: int, max_length: int = 300000, seed: int = 12345678, in_batch_shuffle: bool = False) -> None:
        self.lengths = lengths
        self.epoch = 0
        self.seed = seed
        self.batch_size = batch_size
        self.max_length = max_length
        self.in_batch_shuffle = in_batch_shuffle
        # ids ordered by length, longest first (stable sort keeps tie order)
        by_length = sorted(enumerate(lengths), key=lambda pair: pair[1], reverse=True)
        self.sorted_ids = [idx for idx, _ in by_length]

    def set_epoch(self, epoch: int):
        self.epoch = epoch

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)
        order = torch.randperm(len(self.lengths), generator=g).tolist()
        for anchor in order:
            size = self.batch_size
            if self.lengths[anchor] > self.max_length:
                size = self.batch_size // 2
            start = self.sorted_ids.index(anchor)
            batch = self.sorted_ids[start:start + size]
            if self.in_batch_shuffle:
                perm = torch.randperm(len(batch), generator=g).tolist()
                batch = [batch[i] for i in perm]
            yield batch

    def __len__(self):
        return len(list(iter(self)))
class SortedBucketingSampler:
    """
    Bucket ids by descending length into consecutive batches; a batch whose
    head is longer than `max_length` gets half the batch size.

    Args:
        lengths (List[int])
        batch_size (int): the default batch size
        max_length (int): batches whose longest utt exceeds this are halved
        shuffle (bool): whether to shuffle the order of the resulting batches
        in_batch_shuffle (bool): if False, batches stay sorted long-to-short
        seed (int): base seed combined with the epoch number
    """

    def __init__(self, lengths: List[int], batch_size: int, max_length: int = 300000, shuffle: bool = False, in_batch_shuffle: bool = False, seed: int = 12345678) -> None:
        self.epoch = 0
        self.seed = seed
        self.batch_size = batch_size
        self.max_length = max_length
        self.shuffle = shuffle
        self.in_batch_shuffle = in_batch_shuffle
        self.lengths = lengths
        by_length = sorted(enumerate(self.lengths), key=lambda pair: pair[1], reverse=True)
        self.sorted_ids = [idx for idx, _ in by_length]

    def set_epoch(self, epoch: int):
        self.epoch = epoch

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)
        batches = []
        cursor = 0
        total = len(self.sorted_ids)
        while cursor < total:
            head = self.sorted_ids[cursor]
            size = self.batch_size // 2 if self.lengths[head] > self.max_length else self.batch_size
            batch = self.sorted_ids[cursor:min(cursor + size, total)]
            cursor += size
            if self.in_batch_shuffle:
                perm = torch.randperm(len(batch), generator=g)
                batch = [batch[i] for i in perm]
            batches.append(batch)
        if self.shuffle:
            perm = torch.randperm(len(batches), generator=g)
            batches = [batches[i] for i in perm]
        return iter(batches)

    def __len__(self):
        return len(list(iter(self)))
class AugmentedDynamicItemDataset(DynamicItemDataset):
    """A DynamicItemDataset extended with named "tools": arbitrary objects
    stored on the dataset and also exposed as dynamic items, so downstream
    pipelines can `take` them by name like any other field.
    """

    def __init__(self, data, dynamic_items=[], output_keys=[], tools: dict = {}):
        # NOTE(review): mutable default arguments ([] / {}) are shared across
        # calls; safe only as long as they are never mutated here — they are
        # currently only read/forwarded. Confirm before changing.
        super().__init__(data, dynamic_items, output_keys)
        assert isinstance(data, OrderedDict)
        self._tools = {}
        for (name, item) in tools.items():
            self.add_tool(name, item)

    def _dynamic_tools(self, id, name):
        # Dynamic-item hook: ignores the per-item `id` and returns the shared
        # tool — every item sees the same object.
        return self._tools[name]

    def add_tool(self, name: str, item: Any) -> None:
        """
        Store the :code:`item` in this dataset with the name :code:`name` so it can be used in
        :code:`__getitem__`. That is, you can retrieve the :code:`item` with the :code:`takes` argument
        of :obj:`add_dynamic_item`.

        .. code-block:: python

            def tokenize_func(tokenizer, text):
                return tokenizer(text)

            self.add_tool("tokenizer", tokenizer)
            self.add_dynamic_item(tokenize_func, takes=["tokenizer", "text"], provides="tokenized_ids")

        You can also later retrieve this tool by :obj:`get_tool` or :obj:`all_tools`
        """
        self._tools[name] = item
        # `partial` freezes the tool name; `takes='id'` makes it a per-item
        # dynamic node even though the returned value is item-independent.
        self.add_dynamic_item(partial(self._dynamic_tools, name=name), takes='id', provides=name)

    def add_tools(self, tools: dict) -> None:
        """
        Store each key-value pair in :code:`tools` as a tool. See :obj:`add_tool` for more information
        """
        for (key, value) in tools.items():
            self.add_tool(key, value)

    def get_tool(self, key) -> Any:
        """
        Return the tool registered under :code:`key`. Raises KeyError when absent.
        See :obj:`add_tool` for more information
        """
        return self._tools[key]

    def has_tool(self, key) -> bool:
        """
        Checks whether has a tool named :code:`key`.
        """
        return (key in self._tools)

    def all_tools(self, copy=True) -> dict:
        """
        Return:
            dict

        Containing all the tools in :code:`name: value` pairs.
        When :code:`copy` is True a deep copy is returned, so callers cannot
        mutate the dataset's registry. See :obj:`add_tool` for more information
        """
        return (deepcopy(self._tools) if copy else self._tools)

    def update_output_keys(self, keys: dict) -> None:
        """
        Compared to :obj:`set_output_keys`, this method updates the output keys mapping
        instead of replacing it with a new dictionary. This can be useful when you only
        want to replace a few mappings and leave others unchanged.
        """
        mapping = self.pipeline.output_mapping.copy()
        mapping.update(keys)
        self.set_output_keys(mapping)

    def keys(self) -> List[str]:
        """
        List all the :code:`static_item` and :code:`dynamic_item` in the dataset.
        :code:`static_item` resides directly in the memory and are given by the dataset
        initialization dictionary. :code:`dynamic_item` are content computed
        on-the-fly basing on :code:`static_item`.

        Keys starting with an underscore and keys that are tools are excluded.
        """
        available_keys: List[str] = list(self.pipeline.key_to_node.keys())
        for dynamic_item in self.pipeline.dynamic_items:
            provides = dynamic_item.provides
            assert isinstance(provides, (list, tuple))
            available_keys += provides
        available_keys = [key for key in available_keys if ((not key.startswith('_')) and (key not in self._tools))]
        return available_keys

    def set_info(self, info):
        # Remember the output keys to expose through get_info().
        self._info = info

    def get_info(self, index):
        # Fetch item `index` restricted to the keys registered via set_info().
        with self.output_keys_as(self._info):
            return self.__getitem__(index)

    def __getitem__(self, index):
        """
        This remains all the usage of the original SpeechBrain DynamicItemDataset.__getitem__,
        except that by default it uses :obj:`keys` as the default :code:`output_keys`
        """
        if (len(self.pipeline.output_mapping) == 0):
            # No explicit output mapping: expose every non-private, non-tool key.
            with self.output_keys_as(self.keys()):
                return super().__getitem__(index)
        else:
            return super().__getitem__(index)
class DataPipe():
    """Base class for dataset transformation steps. Subclasses implement
    :obj:`forward`; calling the pipe normalizes a plain dict into an
    :obj:`AugmentedDynamicItemDataset` and optionally registers tools first.
    """

    def __call__(self, dataset: Union[dict, AugmentedDynamicItemDataset], tools: dict = None) -> Any:
        if isinstance(dataset, dict):
            dataset = AugmentedDynamicItemDataset(dataset)
        if (tools is not None):
            dataset.add_tools(tools)
        return self.forward(dataset)

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        # Subclass responsibility: transform and return the dataset.
        raise NotImplementedError

    def __getattribute__(self, name):
        value = super().__getattribute__(name)
        if isinstance(value, DynamicItem):
            # A DynamicItem stored on the instance wraps an unbound function;
            # bind it to `self` on access so it behaves like a method.
            # NOTE(review): this mutates value.func on every access — appears
            # idempotent because bound methods rebind to themselves; confirm.
            value.func = value.func.__get__(self)
        return value
class SequentialDataPipe(DataPipe):
    """Compose several DataPipes; `forward` applies them in the given order."""

    def __init__(self, *pipes: List[DataPipe]) -> None:
        self._pipes = pipes

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        result = dataset
        for stage in self._pipes:
            result = stage(result)
        return result
def default_collate_fn(samples, padding_value: int = 0):
    """
    Each item in **DynamicItemDataset** is a dict.
    Pad (or convert to an object numpy array) a batch of such dicts.

    Args:
        samples (List[dict]): e.g. each dict holds

        .. code-block:: yaml

            wav: a single waveform
            label: a single string

    Return:
        dict

        .. code-block:: yaml

            wav: padded waveforms
            label: np.array([a list of string labels])
    """
    assert isinstance(samples[0], dict)
    collated = dict()
    for key in samples[0].keys():
        field = [sample[key] for sample in samples]
        first = field[0]
        if isinstance(first, int):
            collated[key] = torch.LongTensor(field)
        elif isinstance(first, float):
            collated[key] = torch.FloatTensor(field)
        elif isinstance(first, np.ndarray):
            tensors = [torch.from_numpy(value).float() for value in field]
            collated[key] = pad_sequence(tensors, batch_first=True, padding_value=padding_value)
        elif isinstance(first, torch.Tensor):
            collated[key] = pad_sequence(field, batch_first=True, padding_value=padding_value)
        else:
            # anything else (e.g. strings) becomes an object array
            collated[key] = np.array(field, dtype='object')
    return collated
def _count_frames(data_len, size, step): return int((((data_len - size) + step) / step))
def _gen_frame_indices(data_length, size=2000, step=2000, use_last_samples=True): i = (- 1) for i in range(_count_frames(data_length, size, step)): (yield ((i * step), ((i * step) + size))) if (use_last_samples and (((i * step) + size) < data_length)): if ((data_length - ((i + 1) * step)) > 0): (yield (((i + 1) * step), data_length))
@dataclass
class UnfoldChunkByFrame(DataPipe):
    """
    Given a dataset with items containing `start_sec_name` and `end_sec_name`.
    For each item, produce `((end_sec_name - start_sec_name) * sample_rate / feat_frame_shift) / chunk_frames`
    items with the smaller durations between `min_chunk_frames` and `max_chunk_frames`

    Args:
        sample_rate (int): The sample_rate of the audio.
        feat_frame_shift (int): The target feature's frame_shift
        min_chunk_frames (int): The min frames for a chunk
        max_chunk_frames (int): The max frames for a chunk
        step_frames (int): The step frames for each sliding window
        use_last_samples (bool): whether to drop the last samples of an utterance which cannot form an window
        start_sec_name (str): The key name for the starting second of the audio
        end_sec_name (str): The key name for the ending second of the audio
    """

    sample_rate: int = 16000
    feat_frame_shift: int = 160
    min_chunk_frames: int = 2000
    max_chunk_frames: int = 2000  # NOTE(review): unused below — only min_chunk_frames is passed to _gen_frame_indices; confirm intended
    step_frames: int = 2000
    use_last_samples: bool = True
    start_sec_name: str = 'start_sec'
    end_sec_name: str = 'end_sec'

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        unfolded_items = OrderedDict()
        for item in dataset:
            key = item.pop('id')
            # segment duration expressed in feature frames: secs * sr / frame_shift
            data_len = int((((item[self.end_sec_name] - item[self.start_sec_name]) * self.sample_rate) / self.feat_frame_shift))
            for (unfold_index, (start, end)) in enumerate(_gen_frame_indices(data_len, self.min_chunk_frames, self.step_frames)):
                # convert frame offsets back to absolute seconds
                start_sec = (item[self.start_sec_name] + ((start * self.feat_frame_shift) / self.sample_rate))
                end_sec = (item[self.start_sec_name] + ((end * self.feat_frame_shift) / self.sample_rate))
                dur_sec = (end_sec - start_sec)
                utt_id = f'{key}_start-{start_sec}_end-{end_sec}_dur-{dur_sec}'
                subitem = deepcopy(item)
                subitem['unchunked_id'] = key
                subitem['chunk_index'] = unfold_index
                subitem[self.start_sec_name] = start_sec
                subitem[self.end_sec_name] = end_sec
                unfolded_items[utt_id] = subitem
        new_dataset = AugmentedDynamicItemDataset(unfolded_items)
        # share (not copy) the tool registry with the new dataset
        new_dataset.add_tools(dataset.all_tools(False))
        return new_dataset
@dataclass
class UnfoldChunkBySec(DataPipe):
    """
    Slice each item's [start_sec, end_sec] segment into fixed-length chunks
    expressed in seconds, producing one new item per chunk.

    Args:
        use_last_samples (bool): whether to keep the trailing partial chunk
        min_chunk_secs (float): chunk window size in seconds
        max_chunk_secs (float): NOTE(review): unused below — only
            min_chunk_secs is passed as the window size; confirm intended
        step_secs (float): sliding-window hop in seconds
        start_sec_name (str): key name of the segment start time
        end_sec_name (str): key name of the segment end time
    """

    use_last_samples: bool = True
    min_chunk_secs: float = 2.5
    max_chunk_secs: float = 2.5
    # was annotated `int` while the default is 2.5; the value is used in
    # float second arithmetic, so `float` is the correct annotation
    step_secs: float = 2.5
    start_sec_name: str = 'start_sec'
    end_sec_name: str = 'end_sec'

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        unfolded_items = OrderedDict()
        for item in dataset:
            key = item.pop('id')
            segment_secs = item[self.end_sec_name] - item[self.start_sec_name]
            for unfold_index, (start, end) in enumerate(
                _gen_frame_indices(segment_secs, self.min_chunk_secs, self.step_secs)
            ):
                # offsets are already in seconds; shift to absolute times
                start_sec = item[self.start_sec_name] + start
                end_sec = item[self.start_sec_name] + end
                dur_sec = end_sec - start_sec
                utt_id = f'{key}_start-{start_sec}_end-{end_sec}_dur-{dur_sec}'
                subitem = deepcopy(item)
                subitem['unchunked_id'] = key
                subitem['chunk_index'] = unfold_index
                subitem[self.start_sec_name] = start_sec
                subitem[self.end_sec_name] = end_sec
                unfolded_items[utt_id] = subitem
        new_dataset = AugmentedDynamicItemDataset(unfolded_items)
        # share (not copy) the tool registry with the new dataset
        new_dataset.add_tools(dataset.all_tools(False))
        return new_dataset
class SetOutputKeys(DataPipe):
    """Pipe that merges the given `output_keys` mapping into the dataset's
    existing output-key mapping (see AugmentedDynamicItemDataset.update_output_keys)."""

    def __init__(self, output_keys: dict = None) -> None:
        super().__init__()
        self.output_keys = output_keys

    def forward(self, dataset: AugmentedDynamicItemDataset):
        dataset.update_output_keys(self.output_keys)
        return dataset
@dataclass
class LoadAudio(DataPipe):
    """
    Load an audio file into a (samples, 1) float tensor, with optional
    cropping (when start/end seconds are present on the items), sox effects,
    resampling, and channel reduction. Also provides `{wav_name}_len`.

    Args:
        audio_sample_rate (int): target sample rate; audio is resampled if needed
        audio_channel_reduction (str): 'first' keeps channel 0, 'mean' averages channels
        sox_effects (list): optional effect list for torchaudio.sox_effects
        wav_path_name (str): item key holding the audio file path
        wav_name (str): item key to provide the loaded waveform under
        start_sec_name (str): item key of the crop start (seconds), optional
        end_sec_name (str): item key of the crop end (seconds), optional
        n_jobs (int): accepted for API compatibility — sibling pipes in this
            file (PretrainApcPipe, PretrainAudioAlbertPipe,
            PretrainMockingjayPipe) construct LoadAudio(n_jobs=...); without
            this field those calls raise TypeError. Not used inside this pipe.
    """

    audio_sample_rate: int = 16000
    audio_channel_reduction: str = 'first'
    sox_effects: list = None
    wav_path_name: str = 'wav_path'
    wav_name: str = 'wav'
    start_sec_name: str = 'start_sec'
    end_sec_name: str = 'end_sec'
    # appended last so the positional field order of existing callers is unchanged
    n_jobs: int = 6

    def load_audio(self, wav_path, start_sec: float = None, end_sec: float = None):
        """Load (and optionally crop) `wav_path`; returns a (samples, 1) tensor."""
        crop_segment = (start_sec is not None) and (end_sec is not None)

        torchaudio.set_audio_backend('sox_io')
        wav, sr = torchaudio.load(
            wav_path,
            frame_offset=round(start_sec * self.audio_sample_rate) if crop_segment else 0,
            num_frames=round((end_sec - start_sec) * self.audio_sample_rate) if crop_segment else -1,
        )

        if self.sox_effects is not None:
            wav, sr = torchaudio.sox_effects.apply_effects_tensor(wav, sr, effects=self.sox_effects)

        if sr != self.audio_sample_rate:
            resampler = torchaudio.transforms.Resample(sr, self.audio_sample_rate)
            wav = resampler(wav)

        # reduce multi-channel audio to a single channel
        if self.audio_channel_reduction == 'first':
            wav = wav[0]
        elif self.audio_channel_reduction == 'mean':
            wav = wav.mean(dim=0)

        wav = wav.view(-1, 1)
        return wav

    def compute_length(self, wav):
        return len(wav)

    def forward(self, dataset: AugmentedDynamicItemDataset):
        # Probe the first item: crop only when both boundary keys exist.
        item = dataset[0]
        crop_segment = (self.start_sec_name in item) and (self.end_sec_name in item)

        if not crop_segment:
            dataset.add_dynamic_item(self.load_audio, takes=self.wav_path_name, provides=self.wav_name)
        else:
            dataset.add_dynamic_item(
                self.load_audio,
                takes=[self.wav_path_name, self.start_sec_name, self.end_sec_name],
                provides=self.wav_name,
            )
        dataset.add_dynamic_item(self.compute_length, takes=self.wav_name, provides=f'{self.wav_name}_len')
        return dataset
@dataclass
class EncodeCategory(DataPipe):
    """Encode a single categorical label into an integer class id, optionally
    training the category encoder from the dataset's labels first."""

    train_category_encoder: bool = False
    label_name: str = 'label'
    category_encoder_name: str = 'category'
    encoded_target_name: str = 'class_id'

    def prepare_category(self, labels):
        # encoder over the sorted distinct labels
        return CategoryEncoder(sorted(set(labels)))

    def encode_label(self, category, label):
        return category.encode(label)

    def forward(self, dataset: AugmentedDynamicItemDataset):
        if self.train_category_encoder:
            with dataset.output_keys_as([self.label_name]):
                labels = [item[self.label_name] for item in dataset]
            category = self.prepare_category(labels)
            dataset.add_tool(self.category_encoder_name, category)

        category = dataset.get_tool(self.category_encoder_name)
        dataset.add_tool('output_size', len(category))
        dataset.add_dynamic_item(
            self.encode_label,
            takes=[self.category_encoder_name, self.label_name],
            provides=self.encoded_target_name,
        )
        return dataset
@dataclass
class EncodeMultipleCategory(EncodeCategory):
    """Encode a tuple of labels (one per label type) into a LongTensor of class
    ids, training one category encoder per label type when requested."""

    train_category_encoder: bool = False
    label_name: str = 'labels'
    category_encoder_name: str = 'categories'
    encoded_target_name: str = 'class_ids'

    def encode_label(self, categories, labels):
        encoded = [encoder.encode(value) for encoder, value in zip(categories, labels)]
        return torch.LongTensor(encoded)

    def forward(self, dataset: AugmentedDynamicItemDataset):
        if self.train_category_encoder:
            with dataset.output_keys_as([self.label_name]):
                labels = [item[self.label_name] for item in dataset]
            # transpose: one sequence of values per label type
            label_types = list(zip(*labels))
            categories = [self.prepare_category(label_type) for label_type in label_types]
            dataset.add_tool(self.category_encoder_name, categories)
            dataset.add_tool('output_size', sum(len(c) for c in categories))

        dataset.add_dynamic_item(
            self.encode_label,
            takes=[self.category_encoder_name, self.label_name],
            provides=self.encoded_target_name,
        )
        return dataset
@dataclass
class EncodeMultiLabel(DataPipe):
    """Encode a variable-length list of labels into a fixed-size binary
    (multi-hot) vector, building the category encoder from the dataset when absent."""

    label_name: str = 'labels'
    category_encoder_name: str = 'category'
    encoded_target_name: str = 'binary_labels'

    @staticmethod
    def label_to_binary_vector(label: List, num_labels: int) -> torch.Tensor:
        if not label:
            return torch.zeros((num_labels,), dtype=torch.float)
        vector = torch.zeros((num_labels,)).scatter(0, torch.tensor(label), 1.0)
        # sanity check: exactly the requested positions are hot
        assert set(torch.where(vector == 1.0)[0].numpy()) == set(label)
        return vector

    def encode_label(self, category, labels):
        ids = [category.encode(value) for value in labels]
        return self.label_to_binary_vector(ids, len(category))

    def forward(self, dataset: AugmentedDynamicItemDataset):
        if not dataset.has_tool(self.category_encoder_name):
            with dataset.output_keys_as([self.label_name]):
                all_labels = []
                for item in dataset:
                    all_labels.extend(item[self.label_name])
            all_labels.sort()
            all_labels = set(all_labels)
            dataset.add_tool(self.category_encoder_name, CategoryEncoder(all_labels))

        category = dataset.get_tool(self.category_encoder_name)
        dataset.add_tool('output_size', len(category))
        dataset.add_dynamic_item(
            self.encode_label,
            takes=[self.category_encoder_name, self.label_name],
            provides=self.encoded_target_name,
        )
        return dataset
@dataclass
class GenerateTokenizer(DataPipe):
    """Ensure the dataset carries a text tokenizer tool, generating one from
    the dataset's text (or from text/vocab files) when it is absent.
    """

    generate: bool = True  # whether to build a tokenizer when none exists
    tokenizer_name: str = 'tokenizer'  # tool name the tokenizer is stored under
    text_name: str = 'transcription'  # item key holding training text
    vocab_type: str = 'character'
    text_file: str = None  # optional corpus file used instead of dataset text
    vocab_file: str = None  # optional pre-built vocab file
    slots_file: str = None
    vocab_args: dict = None  # extra kwargs forwarded to generate_vocab

    def prepare_tokenizer(self, text_list: str = None) -> Tokenizer:
        """Generates tokenizer from text data.

        Args:
            text_list (str, optional): List of text. Defaults to None.

        Returns:
            Tokenizer: Generated tokenizer
        """
        vocab_args = (self.vocab_args or {})
        assert isinstance(vocab_args, dict)

        if (text_list is not None):
            vocab_result = generate_vocab(self.vocab_type, text_list=text_list, **vocab_args)
        else:
            vocab_result = generate_vocab(self.vocab_type, text_file=self.text_file, **vocab_args)
        # generate_vocab may return an in-memory list or a file path; pass
        # whichever form it produced on to load_tokenizer
        vocab_list = (vocab_result if isinstance(vocab_result, list) else None)
        vocab_file = (vocab_result if isinstance(vocab_result, str) else None)
        tokenizer = load_tokenizer(self.vocab_type, vocab_file=vocab_file, vocab_list=vocab_list, slots_file=self.slots_file)
        return tokenizer

    def forward(self, dataset: AugmentedDynamicItemDataset):
        try:
            # EAFP: get_tool raises KeyError when the tool is missing
            tokenizer = dataset.get_tool(self.tokenizer_name)
            logger.info(f'Tokenizer (name = {self.tokenizer_name}) exists in dataset, skip generation.')
        except KeyError:
            if self.generate:
                if ((self.vocab_file is not None) and os.path.exists(self.vocab_file)):
                    # reuse a pre-built vocab file
                    tokenizer = load_tokenizer(self.vocab_type, vocab_file=self.vocab_file, slots_file=self.slots_file)
                else:
                    text_list = None
                    if (self.text_file is None):
                        # fall back to the dataset's own text items
                        with dataset.output_keys_as([self.text_name]):
                            text_list = [item[self.text_name] for item in dataset]
                    tokenizer = self.prepare_tokenizer(text_list)
                dataset.add_tool(self.tokenizer_name, tokenizer)
            else:
                logger.warning('No tokenizer is found or generated. No-op for this DataPipe')
        return dataset
@dataclass
class EncodeText(DataPipe):
    """Tokenize a text field into a LongTensor of ids using the dataset's
    tokenizer tool; also exposes the vocabulary size as `output_size`."""

    text_name: str = 'transcription'
    output_text_name: str = 'tokenized_text'
    tokenizer_name: str = 'tokenizer'

    def encode_text(self, tokenizer: Tokenizer, text: str) -> torch.LongTensor:
        return torch.LongTensor(tokenizer.encode(text))

    def forward(self, dataset: AugmentedDynamicItemDataset):
        try:
            tokenizer = dataset.get_tool(self.tokenizer_name)
        except KeyError:
            raise KeyError(f'Tokenizer (name = {self.tokenizer_name}) not found!')

        dataset.add_tool('output_size', tokenizer.vocab_size)
        dataset.add_dynamic_item(
            self.encode_text,
            takes=[self.tokenizer_name, self.text_name],
            provides=self.output_text_name,
        )
        return dataset
@dataclass
class Phonemize(DataPipe):
    """Convert text to a phoneme string (via a G2P tool), then to token ids.
    Missing g2p/tokenizer tools are filled in with defaults (with a warning)."""

    text_name: str = 'transcription'
    phonemized_text_name: str = 'phonemized_text'
    output_text_name: str = 'tokenized_text'
    g2p_name: str = 'g2p'
    tokenizer_name: str = 'tokenizer'

    def grapheme2phoneme(self, g2p: G2P, text: str) -> str:
        return g2p.encode(text)

    def encode_text(self, tokenizer: Tokenizer, text: str) -> torch.LongTensor:
        return torch.LongTensor(tokenizer.encode(text))

    def forward(self, dataset: AugmentedDynamicItemDataset):
        if not dataset.has_tool(self.g2p_name):
            logger.warning(f'Cannot find {self.g2p_name} in dataset, use default G2P instead.')
            dataset.add_tool(self.g2p_name, G2P())
        if not dataset.has_tool(self.tokenizer_name):
            logger.warning(f'Cannot find {self.tokenizer_name} in dataset, use default tokenizer instead.')
            dataset.add_tool(self.tokenizer_name, default_phoneme_tokenizer())

        dataset.add_dynamic_item(
            self.grapheme2phoneme,
            takes=[self.g2p_name, self.text_name],
            provides=self.phonemized_text_name,
        )
        dataset.add_dynamic_item(
            self.encode_text,
            takes=[self.tokenizer_name, self.phonemized_text_name],
            provides=self.output_text_name,
        )
        dataset.add_tool('output_size', dataset.get_tool(self.tokenizer_name).vocab_size)
        return dataset
@dataclass
class RandomCrop(DataPipe):
    """
    Randomly crop waveforms to at most `max_secs` seconds.
    Completely randomized for every batch even with the same datapoint id.
    Only suitable for training.

    Args:
        sample_rate (int): waveform sample rate
        max_secs (float): maximum crop length in seconds; None disables cropping
        wav_name (str): handle for the `takes` (input)
        crop_name (str): handle for the `provides` (output); also provides
            `{crop_name}_len`
    """

    sample_rate: int = 16000
    max_secs: float = None
    wav_name: str = 'wav'
    crop_name: str = 'wav_crop'

    def crop_wav(self, wav):
        """Return (cropped_wav, length). Crops only when the wav is longer
        than max_secs * sample_rate."""
        if self.max_secs is not None:
            # random.randint requires integer bounds, but max_secs * sample_rate
            # can be a float (e.g. max_secs=2.5); the original passed floats,
            # which modern Python's randint/randrange rejects. Round the target
            # length once up front.
            max_samples = round(self.max_secs * self.sample_rate)
            if wav.size(0) > max_samples:
                start = random.randint(0, wav.size(0) - max_samples)
                wav = wav[start:start + max_samples]
        return (wav, wav.size(0))

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        dataset.add_dynamic_item(
            self.crop_wav,
            takes=[self.wav_name],
            provides=[self.crop_name, f'{self.crop_name}_len'],
        )
        return dataset
@dataclass
class ExtractKaldiFeat(DataPipe):
    """
    Extract kaldi-style features from waveforms.

    Args:
        kaldi (dict): args for the kaldi extracter
        delta (dict): args for applying delta on features
        cmvn (dict): args for applying cmvn on features
        wav_name (str): handle for the `takes` (input)
        feat_name (str): handle for the `provides` (output)
    """

    kaldi: dict = None
    delta: dict = None
    cmvn: dict = None
    wav_name: str = 'wav'
    feat_name: str = 'feat'

    def extract_feat(self, extracter, wav):
        """(wav_seq_len, 1) -> extracter -> (feat_seq_len, feat_dim)"""
        return extracter(wav)

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        _audio_config = {
            'kaldi': self.kaldi,
            'delta': self.delta,
            'cmvn': self.cmvn,
        }
        extracter, feat_dim, frame_shift = kaldi_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_tool('frame_shift', frame_shift)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
@dataclass
class ExtractOnlineFeat(DataPipe):
    """
    Extract on-the-fly acoustic features from waveforms.

    Args:
        win_ms (int): window size in ms
        hop_ms (int): hop size in ms
        n_freq (int): number of frequency bins
        n_mels (int): number of mel features
        n_mfcc (int): number of mfcc features
        input (dict): args for the input feat, example - {"channel": 0, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}
        target (dict): args for the output feat, example - {"channel": 1, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}
        wav_name (str): handle for the `takes` (input)
        feat_name (str): handle for the `provides` (output)
    """

    win_ms: int = 25
    hop_ms: int = 10
    n_freq: int = 201
    n_mels: int = 80
    n_mfcc: int = 13
    input: dict = None
    target: dict = None
    wav_name: str = 'wav'
    feat_name: str = 'feat'

    def extract_feat(self, extracter, wav):
        """
        (wav_seq_len, 1) -> permute + unsqueeze ->
        (1, 1, wav_seq_len) -> extracter -> (feat_seq_len, feat_dim)
        """
        batched = wav.permute(1, 0).unsqueeze(0)
        return extracter(batched)[0][0]

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        _audio_config = {
            'win_ms': self.win_ms,
            'hop_ms': self.hop_ms,
            'n_freq': self.n_freq,
            'n_mels': self.n_mels,
            'n_mfcc': self.n_mfcc,
            'input': self.input,
            'target': self.target,
        }
        extracter, feat_dim, _ = online_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
@dataclass
class ExtractApcFeat(DataPipe):
    """
    Extract APC-style acoustic features from waveforms.

    Args:
        feat_type (str): feature type
        feat_dim (int): feature dimension
        frame_length (int): window size in ms
        frame_shift (int): hop size in ms
        decode_wav (bool): whether to decode wav
        cmvn (bool): whether to apply uttr.-wised CMVN on feature
        wav_name (str): handle for the `takes` (input)
        feat_name (str): handle for the `provides` (output)
    """

    feat_type: str = 'fbank'
    feat_dim: int = 80
    frame_length: int = 25
    frame_shift: int = 10
    decode_wav: bool = False
    cmvn: bool = True
    wav_name: str = 'wav'
    feat_name: str = 'feat'

    def extract_feat(self, extracter, wav):
        """
        (wav_seq_len, 1) -> permute ->
        (1, wav_seq_len) -> extracter -> (feat_seq_len, feat_dim)
        """
        return extracter(wav.permute(1, 0))

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        _audio_config = {
            'feat_type': self.feat_type,
            'feat_dim': self.feat_dim,
            'frame_length': self.frame_length,
            'frame_shift': self.frame_shift,
            'decode_wav': self.decode_wav,
            'cmvn': self.cmvn,
        }
        extracter, feat_dim = apc_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
@dataclass
class ExtractNpcFeat(DataPipe):
    """
    Extract NPC-style acoustic features from waveforms.

    Args:
        feat_type (str): feature type
        feat_dim (int): feature dimension
        frame_length (int): window size in ms
        frame_shift (int): hop size in ms
        decode_wav (bool): whether to decode wav
        cmvn (bool): whether to apply uttr.-wised CMVN on feature
        wav_name (str): handle for the `takes` (input)
        feat_name (str): handle for the `provides` (output)
    """

    feat_type: str = 'fbank'
    feat_dim: int = 80
    frame_length: int = 25
    frame_shift: int = 10
    decode_wav: bool = False
    cmvn: bool = True
    wav_name: str = 'wav'
    feat_name: str = 'feat'

    def extract_feat(self, extracter, wav):
        """
        (wav_seq_len, 1) -> permute ->
        (1, wav_seq_len) -> extracter -> (feat_seq_len, feat_dim)
        """
        return extracter(wav.permute(1, 0))

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        _audio_config = {
            'feat_type': self.feat_type,
            'feat_dim': self.feat_dim,
            'frame_length': self.frame_length,
            'frame_shift': self.frame_shift,
            'decode_wav': self.decode_wav,
            'cmvn': self.cmvn,
        }
        extracter, feat_dim = npc_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
class HearTimestampDatapipe(SequentialDataPipe):
    """Pipeline for HEAR timestamp tasks: 4-second chunking, audio loading,
    frame-level multi-class tagging, and output-key mapping."""

    def __init__(self, sample_rate: int = 16000, feat_frame_shift: int = 160):
        unfold = UnfoldChunkBySec(min_chunk_secs=4.0, max_chunk_secs=4.0, step_secs=4.0)
        load = LoadAudio(audio_sample_rate=sample_rate)
        tagging = BuildMultiClassTagging(
            sample_rate=sample_rate,
            feat_frame_shift=feat_frame_shift,
            intra_or_inter='inter',
            all_category_name='category',
        )
        outputs = SetOutputKeys(
            dict(
                x='wav',
                x_len='wav_len',
                y='multiclass_tag',
                y_len='tag_len',
                record_id='unchunked_id',
                chunk_id='chunk_index',
            )
        )
        super().__init__(unfold, load, tagging, outputs)
@dataclass
class NoiseAugmentation(DataPipe):
    """
    Randomly add Gaussian noise (std 0.2) to features during training.

    Args:
        noise_proportion (float): for this percentage of the time, Gaussian noise will be applied on all frames during MAM training, set to 0 for no noise
        input_feat_name (str): handle for the `takes` (input)
        output_feat_name (str): handle for the `provides` (output)
    """

    noise_proportion: float = 0.0
    input_feat_name: str = 'input_feat'
    output_feat_name: str = 'output_feat'

    def apply_noise_on_data(self, input_feat):
        """Return input_feat with Gaussian noise applied with probability
        `noise_proportion`; otherwise return it untouched."""
        with torch.no_grad():
            # Fast path: no noise configured, or the dice missed. The original
            # deep-copied the whole feature tensor before rolling the dice,
            # paying the copy even when no noise was applied; copy only when
            # we actually inject noise.
            if self.noise_proportion <= 0 or random.random() >= self.noise_proportion:
                return input_feat
            noised_feat = copy.deepcopy(input_feat)
            noise_sampler = torch.distributions.Normal(0, 0.2)
            noised_feat += noise_sampler.sample(noised_feat.shape)
            return noised_feat.to(dtype=torch.float32)

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        dataset.add_dynamic_item(self.apply_noise_on_data, takes=self.input_feat_name, provides=self.output_feat_name)
        return dataset
@dataclass
class NormWavDecibel(DataPipe):
    """
    Normalize waveform loudness to a target decibel level.

    Args:
        target_level (int): normalize the wav decibel level to the target value
        wav_name (str): handle for the `takes` (input)
        norm_wav_name (str): handle for the `provides` (output)
    """

    target_level: int = -25
    wav_name: str = 'wav'
    norm_wav_name: str = 'wav'

    def normalize_wav_decibel(self, wav):
        """Scale so the RMS level matches target_level dB; target_level == 0
        is a no-op (returns the squeezed wav unchanged, as before)."""
        flat = wav.squeeze()
        if self.target_level == 0:
            return flat
        rms = flat.pow(2).mean().pow(0.5)
        gain = (10 ** (self.target_level / 20)) / (rms + 1e-10)
        return (flat * gain).unsqueeze(1)

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        dataset.add_dynamic_item(self.normalize_wav_decibel, takes=self.wav_name, provides=self.norm_wav_name)
        return dataset
class PretrainApcPipe(SequentialDataPipe):
    """
    APC pretraining pipeline: load audio, extract APC features, build the
    shifted autoregressive-prediction target, and map output keys.

    each item in the input dataset should have:
        wav_path: str
    """

    def __init__(self, output_keys: dict = None, n_future: int = 5, feat_type: str = 'fbank', feat_dim: int = 80, frame_length: int = 25, frame_shift: int = 10, decode_wav: bool = False, cmvn: bool = True, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', n_jobs: int = 6):
        """
        Args:
            output_keys (dict): args for the output handle
            n_future (int): number of future steps for the autoregressive predictive task
            feat_type (str): feature type
            feat_dim (int): feature dimension
            frame_length (int): window size in ms
            frame_shift (int): hop size in ms
            decode_wav (bool): whether to decode wav
            cmvn (bool): whether to apply uttr.-wised CMVN on feature
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        output_keys = (output_keys or dict(x='source_feat', label='target_feat', x_len='feat_len', unique_name='id'))
        # NOTE(review): LoadAudio as defined in this file has no `n_jobs`
        # field, so this call would raise TypeError — confirm LoadAudio
        # accepts n_jobs.
        super().__init__(
            LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction),
            ExtractApcFeat(feat_type=feat_type, feat_dim=feat_dim, frame_length=frame_length, frame_shift=frame_shift, decode_wav=decode_wav, cmvn=cmvn, feat_name='source_feat'),
            AutoregressivePrediction(n_future=n_future, source_feat_name='source_feat', target_feat_name='target_feat', source_feat_len_name='feat_len'),
            SetOutputKeys(output_keys=output_keys),
        )
class PretrainAudioAlbertPipe(SequentialDataPipe):
    """
    AudioALBERT (masked acoustic model) pretraining pipeline.

    each item in the input dataset should have:
        wav_path: str
    """

    def __init__(
        self,
        output_keys: dict = None,
        position_encoding_size: int = 768,
        mask_proportion: float = 0.15,
        mask_consecutive_min: int = 7,
        mask_consecutive_max: int = 7,
        mask_allow_overlap: bool = True,
        mask_bucket_ratio: float = 1.5,
        # annotation fixed: default 0.2 is a ratio, not an int
        mask_frequency: float = 0.2,
        win_ms: int = 25,
        hop_ms: int = 10,
        n_freq: int = 201,
        n_mels: int = 80,
        n_mfcc: int = 13,
        input: dict = None,
        target: dict = None,
        target_level: int = -25,
        audio_sample_rate: int = 16000,
        audio_channel_reduction: str = 'first',
        n_jobs: int = 6,
    ):
        """
        Args:
            output_keys (dict): args for the output handle
            position_encoding_size (int): this should be identical to `hidden_size`
            mask_proportion (float): mask this percentage of all spectrogram frames in each sequence at random during MAM training
            mask_consecutive_min (int): mask this amount of consecutive frames
            mask_consecutive_max (int): mask this amount of consecutive frames
            mask_allow_overlap (bool): allow overlap masking
            mask_bucket_ratio (float): only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
            mask_frequency (float): mask maximum this percentage of frequency bands, set to 0 for no frequency mask
            win_ms (int): window size in ms
            hop_ms (int): hop size in ms
            n_freq (int): number of frequency bins
            n_mels (int): number of mel features
            n_mfcc (int): number of mfcc features
            input (dict): args for the input feat; defaults to {"channel": 0, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True}
            target (dict): args for the output feat; defaults to {"channel": 1, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True}
            target_level (int): normalize the wav decibel level to the target value
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        # Mutable dicts must not be function defaults (a single dict object
        # would be shared across every instantiation); use None as the
        # sentinel and build a fresh dict per call instead.
        if input is None:
            input = {'channel': 0, 'cmvn': True, 'delta': 0, 'feat_type': 'mel', 'log': True}
        if target is None:
            target = {'channel': 1, 'cmvn': True, 'delta': 0, 'feat_type': 'mel', 'log': True}
        output_keys = output_keys or dict(
            x='masked_feat',
            label='target_feat',
            label_mask='label_mask',
            position_encoding='pos_enc',
            attention_mask='attn_mask',
            unique_name='id',
        )
        super().__init__(
            LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction),
            NormWavDecibel(target_level=target_level),
            ExtractOnlineFeat(win_ms=win_ms, hop_ms=hop_ms, n_freq=n_freq, n_mels=n_mels, n_mfcc=n_mfcc, input=input, target=target, feat_name='source_feat'),
            PrepareTargetFeat(use_copy=True, source_feat_name='source_feat', target_feat_name='target_feat'),
            MaskedReconstruction(
                position_encoding_size=position_encoding_size,
                mask_proportion=mask_proportion,
                mask_consecutive_min=mask_consecutive_min,
                mask_consecutive_max=mask_consecutive_max,
                mask_allow_overlap=mask_allow_overlap,
                mask_bucket_ratio=mask_bucket_ratio,
                mask_frequency=mask_frequency,
                source_feat_name='source_feat',
                target_feat_name='target_feat',
                masked_feat_name='masked_feat',
                pos_enc_name='pos_enc',
                attn_mask_name='attn_mask',
                label_mask_name='label_mask',
            ),
            SetOutputKeys(output_keys=output_keys),
        )
class PretrainMockingjayPipe(SequentialDataPipe):
    """Data pipe for Mockingjay pretraining (masked acoustic modeling).

    each item in the input dataset should have:
        wav_path: str
    """

    def __init__(
        self,
        output_keys: dict = None,
        position_encoding_size: int = 768,
        mask_proportion: float = 0.15,
        mask_consecutive_min: int = 7,
        mask_consecutive_max: int = 7,
        mask_allow_overlap: bool = True,
        mask_bucket_ratio: float = 1.5,
        # BUGFIX: was annotated `int` but the value is a ratio; the docstring
        # already documented it as a float.
        mask_frequency: float = 0.2,
        kaldi: dict = None,
        delta: dict = None,
        cmvn: dict = None,
        audio_sample_rate: int = 16000,
        audio_channel_reduction: str = "first",
        n_jobs: int = 6,
    ):
        """
        Args:
            output_keys (dict): args for the output handle
            position_encoding_size (int): this should be identical to `hidden_size`
            mask_proportion (float): mask this percentage of all spectrogram frames in each sequence at random during MAM training
            mask_consecutive_min (int): mask this amount of consecutive frames
            mask_consecutive_max (int): mask this amount of consecutive frames
            mask_allow_overlap (bool): allow overlap masking
            mask_bucket_ratio (float): only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
            mask_frequency (float): mask maximum this percentage of frequency bands, set to 0 for no frequency mask
            kaldi (dict): args for the kaldi extracter; defaults to fbank with 80 mel bins
            delta (dict): args for applying delta on features; defaults to order 2, window 5
            cmvn (dict): args for applying cmvn on features; defaults to {"use_cmvn": True}
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        # Resolve dict defaults per call instead of using shared mutable
        # default arguments (which every instance would alias).
        if kaldi is None:
            kaldi = {
                "feat_type": "fbank",
                "fbank": {
                    "frame_length": 25.0,
                    "frame_shift": 10.0,
                    "num_mel_bins": 80,
                    "use_log_fbank": True,
                },
                "mfcc": {"frame_length": 25.0, "frame_shift": 10.0, "num_ceps": 13},
                "spectrogram": {"frame_length": 25.0, "frame_shift": 10.0},
            }
        if delta is None:
            delta = {"order": 2, "win_length": 5}
        if cmvn is None:
            cmvn = {"use_cmvn": True}
        output_keys = output_keys or dict(
            x="masked_feat",
            label="target_feat",
            label_mask="label_mask",
            position_encoding="pos_enc",
            attention_mask="attn_mask",
            unique_name="id",
        )
        super().__init__(
            LoadAudio(
                n_jobs=n_jobs,
                audio_sample_rate=audio_sample_rate,
                audio_channel_reduction=audio_channel_reduction,
            ),
            ExtractKaldiFeat(kaldi=kaldi, delta=delta, cmvn=cmvn, feat_name="source_feat"),
            PrepareTargetFeat(
                use_copy=True,
                source_feat_name="source_feat",
                target_feat_name="target_feat",
            ),
            MaskedReconstruction(
                position_encoding_size=position_encoding_size,
                mask_proportion=mask_proportion,
                mask_consecutive_min=mask_consecutive_min,
                mask_consecutive_max=mask_consecutive_max,
                mask_allow_overlap=mask_allow_overlap,
                mask_bucket_ratio=mask_bucket_ratio,
                mask_frequency=mask_frequency,
                source_feat_name="source_feat",
                target_feat_name="target_feat",
                masked_feat_name="masked_feat",
                pos_enc_name="pos_enc",
                attn_mask_name="attn_mask",
                label_mask_name="label_mask",
            ),
            SetOutputKeys(output_keys=output_keys),
        )
class PretrainNpcPipe(SequentialDataPipe): '\n each item in the input dataset should have:\n wav_path: str\n ' def __init__(self, output_keys: dict=None, feat_type: str='fbank', feat_dim: int=80, frame_length: int=25, frame_shift: int=10, decode_wav: bool=False, cmvn: bool=True, audio_sample_rate: int=16000, audio_channel_reduction: str='first', n_jobs: int=6): '\n Args:\n output_keys (dict): args for the output handle\n feat_type (str): feature type\n feat_dim (int): feature dimension\n frame_length (int): window size in ms\n frame_shift (int): hop size in ms\n decode_wav (bool): whether to decode wav\n cmvn (bool): whether to apply uttr.-wised CMVN on feature\n audio_sample_rate (int): audio sample rate\n audio_channel_reduction (str): "first" channel\n n_jobs (int): number of workers\n ' output_keys = (output_keys or dict(x='source_feat', label='target_feat', label_mask='label_mask', unique_name='id')) super().__init__(LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction), ExtractNpcFeat(feat_type=feat_type, feat_dim=feat_dim, frame_length=frame_length, frame_shift=frame_shift, decode_wav=decode_wav, cmvn=cmvn, feat_name='source_feat'), LabelMaskFromLen(target_feat_name='target_feat', label_mask_name='label_mask'), PrepareTargetFeat(use_copy=True, source_feat_name='source_feat', target_feat_name='target_feat'), SetOutputKeys(output_keys=output_keys))
class PretrainTeraPipe(SequentialDataPipe):
    """Data pipe for TERA pretraining (masked acoustic modeling with optional noise).

    each item in the input dataset should have:
        wav_path: str
    """

    def __init__(
        self,
        output_keys: dict = None,
        position_encoding_size: int = 768,
        mask_proportion: float = 0.15,
        mask_consecutive_min: int = 7,
        mask_consecutive_max: int = 7,
        mask_allow_overlap: bool = True,
        mask_bucket_ratio: float = 1.5,
        # BUGFIX: was annotated `int` but the value is a ratio; the docstring
        # already documented it as a float.
        mask_frequency: float = 0.2,
        noise_proportion: float = 0.0,
        win_ms: int = 25,
        hop_ms: int = 10,
        n_freq: int = 201,
        n_mels: int = 80,
        n_mfcc: int = 13,
        input: dict = None,
        target: dict = None,
        target_level: int = -25,
        audio_sample_rate: int = 16000,
        audio_channel_reduction: str = "first",
        n_jobs: int = 6,
    ):
        """
        Args:
            output_keys (dict): args for the output handle
            position_encoding_size (int): this should be identical to `hidden_size`
            mask_proportion (float): mask this percentage of all spectrogram frames in each sequence at random during MAM training
            mask_consecutive_min (int): mask this amount of consecutive frames
            mask_consecutive_max (int): mask this amount of consecutive frames
            mask_allow_overlap (bool): allow overlap masking
            mask_bucket_ratio (float): only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
            mask_frequency (float): mask maximum this percentage of frequency bands, set to 0 for no frequency mask
            noise_proportion (float): for this percentage of the time, Gaussian noise will be applied on all frames during MAM training, set to 0 for no noise
            win_ms (int): window size in ms
            hop_ms (int): hop size in ms
            n_freq (int): number of frequency bins
            n_mels (int): number of mel features
            n_mfcc (int): number of mfcc features
            input (dict): args for the input feat; defaults to
                {"channel": 0, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True}
            target (dict): args for the target feat; defaults to
                {"channel": 1, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True}
            target_level (int): normalize the wav decibel level to the target value
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        # Resolve dict defaults per call instead of using shared mutable
        # default arguments (which every instance would alias).
        if input is None:
            input = {"channel": 0, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True}
        if target is None:
            target = {"channel": 1, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True}
        output_keys = output_keys or dict(
            x="masked_feat",
            label="target_feat",
            label_mask="label_mask",
            position_encoding="pos_enc",
            attention_mask="attn_mask",
            unique_name="id",
        )
        super().__init__(
            LoadAudio(
                n_jobs=n_jobs,
                audio_sample_rate=audio_sample_rate,
                audio_channel_reduction=audio_channel_reduction,
            ),
            NormWavDecibel(target_level=target_level),
            ExtractOnlineFeat(
                win_ms=win_ms,
                hop_ms=hop_ms,
                n_freq=n_freq,
                n_mels=n_mels,
                n_mfcc=n_mfcc,
                input=input,
                target=target,
                feat_name="source_feat",
            ),
            PrepareTargetFeat(
                use_copy=True,
                source_feat_name="source_feat",
                target_feat_name="target_feat",
            ),
            # TERA additionally perturbs the input features with noise before masking
            NoiseAugmentation(
                noise_proportion=noise_proportion,
                input_feat_name="source_feat",
                output_feat_name="noised_feat",
            ),
            MaskedReconstruction(
                position_encoding_size=position_encoding_size,
                mask_proportion=mask_proportion,
                mask_consecutive_min=mask_consecutive_min,
                mask_consecutive_max=mask_consecutive_max,
                mask_allow_overlap=mask_allow_overlap,
                mask_bucket_ratio=mask_bucket_ratio,
                mask_frequency=mask_frequency,
                # masking is applied on the noised features, while the clean
                # target features remain the reconstruction label
                source_feat_name="noised_feat",
                target_feat_name="target_feat",
                masked_feat_name="masked_feat",
                pos_enc_name="pos_enc",
                attn_mask_name="attn_mask",
                label_mask_name="label_mask",
            ),
            SetOutputKeys(output_keys=output_keys),
        )
class SpeakerVerificationPipe(SequentialDataPipe): '\n each item in the input dataset should have:\n wav_path: str\n label: str\n ' def __init__(self, audio_sample_rate: int=16000, audio_channel_reduction: str='first', random_crop_secs: float=(- 1), sox_effects: List[List]=None): pipes = [LoadAudio(audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction, sox_effects=sox_effects)] output_keys = dict(x='wav', x_len='wav_len', label='label', unique_name='id') if (random_crop_secs != (- 1)): pipes.append(RandomCrop(sample_rate=audio_sample_rate, max_secs=random_crop_secs)) output_keys['x'] = 'wav_crop' output_keys['x_len'] = 'wav_crop_len' pipes.append(SetOutputKeys(output_keys)) super().__init__(*pipes)
class Speech2PhonemePipe(SequentialDataPipe): '\n each item in the input dataset should have:\n wav_path: str\n transcription: str\n ' def __init__(self): output_keys = dict(x='wav', x_len='wav_len', labels='phonemized_text', class_ids='tokenized_text', unique_name='id') super().__init__(LoadAudio(), Phonemize(), SetOutputKeys(output_keys=output_keys))
class Speech2TextPipe(SequentialDataPipe): '\n each item in the input dataset should have:\n wav_path: str\n transcription: str\n ' def __init__(self, generate_tokenizer: bool=False, vocab_type: str='character', text_file: str=None, vocab_file: str=None, slots_file: str=None, vocab_args: dict=None): output_keys = dict(x='wav', x_len='wav_len', labels='transcription', class_ids='tokenized_text', unique_name='id') super().__init__(LoadAudio(), GenerateTokenizer(generate=generate_tokenizer, vocab_type=vocab_type, text_file=text_file, vocab_file=vocab_file, slots_file=slots_file, vocab_args=vocab_args), EncodeText(), SetOutputKeys(output_keys=output_keys))
class UtteranceClassificationPipe(SequentialDataPipe): '\n each item in the input dataset should have:\n wav_path: str\n label: str\n ' def __init__(self, output_keys: dict=None, audio_sample_rate: int=16000, audio_channel_reduction: str='first', sox_effects: list=None, train_category_encoder: bool=False): output_keys = (output_keys or dict(x='wav', x_len='wav_len', class_id='class_id', label='label', unique_name='id')) super().__init__(LoadAudio(audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction, sox_effects=sox_effects), EncodeCategory(train_category_encoder=train_category_encoder), SetOutputKeys(output_keys=output_keys))
class UtteranceMultipleCategoryClassificationPipe(SequentialDataPipe): '\n each item in the input dataset should have:\n wav_path: str\n labels: List[str]\n ' def __init__(self, output_keys: dict=None, audio_sample_rate: int=16000, audio_channel_reduction: str='first', sox_effects: list=None, train_category_encoder: bool=False): output_keys = (output_keys or dict(x='wav', x_len='wav_len', class_ids='class_ids', labels='labels', unique_name='id')) super().__init__(LoadAudio(audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction, sox_effects=sox_effects), EncodeMultipleCategory(train_category_encoder=train_category_encoder), SetOutputKeys(output_keys=output_keys))
class HearScenePipe(SequentialDataPipe): '\n each item in the input dataset should have:\n wav_path: str\n labels: List[str]\n ' def __init__(self, output_keys: dict=None, audio_sample_rate: int=16000, audio_channel_reduction: str='first'): output_keys = (output_keys or dict(x='wav', x_len='wav_len', y='binary_labels', labels='labels', unique_name='id')) super().__init__(LoadAudio(audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction), EncodeMultiLabel(), SetOutputKeys(output_keys=output_keys))
def generate_eval_pairs(file_list, train_file_list, eval_data_root, num_samples): X = [] for trgspk in TRGSPKS_TASK1: spk_file_list = [] for number in train_file_list: wav_path = os.path.join(eval_data_root, trgspk, (number + '.wav')) if os.path.isfile(wav_path): spk_file_list.append(wav_path) for srcspk in SRCSPKS: for number in file_list: random.shuffle(spk_file_list) pair = [os.path.join(eval_data_root, srcspk, (number + '.wav'))] pair.extend(spk_file_list[:num_samples]) X.append(pair) return X
class VCTK_VCC2020Dataset(Dataset):
    """VCTK (train/dev) + VCC2020 (eval) dataset for any-to-any voice conversion.

    After ``extract_spk_embs``:
      * 'train'/'dev' items are (wav_path, spk_emb_path) tuples;
      * 'test' items are [source wav, ref spk-emb path 1, ...] lists.
    """

    def __init__(self, split, trdev_data_root, eval_data_root, spk_embs_root,
                 lists_root, eval_lists_root, fbank_config, spk_emb_source,
                 num_ref_samples, train_dev_seed=1337, **kwargs):
        """
        Args:
            split: 'train', 'dev' or 'test'
            trdev_data_root: root directory of the train/dev (VCTK) wavs
            eval_data_root: root directory of the eval (VCC2020) wavs
            spk_embs_root: cache directory for extracted speaker embeddings
            lists_root: directory with the train/dev/eval list files
            eval_lists_root: directory with the eval source/reference lists
            fbank_config: dict of log-mel extraction parameters
            spk_emb_source: 'external' (Resemblyzer) is the only supported value
            num_ref_samples: iterable of reference-sample counts for eval pairs
            train_dev_seed: seed used to shuffle the train/dev file list
        """
        super(VCTK_VCC2020Dataset, self).__init__()
        self.split = split
        self.fbank_config = fbank_config
        self.spk_emb_source = spk_emb_source
        self.spk_embs_root = spk_embs_root
        os.makedirs(spk_embs_root, exist_ok=True)
        X = []
        if split == 'train' or split == 'dev':
            file_list = open(os.path.join(lists_root, split + '_list.txt')).read().splitlines()
            for fname in file_list:
                # file names look like "<spk>_<number>"
                spk, _ = fname.split('_')
                wav_path = os.path.join(trdev_data_root, spk, fname + '.wav')
                X.append(wav_path)
            # deterministic shuffle so the train/dev order is reproducible
            random.seed(train_dev_seed)
            random.shuffle(X)
        elif split == 'test':
            for num_samples in num_ref_samples:
                eval_pair_list_file = os.path.join(lists_root, 'eval_{}sample_list.txt'.format(num_samples))
                if os.path.isfile(eval_pair_list_file):
                    print('[Dataset] eval pair list file exists: {}'.format(eval_pair_list_file))
                    with open(eval_pair_list_file, 'r') as f:
                        lines = f.read().splitlines()
                    X += [line.split(',') for line in lines]
                else:
                    print('[Dataset] eval pair list file does not exist: {}'.format(eval_pair_list_file))
                    file_list = open(os.path.join(eval_lists_root, 'eval_list.txt')).read().splitlines()
                    train_file_list = open(os.path.join(eval_lists_root, 'E_train_list.txt')).read().splitlines()
                    eval_pairs = generate_eval_pairs(file_list, train_file_list, eval_data_root, num_samples)
                    # cache the generated pairs for the next run
                    with open(eval_pair_list_file, 'w') as f:
                        for line in eval_pairs:
                            f.write(','.join(line) + '\n')
                    X += eval_pairs
        else:
            raise ValueError("Invalid 'split' argument for dataset: VCTK_VCC2020Dataset!")
        print('[Dataset] - number of data for ' + split + ': ' + str(len(X)))
        self.X = X
        if spk_emb_source == 'external':
            print('[Dataset] Extracting speaker emebddings')
            self.extract_spk_embs()
        else:
            # BUGFIX: this was a bare `NotImplementedError` expression (a no-op),
            # which silently left self.X in a shape __getitem__ cannot handle.
            raise NotImplementedError

    def extract_spk_embs(self):
        """Extract Resemblyzer speaker embeddings and cache them as hdf5 files.

        Rewrites ``self.X`` so each item also carries its embedding path(s).
        """
        spk_encoder = VoiceEncoder()
        if self.split == 'train' or self.split == 'dev':
            spk_emb_paths = [
                os.path.join(self.spk_embs_root, os.path.basename(wav_path).replace('.wav', '.h5'))
                for wav_path in self.X
            ]
            self.X = list(zip(self.X, spk_emb_paths))
            for wav_path, spk_emb_path in tqdm(self.X, dynamic_ncols=True, desc='Extracting speaker embedding'):
                # only embed wavs whose embedding is not cached yet
                if not os.path.isfile(spk_emb_path):
                    wav = preprocess_wav(wav_path)
                    embedding = spk_encoder.embed_utterance(wav)
                    write_hdf5(spk_emb_path, 'spk_emb', embedding.astype(np.float32))
        elif self.split == 'test':
            new_X = []
            for wav_paths in self.X:
                source_wav_path = wav_paths[0]
                new_tuple = [source_wav_path]
                for wav_path in wav_paths[1:]:
                    # embedding file name encodes "<spk>_<number>.h5"
                    spk, number = wav_path.split(os.sep)[-2:]
                    spk_emb_path = os.path.join(self.spk_embs_root, spk + '_' + number.replace('.wav', '.h5'))
                    new_tuple.append(spk_emb_path)
                    if not os.path.isfile(spk_emb_path):
                        wav = preprocess_wav(wav_path)
                        embedding = spk_encoder.embed_utterance(wav)
                        write_hdf5(spk_emb_path, 'spk_emb', embedding.astype(np.float32))
                new_X.append(new_tuple)
            self.X = new_X

    def _load_wav(self, wav_path, fs):
        """Load a wav at sampling rate `fs` (None keeps the file's native rate)."""
        wav, sr = librosa.load(wav_path, sr=fs)
        return wav, sr

    def __len__(self):
        return len(self.X)

    def get_all_lmspcs(self):
        """Extract the log-mel spectrogram of every source wav at its native rate."""
        lmspcs = []
        for xs in tqdm(self.X, dynamic_ncols=True, desc='Extracting target acoustic features'):
            input_wav_path = xs[0]
            input_wav_original, fs_original = self._load_wav(input_wav_path, fs=None)
            lmspc = logmelspectrogram(
                x=input_wav_original,
                fs=fs_original,
                n_mels=self.fbank_config['n_mels'],
                n_fft=self.fbank_config['n_fft'],
                n_shift=self.fbank_config['n_shift'],
                win_length=self.fbank_config['win_length'],
                window=self.fbank_config['window'],
                fmin=self.fbank_config['fmin'],
                fmax=self.fbank_config['fmax'],
            )
            lmspcs.append(lmspc)
        return lmspcs

    def __getitem__(self, index):
        """Return (resampled wav, original wav, log-mel, mean ref spk emb,
        wav path, ref speaker name)."""
        input_wav_path = self.X[index][0]
        spk_emb_paths = self.X[index][1:]
        # reference speaker name is encoded in the embedding file name
        ref_spk_name = os.path.basename(spk_emb_paths[0]).split('_')[0]
        input_wav_original, _ = self._load_wav(input_wav_path, fs=self.fbank_config['fs'])
        input_wav_resample, _ = self._load_wav(input_wav_path, fs=FS)
        lmspc = logmelspectrogram(
            x=input_wav_original,
            fs=self.fbank_config['fs'],
            n_mels=self.fbank_config['n_mels'],
            n_fft=self.fbank_config['n_fft'],
            n_shift=self.fbank_config['n_shift'],
            win_length=self.fbank_config['win_length'],
            window=self.fbank_config['window'],
            fmin=self.fbank_config['fmin'],
            fmax=self.fbank_config['fmax'],
        )
        if self.spk_emb_source == 'external':
            ref_spk_embs = [read_hdf5(spk_emb_path, 'spk_emb') for spk_emb_path in spk_emb_paths]
            ref_spk_embs = np.stack(ref_spk_embs, axis=0)
            # average the reference embeddings into one speaker vector
            ref_spk_emb = np.mean(ref_spk_embs, axis=0)
        else:
            ref_spk_emb = None
        if self.split == 'test':
            # encode the number of reference samples into the output wav name
            input_wav_name = input_wav_path.replace('.wav', '')
            input_wav_path = input_wav_name + '_{}samples.wav'.format(len(spk_emb_paths))
        return (input_wav_resample, input_wav_original, lmspc, ref_spk_emb, input_wav_path, ref_spk_name)

    def collate_fn(self, batch):
        """Collate a batch, sorted by original-wav length (descending)."""
        sorted_batch = sorted(batch, key=lambda x: -x[1].shape[0])
        bs = len(sorted_batch)
        wavs = [torch.from_numpy(sorted_batch[i][0]) for i in range(bs)]
        wavs_2 = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        acoustic_features = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        acoustic_features_padded = pad_sequence(acoustic_features, batch_first=True)
        acoustic_feature_lengths = torch.from_numpy(
            np.array([acoustic_feature.size(0) for acoustic_feature in acoustic_features])
        )
        ref_spk_embs = torch.from_numpy(np.array([sorted_batch[i][3] for i in range(bs)]))
        wav_paths = [sorted_batch[i][4] for i in range(bs)]
        ref_spk_names = [sorted_batch[i][5] for i in range(bs)]
        # trailing None keeps the tuple layout shared with CustomDataset.collate_fn
        return (wavs, wavs_2, acoustic_features, acoustic_features_padded,
                acoustic_feature_lengths, wav_paths, ref_spk_embs, ref_spk_names, None)
class CustomDataset(Dataset):
    """Dataset for custom VC evaluation pairs described in a YAML file.

    Each YAML entry maps a wav name to a dict with at least
    ``src`` (source wav path), ``ref`` (list of reference wav paths) and
    ``ref_spk_name``.
    """

    def __init__(self, eval_pair_list_file, spk_emb_source, **kwargs):
        """
        Args:
            eval_pair_list_file: YAML file describing the evaluation pairs
            spk_emb_source: 'external' (Resemblyzer) is the only supported value
        """
        super(CustomDataset, self).__init__()
        self.spk_emb_source = spk_emb_source
        if os.path.isfile(eval_pair_list_file):
            print('[Dataset] Reading custom eval pair list file: {}'.format(eval_pair_list_file))
            with open(eval_pair_list_file, 'r') as f:
                infos = yaml.load(f, Loader=yaml.FullLoader)
            # flatten {name: info} into a list of dicts carrying the name
            X = [{'wav_name': k, **v} for (k, v) in infos.items()]
        else:
            raise ValueError('[Dataset] eval pair list file does not exist: {}'.format(eval_pair_list_file))
        print('[Dataset] - number of data for custom test: ' + str(len(X)))
        self.X = X
        if spk_emb_source == 'external':
            print('[Dataset] Extracting speaker emebddings')
            self.extract_spk_embs()
        else:
            # BUGFIX: this was a bare `NotImplementedError` expression (a no-op);
            # without extract_spk_embs, __getitem__ would later KeyError on
            # 'ref_spk_embs', so raising here is the intended behavior.
            raise NotImplementedError

    def extract_spk_embs(self):
        """Embed every reference wav with Resemblyzer and attach the vectors."""
        spk_encoder = VoiceEncoder()
        new_X = []
        for item in self.X:
            # NOTE: new_item aliases item (no copy); in-place mutation is fine
            # because self.X is replaced with the same objects below.
            new_item = item
            new_item['ref_spk_embs'] = []
            for wav_path in new_item['ref']:
                wav = preprocess_wav(wav_path)
                embedding = spk_encoder.embed_utterance(wav)
                new_item['ref_spk_embs'].append(embedding)
            new_X.append(new_item)
        self.X = new_X

    def _load_wav(self, wav_path, fs):
        """Load a wav at sampling rate `fs` (None keeps the file's native rate)."""
        wav, sr = librosa.load(wav_path, sr=fs)
        return wav, sr

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        """Return (resampled wav, original wav, mean ref spk emb, wav path,
        ref speaker name, output wav name)."""
        wav_name = self.X[index]['wav_name']
        input_wav_path = self.X[index]['src']
        ref_spk_embs = self.X[index]['ref_spk_embs']
        ref_spk_name = self.X[index]['ref_spk_name']
        input_wav_original, _ = self._load_wav(input_wav_path, fs=None)
        input_wav_resample, _ = self._load_wav(input_wav_path, fs=FS)
        if self.spk_emb_source == 'external':
            ref_spk_embs = np.stack(ref_spk_embs, axis=0)
            # average the reference embeddings into one speaker vector
            ref_spk_emb = np.mean(ref_spk_embs, axis=0)
        else:
            ref_spk_emb = None
        return (input_wav_resample, input_wav_original, ref_spk_emb,
                input_wav_path, ref_spk_name, wav_name)

    def collate_fn(self, batch):
        """Collate a batch, sorted by original-wav length (descending).

        The Nones keep the tuple layout compatible with
        VCTK_VCC2020Dataset.collate_fn (no acoustic features here).
        """
        sorted_batch = sorted(batch, key=lambda x: -x[1].shape[0])
        bs = len(sorted_batch)
        wavs = [torch.from_numpy(sorted_batch[i][0]) for i in range(bs)]
        wavs_2 = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        ref_spk_embs = torch.from_numpy(np.array([sorted_batch[i][2] for i in range(bs)]))
        wav_paths = [sorted_batch[i][3] for i in range(bs)]
        ref_spk_names = [sorted_batch[i][4] for i in range(bs)]
        save_wav_names = [sorted_batch[i][5] for i in range(bs)]
        return (wavs, wavs_2, None, None, None, wav_paths, ref_spk_embs,
                ref_spk_names, save_wav_names)
def get_basename(path): return os.path.splitext(os.path.split(path)[(- 1)])[0]
def get_trgspk_and_number(basename): if ('_' in basename): (trgspk, srcspk, number) = basename.split('_')[:3] return (trgspk, number) else: return basename
def _calculate_asv_score(model, file_list, gt_root, threshold): results = {} for (i, cvt_wav_path) in enumerate(tqdm(file_list)): basename = get_basename(cvt_wav_path) (trgspk, number) = get_trgspk_and_number(basename) gt_wav_path = os.path.join(gt_root, trgspk, (number + '.wav')) results[basename] = calculate_accept(cvt_wav_path, gt_wav_path, model, threshold) return (results, (100.0 * float(np.mean(np.array(list(results.values()))))))
def _calculate_asr_score(model, device, file_list, groundtruths): keys = ['hits', 'substitutions', 'deletions', 'insertions'] ers = {} c_results = {k: 0 for k in keys} w_results = {k: 0 for k in keys} for (i, cvt_wav_path) in enumerate(tqdm(file_list)): basename = get_basename(cvt_wav_path) (_, number) = get_trgspk_and_number(basename) groundtruth = groundtruths[number[1:]] (wav, _) = librosa.load(cvt_wav_path, sr=16000) transcription = transcribe(model, device, wav) (c_result, w_result, norm_groundtruth, norm_transcription) = calculate_measures(groundtruth, transcription) ers[basename] = [(c_result['cer'] * 100.0), (w_result['wer'] * 100.0), norm_transcription, norm_groundtruth] for k in keys: c_results[k] += c_result[k] w_results[k] += w_result[k] def er(r): return ((float(((r['substitutions'] + r['deletions']) + r['insertions'])) / float(((r['substitutions'] + r['deletions']) + r['hits']))) * 100.0) cer = er(c_results) wer = er(w_results) return (ers, cer, wer)
def _calculate_mcd_f0(file_list, gt_root, f0_all, results): for (i, cvt_wav_path) in enumerate(file_list): basename = get_basename(cvt_wav_path) (trgspk, number) = get_trgspk_and_number(basename) f0min = f0_all[trgspk]['f0min'] f0max = f0_all[trgspk]['f0max'] gt_wav_path = os.path.join(gt_root, trgspk, (number + '.wav')) (gt_wav, gt_fs) = librosa.load(gt_wav_path, sr=None) (cvt_wav, _) = librosa.load(cvt_wav_path, sr=gt_fs) (mcd, f0rmse, f0corr, ddur) = calculate_mcd_f0(cvt_wav, gt_wav, gt_fs, f0min, f0max) results.append([basename, mcd, f0rmse, f0corr, ddur])
def get_parser(): parser = argparse.ArgumentParser(description='objective evaluation script.') parser.add_argument('--wavdir', required=True, type=str, help='directory for converted waveforms') parser.add_argument('--task', required=True, type=str, choices=['task1', 'task2'], help='task 1 or task 2') parser.add_argument('--samples', required=True, type=int, help='number of reference samples') parser.add_argument('--data_root', type=str, default='./data', help='directory of data') parser.add_argument('--log_path', type=str, default=None, help='path of output log. If not specified, output to <wavdir>/obj.log') parser.add_argument('--n_jobs', default=10, type=int, help='number of parallel jobs') return parser
def main():
    """Objective evaluation entry point.

    Scores converted waveforms with ASV (accept rate), ASR (CER/WER) and —
    for task1 — MCD/f0 metrics, then writes a per-file and summary log.
    """
    args = get_parser().parse_args()
    task = args.task
    gt_root = os.path.join(args.data_root, 'vcc2020')
    f0_path = os.path.join(args.data_root, 'f0.yaml')
    threshold_path = os.path.join(args.data_root, 'thresholds.yaml')
    transcription_path = os.path.join(args.data_root, 'vcc2020', 'prompts', 'Eng_transcriptions.txt')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # load per-speaker f0 ranges and the groundtruth transcriptions
    with open(f0_path, 'r') as f:
        f0_all = yaml.load(f, Loader=yaml.FullLoader)
    with open(transcription_path, 'r') as f:
        lines = f.read().splitlines()
    groundtruths = {line.split(' ')[0]: ' '.join(line.split(' ')[1:]) for line in lines}

    # collect the converted files to evaluate
    if args.task == 'task1':
        query_string = f'TE*300??_{args.samples}samples*.wav'
    elif args.task == 'task2':
        # BUGFIX: the f-prefix was missing here, so "{args.samples}" was
        # matched literally and no task2 file was ever found.
        query_string = f'T[FGM]*300??_{args.samples}samples*.wav'
    converted_files = sorted(find_files(args.wavdir, query=query_string))
    print('number of reference samples = {}'.format(args.samples))
    print('number of utterances = {}'.format(len(converted_files)))

    # load the cached ASV threshold, or compute and cache it
    threshold = None
    threshold_all = {}
    if os.path.exists(threshold_path):
        with open(threshold_path, 'r') as f:
            threshold_all = yaml.load(f, Loader=yaml.FullLoader)
        if threshold_all and (task in threshold_all):
            equal_error_rate, threshold = threshold_all[task]
    if not threshold:
        equal_error_rate, threshold = calculate_threshold(gt_root, task, device)
        if threshold_all:
            threshold_all[task] = [equal_error_rate, threshold]
        else:
            threshold_all = {task: [equal_error_rate, threshold]}
        with open(threshold_path, 'w') as f:
            yaml.safe_dump(threshold_all, f)
    print(f'[INFO]: Equal error rate: {equal_error_rate}')
    print(f'[INFO]: Threshold: {threshold}')

    print('Calculating ASV-based score...')
    asv_model = load_asv_model(device)
    accept_results, accept_rate = _calculate_asv_score(asv_model, converted_files, gt_root, threshold)

    print('Calculating ASR-based score...')
    asr_model = load_asr_model(device)
    ers, cer, wer = _calculate_asr_score(asr_model, device, converted_files, groundtruths)

    if task == 'task1':
        print('Calculating MCD and f0-related scores...')
        # fan the MCD/f0 work out over processes; a Manager list collects results
        file_lists = np.array_split(converted_files, args.n_jobs)
        file_lists = [f_list.tolist() for f_list in file_lists]
        with mp.Manager() as manager:
            results = manager.list()
            processes = []
            for f in file_lists:
                p = mp.Process(target=_calculate_mcd_f0, args=(f, gt_root, f0_all, results))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
            results = sorted(results, key=lambda x: x[0])
            # merge MCD/f0 rows with the ASR errors and ASV decision per file
            results = [(result + ers[result[0]]) + [accept_results[result[0]]] for result in results]
    else:
        results = []
        for f in converted_files:
            basename = get_basename(f)
            results.append(([basename] + ers[basename]) + [accept_results[basename]])

    log_path = args.log_path if args.log_path else os.path.join(args.wavdir, f'obj_{args.samples}samples.log')
    with open(log_path, 'w') as f:
        # BUGFIX: these were previously assigned only inside the task1 branch,
        # which raised NameError when reporting task2 results.
        mCER = cer
        mWER = wer
        mACCEPT = accept_rate
        if task == 'task1':
            mMCD = np.mean(np.array([result[1] for result in results]))
            mf0RMSE = np.mean(np.array([result[2] for result in results]))
            mf0CORR = np.mean(np.array([result[3] for result in results]))
            mDDUR = np.mean(np.array([result[4] for result in results]))
        for result in results:
            if task == 'task1':
                f.write('{} {:.2f} {:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
            elif task == 'task2':
                f.write('{} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
        if task == 'task1':
            print('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
            f.write('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
        elif task == 'task2':
            print('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
            f.write('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
def get_parser(): parser = argparse.ArgumentParser(description='Extract results.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--upstream', type=str, required=True, help='upstream') parser.add_argument('--task', type=str, required=True, help='task') parser.add_argument('--tag', type=str, required=True, help='tag') parser.add_argument('--vocoder', type=str, required=True, help='vocoder name') parser.add_argument('--expdir', type=str, default='../../result/downstream', help='expdir') parser.add_argument('--num_samples', default=10, type=int) parser.add_argument('--start_epoch', default=10000, type=int) parser.add_argument('--end_epoch', default=50000, type=int) parser.add_argument('--step_epoch', default=1000, type=int) parser.add_argument('--out', '-O', type=str, help='The output filename. If omitted, then output to sys.stdout') return parser
def grep(filepath, query): lines = [] with open(filepath, 'r') as f: for line in f: if (query in line): lines.append(line.rstrip()) return lines
def encoder_init(m): 'Initialize encoder parameters.' if isinstance(m, torch.nn.Conv1d): torch.nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain('relu'))
class Taco2Encoder(torch.nn.Module):
    """Encoder module of the Tacotron2 TTS model.

    Reference:
        Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions
        https://arxiv.org/abs/1712.05884
    """

    def __init__(self, idim, elayers=1, eunits=512, econv_layers=3,
                 econv_chans=512, econv_filts=5, use_batch_norm=True,
                 use_residual=False, dropout_rate=0.5):
        """Initialize Tacotron2 encoder module.

        Args:
            idim (int): Dimension of the inputs.
            elayers (int, optional): Number of encoder blstm layers.
            eunits (int, optional): Number of encoder blstm units.
            econv_layers (int, optional): Number of encoder conv layers.
            econv_filts (int, optional): Encoder conv filter size.
            econv_chans (int, optional): Number of encoder conv filter channels.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            use_residual (bool, optional): Whether to use residual connection.
            dropout_rate (float, optional): Dropout rate.
        """
        super(Taco2Encoder, self).__init__()
        self.idim = idim
        self.use_residual = use_residual
        # project inputs to the conv channel size before the conv stack
        self.input_layer = torch.nn.Linear(idim, econv_chans)
        if econv_layers > 0:
            self.convs = torch.nn.ModuleList()
            for layer in range(econv_layers):
                ichans = econv_chans
                if use_batch_norm:
                    self.convs += [torch.nn.Sequential(
                        torch.nn.Conv1d(ichans, econv_chans, econv_filts,
                                        stride=1, padding=(econv_filts - 1) // 2,
                                        bias=False),
                        torch.nn.BatchNorm1d(econv_chans),
                        torch.nn.ReLU(),
                        torch.nn.Dropout(dropout_rate))]
                else:
                    self.convs += [torch.nn.Sequential(
                        torch.nn.Conv1d(ichans, econv_chans, econv_filts,
                                        stride=1, padding=(econv_filts - 1) // 2,
                                        bias=False),
                        torch.nn.ReLU(),
                        torch.nn.Dropout(dropout_rate))]
        else:
            self.convs = None
        if elayers > 0:
            # BUGFIX: the econv_layers == 0 branch referenced an undefined name
            # `embed_dim` (NameError). The blstm input dimension is always
            # econv_chans, because input_layer projects to econv_chans whether
            # or not the conv stack exists.
            iunits = econv_chans
            self.blstm = torch.nn.LSTM(iunits, eunits // 2, elayers,
                                       batch_first=True, bidirectional=True)
        else:
            self.blstm = None
        self.apply(encoder_init)

    def forward(self, xs, ilens=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of padded acoustic feature sequences (B, Lmax, idim).
            ilens (LongTensor or list, optional): Input lengths; required when
                the blstm is used (for sequence packing).

        Returns:
            If the blstm is disabled: conv output (B, Lmax, econv_chans).
            Otherwise: tuple of blstm output (B, Lmax, eunits) and lengths.
        """
        xs = self.input_layer(xs).transpose(1, 2)
        if self.convs is not None:
            for i in range(len(self.convs)):
                if self.use_residual:
                    xs += self.convs[i](xs)
                else:
                    xs = self.convs[i](xs)
        if self.blstm is None:
            return xs.transpose(1, 2)
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        # pack_padded_sequence requires lengths on CPU
        xs = pack_padded_sequence(xs.transpose(1, 2), ilens.cpu(), batch_first=True)
        self.blstm.flatten_parameters()
        xs, _ = self.blstm(xs)
        xs, hlens = pad_packed_sequence(xs, batch_first=True)
        return xs, hlens
class Taco2Prenet(torch.nn.Module): 'Prenet module for decoder of Tacotron2.\n\n The Prenet preforms nonlinear conversion\n of inputs before input to auto-regressive lstm,\n which helps alleviate the exposure bias problem.\n\n Note:\n This module alway applies dropout even in evaluation.\n See the detail in `Natural TTS Synthesis by\n Conditioning WaveNet on Mel Spectrogram Predictions`_.\n\n _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_\n https://arxiv.org/abs/1712.05884\n\n ' def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5): super(Taco2Prenet, self).__init__() self.dropout_rate = dropout_rate self.prenet = torch.nn.ModuleList() for layer in range(n_layers): n_inputs = (idim if (layer == 0) else n_units) self.prenet += [torch.nn.Sequential(torch.nn.Linear(n_inputs, n_units), torch.nn.ReLU())] def forward(self, x): if (len(self.prenet) == 0): return F.dropout(x, self.dropout_rate) for i in range(len(self.prenet)): x = F.dropout(self.prenet[i](x), self.dropout_rate) return x
class RNNLayer(nn.Module): ' RNN wrapper, includes time-downsampling' def __init__(self, input_dim, module, bidirection, dim, dropout, layer_norm, sample_rate, proj): super(RNNLayer, self).__init__() rnn_out_dim = ((2 * dim) if bidirection else dim) self.out_dim = rnn_out_dim self.dropout = dropout self.layer_norm = layer_norm self.sample_rate = sample_rate self.proj = proj self.layer = getattr(nn, module.upper())(input_dim, dim, bidirectional=bidirection, num_layers=1, batch_first=True) if self.layer_norm: self.ln = nn.LayerNorm(rnn_out_dim) if (self.dropout > 0): self.dp = nn.Dropout(p=dropout) if self.proj: self.pj = nn.Linear(rnn_out_dim, rnn_out_dim) def forward(self, input_x, x_len): if (not self.training): self.layer.flatten_parameters() input_x = pack_padded_sequence(input_x, x_len, batch_first=True, enforce_sorted=False) (output, _) = self.layer(input_x) (output, x_len) = pad_packed_sequence(output, batch_first=True) if self.layer_norm: output = self.ln(output) if (self.dropout > 0): output = self.dp(output) if (self.sample_rate > 1): (output, x_len) = downsample(output, x_len, self.sample_rate, 'drop') if self.proj: output = torch.tanh(self.pj(output)) return (output, x_len)
class RNNCell(nn.Module):
    """Single-step RNN cell wrapper.

    Wraps an nn.*Cell (e.g. LSTMCell) and optionally applies layer
    normalization, dropout, and a tanh projection to the new hidden state.
    """

    def __init__(self, input_dim, module, dim, dropout, layer_norm, proj):
        super(RNNCell, self).__init__()
        self.out_dim = dim
        self.dropout = dropout
        self.layer_norm = layer_norm
        self.proj = proj
        # e.g. module='LSTM' -> nn.LSTMCell.
        cell_cls = getattr(nn, module.upper() + 'Cell')
        self.cell = cell_cls(input_dim, dim)
        if layer_norm:
            self.ln = nn.LayerNorm(dim)
        if dropout > 0:
            self.dp = nn.Dropout(p=dropout)
        if proj:
            self.pj = nn.Linear(dim, dim)

    def forward(self, input_x, z, c):
        # One recurrence step: (z, c) are the previous hidden/cell states.
        h, new_c = self.cell(input_x, (z, c))
        if self.layer_norm:
            h = self.ln(h)
        if self.dropout > 0:
            h = self.dp(h)
        if self.proj:
            h = torch.tanh(self.pj(h))
        return (h, new_c)
class Model(nn.Module):
    """Sequence-to-sequence conversion model.

    Pipeline: time-resample input features -> encoder ('taco2' or 'ffn') ->
    speaker-embedding integration ('add' or 'concat') -> stacked LSTM layers
    (autoregressive cells when ``ar`` is True, otherwise whole-sequence RNN
    layers) -> linear projection to ``output_dim``.
    """

    def __init__(self, input_dim, output_dim, resample_ratio, stats, ar, encoder_type, hidden_dim, lstmp_layers, lstmp_dropout_rate, lstmp_proj_dim, lstmp_layernorm, spk_emb_integration_type, spk_emb_dim, prenet_layers=2, prenet_dim=256, prenet_dropout_rate=0.5, **kwargs):
        # NOTE(review): lstmp_proj_dim is accepted but never used below — the
        # projection inside each RNN layer/cell is hidden_dim -> hidden_dim.
        # Confirm whether it was meant to size those projections.
        super(Model, self).__init__()
        self.ar = ar
        self.encoder_type = encoder_type
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.resample_ratio = resample_ratio
        self.spk_emb_integration_type = spk_emb_integration_type
        self.spk_emb_dim = spk_emb_dim
        # 'add' integration sums the projected embedding into the hidden
        # states, so the dimensions must already match.
        if (spk_emb_integration_type == 'add'):
            assert (spk_emb_dim == hidden_dim)
        # Target-feature normalization statistics (``stats`` exposes .mean_ and
        # .scale_, e.g. a fitted sklearn StandardScaler). Registered as buffers
        # so they follow .to(device) and are saved in the state dict.
        self.register_buffer('target_mean', torch.from_numpy(stats.mean_).float())
        self.register_buffer('target_scale', torch.from_numpy(stats.scale_).float())
        if (encoder_type == 'taco2'):
            self.encoder = Taco2Encoder(input_dim, eunits=hidden_dim)
        elif (encoder_type == 'ffn'):
            self.encoder = torch.nn.Sequential(torch.nn.Linear(input_dim, hidden_dim), torch.nn.ReLU())
        else:
            raise ValueError('Encoder type not supported.')
        if (self.spk_emb_integration_type == 'add'):
            self.spk_emb_projection = torch.nn.Linear(spk_emb_dim, hidden_dim)
        elif (self.spk_emb_integration_type == 'concat'):
            self.spk_emb_projection = torch.nn.Linear((hidden_dim + spk_emb_dim), hidden_dim)
        else:
            raise ValueError('Integration type not supported.')
        # Prenet transforms the previous output frame during AR decoding.
        self.prenet = Taco2Prenet(idim=output_dim, n_layers=prenet_layers, n_units=prenet_dim, dropout_rate=prenet_dropout_rate)
        self.lstmps = nn.ModuleList()
        for i in range(lstmp_layers):
            if ar:
                # First AR layer consumes [encoder state ; prenet(prev frame)];
                # with prenet_layers == 0 the prenet passes the raw frame through.
                prev_dim = (output_dim if (prenet_layers == 0) else prenet_dim)
                rnn_input_dim = ((hidden_dim + prev_dim) if (i == 0) else hidden_dim)
                rnn_layer = RNNCell(rnn_input_dim, 'LSTM', hidden_dim, lstmp_dropout_rate, lstmp_layernorm, proj=True)
            else:
                rnn_input_dim = hidden_dim
                rnn_layer = RNNLayer(rnn_input_dim, 'LSTM', False, hidden_dim, lstmp_dropout_rate, lstmp_layernorm, sample_rate=1, proj=True)
            self.lstmps.append(rnn_layer)
        self.proj = torch.nn.Linear(hidden_dim, output_dim)

    def normalize(self, x):
        # Standardize a target-domain frame with the stored statistics.
        return ((x - self.target_mean) / self.target_scale)

    def _integrate_with_spk_emb(self, hs, spembs):
        """Integrate speaker embedding with hidden states.

        Args:
            hs (Tensor): Batch of hidden state sequences (B, Lmax, hdim).
            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).

        Returns:
            Tensor: Hidden states with speaker information, (B, Lmax, hdim).
        """
        if (self.spk_emb_integration_type == 'add'):
            # L2-normalize, project to hidden_dim, add to every time step.
            spembs = self.spk_emb_projection(F.normalize(spembs))
            hs = (hs + spembs.unsqueeze(1))
        elif (self.spk_emb_integration_type == 'concat'):
            # L2-normalize, broadcast over time, concat, project back to hidden_dim.
            spembs = F.normalize(spembs).unsqueeze(1).expand((- 1), hs.size(1), (- 1))
            hs = self.spk_emb_projection(torch.cat([hs, spembs], dim=(- 1)))
        else:
            raise NotImplementedError('support only add or concat.')
        return hs

    def forward(self, features, lens, ref_spk_embs, targets=None):
        """Calculate forward propagation.

        Args:
            features: Batch of the sequences of input features (B, Lmax, idim).
            lens: Lengths of the input sequences (B,).
            ref_spk_embs: Batch of reference speaker embeddings (B, spk_emb_dim).
            targets: Batch of padded target features (B, Lmax, odim); when
                given, AR decoding uses teacher forcing.

        Returns:
            Tuple of (predicted features (B, Lmax', odim), output lengths).
        """
        B = features.shape[0]
        # Time-resample: (B, L, D) -> (B, D, L) for F.interpolate, then back.
        features = features.permute(0, 2, 1)
        resampled_features = F.interpolate(features, scale_factor=self.resample_ratio)
        resampled_features = resampled_features.permute(0, 2, 1)
        # NOTE(review): with a non-integer resample_ratio this produces float
        # lengths — confirm downstream consumers (packing) tolerate that.
        lens = (lens * self.resample_ratio)
        if (self.encoder_type == 'taco2'):
            (encoder_states, lens) = self.encoder(resampled_features, lens)
        elif (self.encoder_type == 'ffn'):
            encoder_states = self.encoder(resampled_features)
        encoder_states = self._integrate_with_spk_emb(encoder_states, ref_spk_embs)
        if self.ar:
            # Time-major targets for teacher forcing: (Lmax, B, odim).
            if (targets is not None):
                targets = targets.transpose(0, 1)
            predicted_list = []
            # Per-layer hidden (z) and cell (c) states, zero-initialized.
            c_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            z_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            for _ in range(1, len(self.lstmps)):
                c_list += [encoder_states.new_zeros(B, self.hidden_dim)]
                z_list += [encoder_states.new_zeros(B, self.hidden_dim)]
            prev_out = encoder_states.new_zeros(B, self.output_dim)
            # Decode one frame per encoder time step.
            for (t, encoder_state) in enumerate(encoder_states.transpose(0, 1)):
                concat = torch.cat([encoder_state, self.prenet(prev_out)], dim=1)
                for (i, lstmp) in enumerate(self.lstmps):
                    lstmp_input = (concat if (i == 0) else z_list[(i - 1)])
                    (z_list[i], c_list[i]) = lstmp(lstmp_input, z_list[i], c_list[i])
                predicted_list += [self.proj(z_list[(- 1)]).view(B, self.output_dim, (- 1))]
                # Teacher forcing when targets given; otherwise feed back the
                # model's own prediction, normalized to the target statistics.
                prev_out = (targets[t] if (targets is not None) else predicted_list[(- 1)].squeeze((- 1)))
                prev_out = self.normalize(prev_out)
            # (B, odim, T) -> (B, T, odim).
            predicted = torch.cat(predicted_list, dim=2)
            predicted = predicted.transpose(1, 2)
        else:
            # Non-AR path: run the whole sequence through each RNN layer.
            predicted = encoder_states
            for (i, lstmp) in enumerate(self.lstmps):
                (predicted, lens) = lstmp(predicted, lens)
            predicted = self.proj(predicted)
        return (predicted, lens)
class VCC2020Dataset(Dataset):
    """VCC2020 waveform / log-mel-spectrogram dataset.

    For 'train'/'dev' splits, loads the target speaker's utterance list and
    keeps only files that exist on disk, then shuffles deterministically.
    For 'test', expands the evaluation list across all source speakers.

    Fix vs. original: list files are now opened with context managers so the
    file handles are closed deterministically (they previously leaked via
    ``open(...).read()``).
    """

    def __init__(self, split, trgspk, data_root, lists_root, fbank_config, train_dev_seed=1337, **kwargs):
        super(VCC2020Dataset, self).__init__()
        self.trgspk = trgspk
        # Second character of the speaker code encodes the target language.
        self.trg_lang = trgspk[1]
        self.fbank_config = fbank_config
        X = []
        if (split == 'train') or (split == 'dev'):
            list_path = os.path.join(lists_root, self.trg_lang + '_' + split + '_list.txt')
            with open(list_path) as f:
                file_list = f.read().splitlines()
            for number in file_list:
                wav_path = os.path.join(data_root, trgspk, number + '.wav')
                # Silently skip utterances missing on disk (intentional best-effort).
                if os.path.isfile(wav_path):
                    X.append(wav_path)
            # Deterministic shuffle so train/dev ordering is reproducible.
            random.seed(train_dev_seed)
            random.shuffle(X)
        elif split == 'test':
            with open(os.path.join(lists_root, 'eval_list.txt')) as f:
                file_list = f.read().splitlines()
            X = [os.path.join(data_root, srcspk, number + '.wav') for number in file_list for srcspk in SRCSPKS]
        else:
            raise ValueError("Invalid 'split' argument for dataset: VCC2020Dataset!")
        print('[Dataset] - number of data for ' + split + ': ' + str(len(X)))
        self.X = X

    def _load_wav(self, wav_path, fs):
        # fs=None keeps the file's native sampling rate; otherwise resample.
        (wav, sr) = librosa.load(wav_path, sr=fs)
        return (wav, sr)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        """Return (resampled wav, original-rate wav, log-mel features, path)."""
        wav_path = self.X[index]
        (wav_original, fs_original) = self._load_wav(wav_path, fs=None)
        # Features are computed at the native rate; the resampled wav (at the
        # global FS) is what the upstream model consumes.
        (wav_resample, _) = self._load_wav(wav_path, fs=FS)
        lmspc = logmelspectrogram(x=wav_original, fs=fs_original, n_mels=self.fbank_config['n_mels'], n_fft=self.fbank_config['n_fft'], n_shift=self.fbank_config['n_shift'], win_length=self.fbank_config['win_length'], window=self.fbank_config['window'], fmin=self.fbank_config['fmin'], fmax=self.fbank_config['fmax'])
        return (wav_resample, wav_original, lmspc, wav_path)

    def collate_fn(self, batch):
        """Sort by original waveform length (longest first) and pad features.

        Returns:
            (wavs, wavs_2, acoustic_features, acoustic_features_padded,
             acoustic_feature_lengths, wav_paths)
        """
        sorted_batch = sorted(batch, key=lambda item: -item[1].shape[0])
        wavs = [torch.from_numpy(item[0]) for item in sorted_batch]
        wavs_2 = [torch.from_numpy(item[1]) for item in sorted_batch]
        acoustic_features = [torch.from_numpy(item[2]) for item in sorted_batch]
        acoustic_features_padded = pad_sequence(acoustic_features, batch_first=True)
        acoustic_feature_lengths = torch.from_numpy(np.array([feat.size(0) for feat in acoustic_features]))
        wav_paths = [item[3] for item in sorted_batch]
        return (wavs, wavs_2, acoustic_features, acoustic_features_padded, acoustic_feature_lengths, wav_paths)