code
stringlengths
17
6.64M
class SubwordTokenizer(Tokenizer):
    """Subword tokenizer using sentencepiece.

    Wraps a trained ``SentencePieceProcessor`` whose special-token ids must
    follow this project's convention: pad=0, eos=1, unk=2 (checked in
    ``__init__``). Inherits ``pad_idx`` / ``eos_idx`` from :class:`Tokenizer`.
    """

    def __init__(self, spm):
        super().__init__()
        # Enforce the special-token id layout this codebase assumes everywhere.
        if ((spm.pad_id() != 0) or (spm.eos_id() != 1) or (spm.unk_id() != 2)):
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm

    def encode(self, s: str) -> List[int]:
        """Encode a sentence into subword ids; sentencepiece appends the eos id."""
        tokens = self.spm.encode_as_ids(s)
        # load_from_file enables the 'eos' extra option, so the last id must be eos.
        assert (tokens[(- 1)] == self.eos_idx)
        return tokens

    def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
        """Decode ids to text: stop at eos, skip pad (and consecutive repeats if asked)."""
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                crop_idx.append(idx)
        return self.spm.decode_ids(crop_idx)

    @classmethod
    def load_from_file(cls, filepath: str):
        """Load a trained sentencepiece model file and wrap it in this tokenizer."""
        import sentencepiece as splib
        spm = splib.SentencePieceProcessor()
        spm.load(filepath)
        # Ask sentencepiece to append eos to every encoded sequence.
        spm.set_encode_extra_options('eos')
        return cls(spm)

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Re-apply the encode option after unpickling — presumably it is not
        # preserved by the processor's pickled state (matches load_from_file).
        self.spm.set_encode_extra_options('eos')

    @property
    def vocab_size(self) -> int:
        return len(self.spm)

    @property
    def token_type(self) -> str:
        return 'subword'
class SubwordSlotTokenizer(Tokenizer):
    """Subword tokenizer with slots.

    Extends the sentencepiece subword vocabulary with slot boundary tokens
    ('B-<slot>' / 'E-<slot>') whose ids live directly above the subword range.
    """

    def __init__(self, spm, slots):
        super().__init__()
        # Enforce the special-token id layout this codebase assumes everywhere.
        if ((spm.pad_id() != 0) or (spm.eos_id() != 1) or (spm.unk_id() != 2)):
            raise ValueError('Please train sentencepiece model with following argument:\n--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --model_type=bpe --eos_piece=<eos>')
        self.spm = spm
        self.slots = slots
        # Slot ids are appended after the sentencepiece id range.
        self.slot2id = {self.slots[i]: (i + len(self.spm)) for i in range(len(self.slots))}
        self.id2slot = {(i + len(self.spm)): self.slots[i] for i in range(len(self.slots))}

    def encode(self, sent: str, iobs: str) -> List[int]:
        """Encode a sentence plus word-level IOB tags into subword + slot ids.

        Args:
            sent: transcription, words separated by single spaces
            iobs: one IOB tag per word ('O' for no slot), same word count as sent

        Returns:
            List[int]: subword ids with B-/E- slot markers around tagged spans,
            terminated by the eos id.
        """
        sent = sent.strip('\r\n ')
        iobs = iobs.strip('\r\n ')
        sent = re.sub(' +', ' ', sent).strip(' ')
        sent = sent.split(' ')
        iobs = iobs.split(' ')
        assert (len(sent) == len(iobs)), f'transcription and iobs should have same number of words (split by space)'
        # Drop sentence-boundary pseudo-words if present.
        if (sent[0] == 'BOS'):
            sent = sent[1:]
            iobs = iobs[1:]
        if (sent[(- 1)] == 'EOS'):
            sent = sent[:(- 1)]
            iobs = iobs[:(- 1)]
        tokens = []
        for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
            # Open a slot span when the tag starts or changes.
            if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
                tokens.append(self.slot2id[('B-' + iob)])
            encoded = self.spm.encode_as_ids(wrd)
            assert (encoded[(- 1)] == self.eos_idx)
            # Strip the per-word eos appended by sentencepiece.
            tokens += encoded[:(- 1)]
            # Close a slot span when the tag ends or changes.
            if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
                tokens.append(self.slot2id[('E-' + iob)])
        assert (tokens[(- 1)] != self.eos_idx)
        tokens.append(self.eos_idx)
        return tokens

    def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
        """Decode ids back to text, emitting slot tokens as standalone words."""
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                crop_idx.append(idx)
        (sent, ret) = ([], [])
        for (i, x) in enumerate(crop_idx):
            if (x >= len(self.spm)):
                # Slot id: emit it and flush the pending subwords just before it.
                slot = self.id2slot[x]
                ret.append(slot)
                if (len(sent) > 0):
                    decoded = self.spm.decode_ids(sent)
                    ret.insert((- 1), decoded)
                    sent = []
            else:
                sent.append(x)
        # NOTE(review): subwords appearing after the final slot token are
        # dropped here (matches original behavior) — confirm that is intended.
        return ' '.join(ret)

    @classmethod
    def load_from_file(cls, filepath: str, slots_file: str):
        """Load the sentencepiece model and the slot list from files."""
        import sentencepiece as splib
        spm = splib.SentencePieceProcessor()
        spm.load(filepath)
        # BUGFIX: was ':eos' (stray leading colon) — inconsistent with
        # __setstate__ and every other tokenizer; the option name is 'eos'.
        spm.set_encode_extra_options('eos')
        # BUGFIX: close the slots file instead of leaking the handle.
        with open(slots_file) as f:
            org_slots = f.read().split('\n')
        slots = []
        for slot in [slot for slot in org_slots if (slot != 'O')]:
            slots.append(('B-' + slot))
            slots.append(('E-' + slot))
        return cls(spm, slots)

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Re-apply the encode option after unpickling (see load_from_file).
        self.spm.set_encode_extra_options('eos')

    @property
    def vocab_size(self) -> int:
        return (len(self.spm) + len(self.slots))

    @property
    def token_type(self) -> str:
        return 'subword-slot'
class WordTokenizer(CharacterTokenizer):
    """Space-delimited word tokenizer (vocabulary handling inherited from
    :class:`CharacterTokenizer`; only the token unit and type name differ)."""

    def encode(self, s: str) -> List[int]:
        # Trim surrounding whitespace/newlines, split on single spaces, then
        # map every word to its vocabulary index and terminate with eos.
        words = s.strip('\r\n ').split(' ')
        ids = [self.vocab_to_idx(word) for word in words]
        ids.append(self.eos_idx)
        return ids

    def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
        pieces = []
        for position, token_id in enumerate(idxs):
            vocab = self.idx_to_vocab(token_id)
            if token_id == self.eos_idx:
                break
            skip_pad = token_id == self.pad_idx
            skip_repeat = ignore_repeat and position > 0 and token_id == idxs[position - 1]
            if not (skip_pad or skip_repeat):
                pieces.append(vocab)
        return ' '.join(pieces)

    @property
    def token_type(self) -> str:
        return 'word'
class PhonemeTokenizer(WordTokenizer):
    """Phoneme tokenizer.

    Identical to :class:`WordTokenizer` (phonemes are space-delimited tokens);
    only the reported ``token_type`` differs.
    """

    @property
    def token_type(self) -> str:
        return 'phoneme'
class BertTokenizer(Tokenizer):
    """Bert Tokenizer.

    https://github.com/huggingface/pytorch-transformers/blob/master/pytorch_transformers/tokenization_bert.py

    Ids are shifted down by ``BERT_FIRST_IDX`` so the project's special tokens
    (pad/eos/unk) occupy the low indices.
    """

    def __init__(self, tokenizer):
        super().__init__()
        self._tokenizer = tokenizer
        # Align the wrapped tokenizer's special tokens with this project's convention.
        self._tokenizer.pad_token = '<pad>'
        self._tokenizer.eos_token = '<eos>'
        self._tokenizer.unk_token = '<unk>'

    def encode(self, s: str) -> List[int]:
        """Encode text into BERT ids shifted down by BERT_FIRST_IDX, eos appended.

        Ids that fall at or below BERT_FIRST_IDX are mapped to unk.
        """
        reduced_idx = []
        for idx in self._tokenizer.encode(s):
            r_idx = (idx - BERT_FIRST_IDX)
            # BUGFIX: was `assert r_idx > 0` wrapped in `except AssertionError`;
            # asserts are stripped under `python -O`, which would let shifted
            # ids <= 0 leak through. Use an explicit branch instead.
            if r_idx > 0:
                reduced_idx.append(r_idx)
            else:
                reduced_idx.append(self.unk_idx)
        reduced_idx.append(self.eos_idx)
        return reduced_idx

    def decode(self, idxs: List[int], ignore_repeat: bool=False) -> str:
        """Decode shifted ids: stop at eos, skip pad/repeats, undo the shift."""
        crop_idx = []
        for (t, idx) in enumerate(idxs):
            if (idx == self.eos_idx):
                break
            elif ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
                continue
            else:
                crop_idx.append((idx + BERT_FIRST_IDX))
        return self._tokenizer.decode(crop_idx)

    @classmethod
    def load_from_file(cls, vocab_file: str):
        """Create from a pretrained huggingface BERT tokenizer name or path."""
        from pytorch_transformers import BertTokenizer as bert_tokenizer
        return cls(bert_tokenizer.from_pretrained(vocab_file))

    @property
    def vocab_size(self) -> int:
        return ((BERT_LAST_IDX - BERT_FIRST_IDX) + 1)

    @property
    def token_type(self) -> str:
        return 'bert'
def load_tokenizer(mode: str, vocab_file: str=None, vocab_list: List[str]=None, slots_file: str=None) -> Tokenizer:
    """Load a text tokenizer.

    Args:
        mode (str): Mode ("character", "character-slot", "subword", "subword-slot", "word", "bert-...")
        vocab_file (str, optional): Path to vocabularies. Defaults to None.
        vocab_list (List[str], optional): List of vocabularies. Defaults to None.
        slots_file (str, optional): Path to slots. Defaults to None.

    Raises:
        NotImplementedError: If mode is not implemented.

    Returns:
        Tokenizer: Text tokenizer.
    """
    assert ((int((vocab_file is not None)) + int((vocab_list is not None))) <= 1), "For 'vocab_file' and 'vocab_list', at most one argument can be presented"
    # The temporary file must stay alive for the whole construction below,
    # because `vocab_file` may point at it when `vocab_list` is given.
    with tempfile.NamedTemporaryFile('w') as f:
        if (vocab_list is not None):
            f.writelines([f'''{vocab}
''' for vocab in vocab_list])
            f.flush()
            vocab_file = f.name
        # Providing a slots file implies the "-slot" variant of the mode.
        if ((slots_file is not None) and (not mode.endswith('slot'))):
            mode = f'{mode}-slot'
        if (mode == 'character'):
            return CharacterTokenizer.load_from_file(vocab_file)
        elif (mode == 'character-slot'):
            return CharacterSlotTokenizer.load_from_file(vocab_file, slots_file)
        elif (mode == 'subword'):
            return SubwordTokenizer.load_from_file(vocab_file)
        elif (mode == 'subword-slot'):
            return SubwordSlotTokenizer.load_from_file(vocab_file, slots_file)
        elif (mode == 'word'):
            return WordTokenizer.load_from_file(vocab_file)
        elif (mode == 'phoneme'):
            return PhonemeTokenizer.load_from_file(vocab_file)
        elif mode.startswith('bert-'):
            # For BERT the mode string itself names the pretrained model/vocab.
            return BertTokenizer.load_from_file(mode)
        else:
            raise NotImplementedError('`{}` is not yet supported.'.format(mode))
def default_phoneme_tokenizer() -> PhonemeTokenizer:
    """Returns a default LibriSpeech phoneme tokenizer.

    Returns:
        PhonemeTokenizer: Vocabs include 71 phonemes
    """
    # NOTE(review): relies on load_from_file accepting a `vocab_list` keyword
    # (PHONEME_VOCAB is a module-level list) — confirm against CharacterTokenizer.
    return PhonemeTokenizer.load_from_file(vocab_list=PHONEME_VOCAB)
def generate_basic_vocab(mode: str, text_list: List[str], vocab_size: int=(- 1), coverage: float=1.0, sort_vocab: bool=True) -> List[str]:
    """Generates basic vocabularies, including character and word-based vocabularies.

    Args:
        mode (str): Vocabulary type (character or word).
        text_list (List[str]): List of text data.
        vocab_size (int, optional):
            Vocabulary size, if not specified, vocab_size would be `coverage * actual vocab size`. Defaults to -1.
        coverage (float, optional): Vocabulary coverage. Defaults to 1.0.
        sort_vocab (bool, optional): Sort vocabularies alphabetically. Defaults to True.

    Returns:
        List[str]: A list of vocabularies.
    """
    assert mode in {'character', 'word'}, mode
    assert vocab_size == -1 or vocab_size > 0, vocab_size
    assert 0.0 < coverage <= 1.0, coverage
    logger.info(f'Generating vocab (type = {mode}, coverage = {coverage}) from {len(text_list)} sentences.')

    # Count token frequencies: characters directly, or space-split words.
    counter = Counter()
    for text in text_list:
        if mode == 'character':
            counter.update(text)
        if mode == 'word':
            counter.update(text.split())

    # Resolve the requested size against the observed vocabulary size.
    vocab_size = int(len(counter) * coverage) if vocab_size < 0 else min(vocab_size, len(counter))

    if vocab_size < len(counter):
        # Keep the most frequent tokens; the stable sort preserves
        # first-seen (counter insertion) order among equal counts.
        by_frequency = sorted(counter.keys(), key=(lambda token: counter[token]), reverse=True)
        vocab_list = by_frequency[:vocab_size]
    else:
        vocab_list = list(counter.keys())

    if sort_vocab:
        vocab_list = sorted(vocab_list)
    logger.info(f'Generated {vocab_size} {mode} vocabularies.')
    return vocab_list
def generate_subword_vocab(text_list: List[str]=None, text_file: str=None, output_file: str=None, vocab_size: int=1000, character_coverage: float=1.0) -> str:
    """Generates subword vocabularies based on `sentencepiece`.

    Args:
        text_list (List[str], optional): List of text data. Defaults to None.
        text_file (str, optional): Path to text data. Defaults to None.
        output_file (str, optional): Path prefix to save the trained subword model. Required.
        vocab_size (int, optional): Vocabulary size. Defaults to 1000.
        character_coverage (float, optional): Coverage of characters in text data. Defaults to 1.0.

    Raises:
        ImportError: If `sentencepiece` is not installed.

    Returns:
        str: Path to `${output_file}.model`.
    """
    try:
        import sentencepiece as splib
    except ImportError as error:
        # Chain the original error for easier debugging.
        raise ImportError('`sentencepiece` cannot be imported, please run `pip install sentencepiece` first') from error
    assert (output_file is not None)
    output_file = str(output_file)
    assert (vocab_size > 0), vocab_size
    # BUGFIX: keep the unformatted template in its own variable. Previously the
    # template was overwritten by its own .format() result, so when both
    # text_list and text_file were provided, the second .format() was a no-op
    # and training re-ran against the (already deleted) temporary input file.
    cmd_template = '--input={} --model_prefix={} --model_type=unigram --vocab_size={} --character_coverage={} --pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 --eos_piece=<eos> --remove_extra_whitespaces=true '
    if (text_list is not None):
        assert isinstance(text_list, list)
        assert isinstance(text_list[0], str)
        logger.info(f'Generating vocab (type = subword, coverage = {character_coverage}) from {len(text_list)} sentences.')
        # Training must happen while the temporary directory still exists.
        with tempfile.TemporaryDirectory() as directory:
            input_file = os.path.join(directory, 'text.txt')
            with open(input_file, 'w') as fp:
                for text in text_list:
                    fp.write((text + '\n'))
            cmd = cmd_template.format(input_file, output_file, vocab_size, character_coverage)
            splib.SentencePieceTrainer.Train(cmd)
    if (text_file is not None):
        logger.info(f'Generating vocab (type = subword, coverage = {character_coverage}) from {text_file}')
        cmd = cmd_template.format(text_file, output_file, vocab_size, character_coverage)
        splib.SentencePieceTrainer.Train(cmd)
    return (output_file + '.model')
def generate_vocab(mode: str, text_list: List[str]=None, text_file: str=None, read_lines: int=10000000, **vocab_args) -> Union[(List[str], str)]:
    """Generates vocabularies given text data.

    Args:
        mode (str): Vocabulary type
        text_list (List[str], optional): List of text data. Defaults to None.
        text_file (str, optional): Path to text data. Defaults to None.
        read_lines (int, optional): Maximum lines to read from `text_file`. Defaults to 10000000.
        vocab_args:
            if :code:`mode != subword`, arguments for :obj:`generate_basic_vocab`
            if :code:`mode == subword`, arguments for :obj:`generate_subword_vocab`

    Returns:
        Union[List[str], str]: A list of vocabularies or a path to `.vocab` file.
    """
    # For the basic modes, materialize the text lines first; subword training
    # can stream the file directly through sentencepiece instead.
    if ((text_list is None) and (mode in {'character', 'word', 'phoneme'})):
        assert isinstance(text_file, str)
        with open(text_file, 'r', encoding='UTF-8') as fp:
            text_list = [line.strip('\r\n ') for (i, line) in enumerate(fp) if (i < read_lines)]
    if (mode == 'character'):
        return generate_basic_vocab('character', text_list, **vocab_args)
    if (mode in {'word', 'phoneme'}):
        # Phonemes are space-separated tokens, so they reuse the word pipeline.
        return generate_basic_vocab('word', text_list, **vocab_args)
    if (mode == 'subword'):
        return generate_subword_vocab(text_list=text_list, text_file=text_file, **vocab_args)
    else:
        raise ValueError(f'Unsupported mode (vocabulary type): {mode}')
class BalancedWeightedSampler():
    """Class-balanced random batch sampler.

    Samples are drawn with weights inversely proportional to their class
    frequency, so rare classes appear as often as common ones in expectation.
    This batch sampler is always randomized, hence cannot be used for testing.
    """

    def __init__(self, labels: List[str], batch_size: int, duplicate: int=1, seed: int=12345678) -> None:
        self.epoch = 0
        self.seed = seed
        self.batch_size = batch_size
        self.duplicate = duplicate
        # Per-sample weight = total count / count of that sample's class.
        label_counts = Counter(labels)
        self.weights = [len(labels) / label_counts[label] for label in labels]

    def set_epoch(self, epoch: int):
        # Changes the RNG seed used by __iter__ (seed + epoch) for reproducibility.
        self.epoch = epoch

    def __iter__(self) -> Iterator[T_co]:
        rng = torch.Generator()
        rng.manual_seed(self.epoch + self.seed)
        sampler = WeightedRandomSampler(self.weights, len(self.weights) * self.duplicate, generator=rng)
        drawn = list(sampler)
        # Emit fixed-size batches; the final batch may be smaller.
        for start in range(0, len(drawn), self.batch_size):
            yield drawn[start:start + self.batch_size]

    def __len__(self):
        return len(list(iter(self)))
class DistributedBatchSamplerWrapper():
    """Shard the batches of an arbitrary batch sampler across distributed ranks.

    Each rank takes every ``num_replicas``-th batch starting at its ``rank``.
    When the batch count is not divisible by ``num_replicas``, batches are split
    in half to create extras; failing that, batches are duplicated
    (``allow_duplicates``) or left uneven (``allow_uneven``), else an error is raised.
    """

    def __init__(self, batch_sampler: BatchSampler, num_replicas: Optional[int]=None, rank: Optional[int]=None, allow_duplicates: bool=False, allow_uneven: bool=False) -> None:
        # Default to the current process group's world size and rank.
        if (num_replicas is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if (rank is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        if ((rank >= num_replicas) or (rank < 0)):
            raise ValueError('Invalid rank {}, rank should be in the interval [0, {}]'.format(rank, (num_replicas - 1)))
        self.batch_sampler = batch_sampler
        self.num_replicas = num_replicas
        self.rank = rank
        self.allow_duplicates = allow_duplicates
        self.allow_uneven = allow_uneven

    def __iter__(self) -> Iterator[T_co]:
        logger.info(f'Building distributed batch sampler for rank={self.rank}, world_size={self.num_replicas}')
        all_rank_batch_indices = list(iter(self.batch_sampler))
        if ((len(all_rank_batch_indices) % self.num_replicas) == 0):
            target_batch_indices = all_rank_batch_indices
        else:
            # Number of extra batches needed so the total divides evenly;
            # each halving of a multi-index batch yields one extra batch.
            num_to_halve = (self.num_replicas - (len(all_rank_batch_indices) % self.num_replicas))
            flatten_batch_indices = deepcopy(all_rank_batch_indices)
            while (num_to_halve > 0):
                newly_flatten = []
                all_cant_be_halved = True
                for indices in flatten_batch_indices:
                    if ((num_to_halve > 0) and (len(indices) > 1)):
                        (indices1, indices2) = (indices[:(len(indices) // 2)], indices[(len(indices) // 2):])
                        newly_flatten += [indices1, indices2]
                        num_to_halve -= 1
                        all_cant_be_halved = False
                    else:
                        newly_flatten.append(indices)
                flatten_batch_indices = deepcopy(newly_flatten)
                if all_cant_be_halved:
                    # Every remaining batch is a singleton: cannot split further.
                    # NOTE(review): in the allow_uneven path nothing decrements
                    # num_to_halve, so this loop looks like it cannot terminate
                    # — likely a missing `break`; confirm against upstream.
                    if self.allow_duplicates:
                        logger.warning('To ensure all the dataloaders in different processes get the same number of batches. Some batches are duplicated. This must not happen during the evaluation stage.')
                        flatten_batch_indices = (flatten_batch_indices + all_rank_batch_indices[:num_to_halve])
                    elif self.allow_uneven:
                        logger.warning('Total batches will not be evenly distributed across the dataloaders in different processes. This must not happen during the training stage and can lead to hanging, while might be okay during the evaluation stage.')
                    else:
                        raise ValueError('The provided batch sampler cannot be safely wrapped for distributed training. Please try increase the number of indices in each batch. Or, allowing duplicated batches or uneven number of batches across dataloaders.')
            target_batch_indices = flatten_batch_indices
        if (not self.allow_uneven):
            assert ((len(target_batch_indices) % self.num_replicas) == 0)
        # Round-robin assignment: this rank takes every num_replicas-th batch.
        batch_indices = target_batch_indices[self.rank::self.num_replicas]
        return iter(batch_indices)

    def __len__(self) -> int:
        return len(list(iter(self)))

    def set_epoch(self, epoch: int) -> None:
        # Forward the epoch to the wrapped sampler when it supports shuffling.
        if hasattr(self.batch_sampler, 'set_epoch'):
            self.batch_sampler.set_epoch(epoch)
class FixedBatchSizeBatchSampler():
    """Fixed-batch-size sampler over a sized data source.

    If shuffled, indices are shuffled (re-seeded per epoch) before being
    grouped into consecutive batches of ``batch_size``.

    Args:
        data_source: __len__ is implemented
    """

    def __init__(self, data_source, batch_size: int, shuffle: bool=False, seed: int=12345678) -> None:
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        if shuffle:
            self.generator = torch.Generator()
            self.sampler = RandomSampler(data_source, generator=self.generator)
        else:
            self.sampler = SequentialSampler(data_source)

    def set_epoch(self, epoch: int) -> None:
        # Re-seed so every epoch yields a different but reproducible order.
        if self.shuffle:
            self.generator.manual_seed((self.seed + epoch))

    def _evaluate_reduced_timestamps(self, batch_indices):
        # NOTE(review): dead code — neither `self.reduce_func` nor
        # `self.timestamps` is ever assigned in this class (looks copied from
        # MaxTimestampBatchSampler); calling this would raise AttributeError.
        return self.reduce_func([self.timestamps[indice] for indice in batch_indices])

    def __iter__(self):
        batch_sampler = BatchSampler(self.sampler, batch_size=self.batch_size, drop_last=False)
        return iter(batch_sampler)

    def __len__(self):
        return len(list(iter(self)))
class GroupSameItemSampler():
    """Batch sampler that groups dataset indices sharing the same item value.

    Each yielded batch contains every index whose item compares equal,
    in first-seen order.
    """

    def __init__(self, items: List[str]) -> None:
        self.indices = defaultdict(list)
        for position, item in enumerate(items):
            self.indices[item].append(position)
        self.epoch = 0

    def set_epoch(self, epoch: int):
        # Kept for API compatibility with the other samplers; grouping is fixed.
        self.epoch = epoch

    def __iter__(self):
        yield from self.indices.values()

    def __len__(self):
        return len(list(iter(self)))
class MaxTimestampBatchSampler():
    """Batch sampler bounded by a reduced-timestamp budget.

    The reduced timestamps for a batch (by default ``max(lengths) * count``,
    i.e. the padded size) never exceed ``max_length``. If shuffled, indices
    are permuted before being aggregated into batches.
    """

    def __init__(self, lengths: List[int], max_length: int, shuffle: bool=False, seed: int=12345678, reduce_func: callable=None) -> None:
        self.lengths = lengths
        self.max_length = max_length
        self.shuffle = shuffle
        self.seed = seed
        self.epoch = 0
        self.reduce_func = reduce_func or self._default_reduce_func

    @staticmethod
    def _default_reduce_func(timestamps):
        # Cost of a padded batch: longest item times batch size.
        return max(timestamps) * len(timestamps)

    def set_epoch(self, epoch: int):
        self.epoch = epoch

    def _evaluate_reduced_timestamps(self, batch_indices):
        return self.reduce_func([self.lengths[i] for i in batch_indices])

    def __iter__(self):
        if self.shuffle:
            rng = torch.Generator()
            rng.manual_seed(self.epoch + self.seed)
            order = torch.randperm(len(self.lengths), generator=rng).tolist()
        else:
            order = list(range(len(self.lengths)))
        current = []
        for indice in order:
            candidate = current + [indice]
            if self._evaluate_reduced_timestamps(candidate) <= self.max_length:
                current = candidate
                continue
            if not current:
                # Even alone, this item exceeds the budget.
                raise ValueError(f'There is a single length {self.lengths[indice]} larger than max_length {self.max_length}. Please increase the max_length.')
            yield current
            current = [indice]
        if current:
            yield current

    def __len__(self):
        return len(list(iter(self)))
class SortedSliceSampler(): '\n This sampler should only be used for training hence is always in random shuffle mode\n\n Args:\n lengths (List[int])\n batch_size (int): the default batch size\n max_length (int): if a batch contains at least on utt longer than max_length, half the batch\n get_length_func (callable): get the length of each item in the dataset, if None, a default function will be used\n in_batch_shuffle (bool): if False, batches are sorted by length from long to short\n ' def __init__(self, lengths: List[int], batch_size: int, max_length: int=300000, seed: int=12345678, in_batch_shuffle: bool=False) -> None: self.lengths = lengths self.epoch = 0 self.seed = seed self.batch_size = batch_size self.max_length = max_length self.in_batch_shuffle = in_batch_shuffle sorted_ids = [(idx, length) for (idx, length) in enumerate(lengths)] sorted_ids = sorted(sorted_ids, key=(lambda x: x[1]), reverse=True) self.sorted_ids = [data_id for (data_id, length) in sorted_ids] def set_epoch(self, epoch: int): self.epoch = epoch def __iter__(self): generator = torch.Generator() generator.manual_seed((self.epoch + self.seed)) indices = torch.randperm(len(self.lengths), generator=generator).tolist() for indice in indices: length = self.lengths[indice] if (length > self.max_length): batch_size = (self.batch_size // 2) else: batch_size = self.batch_size start_position = self.sorted_ids.index(indice) batch = self.sorted_ids[start_position:(start_position + batch_size)] if self.in_batch_shuffle: inbatch_indices = torch.randperm(len(batch), generator=generator).tolist() batch = [batch[idx] for idx in inbatch_indices] (yield batch) def __len__(self): return len(list(iter(self)))
class SortedBucketingSampler():
    """Bucket indices by length (longest first) into fixed-size batches.

    Args:
        lengths (List[int])
        batch_size (int): the default batch size
        max_length (int): if a batch starts at an utterance longer than max_length, half the batch
        shuffle (bool): Whether to shuffle the batches
        in_batch_shuffle (bool): if False, batches are sorted by length from long to short
    """

    def __init__(self, lengths: List[int], batch_size: int, max_length: int=300000, shuffle: bool=False, in_batch_shuffle: bool=False, seed: int=12345678) -> None:
        self.epoch = 0
        self.seed = seed
        self.batch_size = batch_size
        self.max_length = max_length
        self.shuffle = shuffle
        self.in_batch_shuffle = in_batch_shuffle
        self.lengths = lengths
        # Indices ordered by length, longest first (stable on ties).
        order = sorted(enumerate(self.lengths), key=(lambda pair: pair[1]), reverse=True)
        self.sorted_ids = [data_id for data_id, _ in order]

    def set_epoch(self, epoch: int):
        self.epoch = epoch

    def __iter__(self):
        rng = torch.Generator()
        rng.manual_seed(self.epoch + self.seed)
        batches = []
        cursor = 0
        total = len(self.sorted_ids)
        while cursor < total:
            anchor = self.sorted_ids[cursor]
            # Halve the batch when its longest (first) utterance is too long.
            size = self.batch_size // 2 if self.lengths[anchor] > self.max_length else self.batch_size
            batch = self.sorted_ids[cursor:min(cursor + size, total)]
            cursor += size
            if self.in_batch_shuffle:
                perm = torch.randperm(len(batch), generator=rng)
                batch = [batch[j] for j in perm]
            batches.append(batch)
        if self.shuffle:
            perm = torch.randperm(len(batches), generator=rng)
            batches = [batches[j] for j in perm]
        return iter(batches)

    def __len__(self):
        return len(list(iter(self)))
class AugmentedDynamicItemDataset(DynamicItemDataset):
    """A ``DynamicItemDataset`` augmented with named "tools".

    A tool is an arbitrary object stored on the dataset which dynamic items can
    consume through the ``takes`` mechanism, exactly like a data key.
    """

    def __init__(self, data, dynamic_items=None, output_keys=None, tools: dict=None):
        # BUGFIX: the defaults used to be the mutable literals `[]` and `{}`,
        # which are shared across all calls; use None sentinels instead.
        super().__init__(
            data,
            dynamic_items if dynamic_items is not None else [],
            output_keys if output_keys is not None else [],
        )
        assert isinstance(data, OrderedDict)
        self._tools = {}
        for (name, item) in (tools or {}).items():
            self.add_tool(name, item)

    def _dynamic_tools(self, id, name):
        # Dynamic-item provider: ignores the item id and returns the named tool.
        return self._tools[name]

    def add_tool(self, name: str, item: Any) -> None:
        """
        Store the :code:`item` in this dataset with the name :code:`name` so it can be used in
        :code:`__getitem__`. That is, you can retrieve the :code:`item` with the :code:`takes` argument
        of :obj:`add_dynamic_item`.

        .. code-block:: python

            def tokenize_func(tokenizer, text):
                return tokenizer(text)

            self.add_tool("tokenizer", tokenizer)
            self.add_dynamic_item(tokenize_func, takes=["tokenizer", "text"], provides="tokenized_ids")

        You can also later retrieve this tool by :obj:`get_tool` or :obj:`all_tools`
        """
        self._tools[name] = item
        self.add_dynamic_item(partial(self._dynamic_tools, name=name), takes='id', provides=name)

    def add_tools(self, tools: dict) -> None:
        """
        Store each key-value pair in :code:`tools` as a tool. See :obj:`add_tool` for more information
        """
        for (key, value) in tools.items():
            self.add_tool(key, value)

    def get_tool(self, key) -> Any:
        """
        Retrieve a stored tool. See :obj:`add_tool` for more information
        """
        return self._tools[key]

    def has_tool(self, key) -> bool:
        """
        Checks whether has a tool named :code:`key`.
        """
        return (key in self._tools)

    def all_tools(self, copy=True) -> dict:
        """
        Return:
            dict

        Containing all the tools in :code:`name: value` pairs
        (deep-copied unless :code:`copy=False`). See :obj:`add_tool`.
        """
        return (deepcopy(self._tools) if copy else self._tools)

    def update_output_keys(self, keys: dict) -> None:
        """
        Compared to :obj:`set_output_keys`, this method updates the output keys
        mapping instead of replacing it with a new dictionary. Useful when you
        only want to replace a few mappings and leave others unchanged.
        """
        mapping = self.pipeline.output_mapping.copy()
        mapping.update(keys)
        self.set_output_keys(mapping)

    def keys(self) -> List[str]:
        """
        List all the :code:`static_item` and :code:`dynamic_item` keys in the
        dataset, excluding private keys (leading underscore) and tools.
        """
        available_keys: List[str] = list(self.pipeline.key_to_node.keys())
        for dynamic_item in self.pipeline.dynamic_items:
            provides = dynamic_item.provides
            assert isinstance(provides, (list, tuple))
            available_keys += provides
        available_keys = [key for key in available_keys if ((not key.startswith('_')) and (key not in self._tools))]
        return available_keys

    def set_info(self, info):
        # Output keys used by get_info.
        self._info = info

    def get_info(self, index):
        with self.output_keys_as(self._info):
            return self.__getitem__(index)

    def __getitem__(self, index):
        """
        Same usage as the original SpeechBrain DynamicItemDataset.__getitem__,
        except that by default it uses :obj:`keys` as the default :code:`output_keys`
        """
        if (len(self.pipeline.output_mapping) == 0):
            with self.output_keys_as(self.keys()):
                return super().__getitem__(index)
        else:
            return super().__getitem__(index)
class DataPipe():
    """Base class for a composable dataset-processing step.

    Calling a pipe on a dict (or an :class:`AugmentedDynamicItemDataset`)
    wraps it if needed, registers the optional ``tools``, then runs
    :meth:`forward`, which subclasses implement.
    """

    def __call__(self, dataset: Union[(dict, AugmentedDynamicItemDataset)], tools: dict=None) -> Any:
        if isinstance(dataset, dict):
            dataset = AugmentedDynamicItemDataset(dataset)
        if (tools is not None):
            dataset.add_tools(tools)
        return self.forward(dataset)

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        raise NotImplementedError

    def __getattribute__(self, name):
        # If an attribute is a speechbrain DynamicItem, rebind its wrapped
        # function to this instance so it behaves like a bound method when the
        # pipeline later calls it.
        value = super().__getattribute__(name)
        if isinstance(value, DynamicItem):
            value.func = value.func.__get__(self)
        return value
class SequentialDataPipe(DataPipe):
    """A :class:`DataPipe` that applies several pipes one after another."""

    def __init__(self, *pipes: DataPipe) -> None:
        # Annotation fixed: each positional argument is a single DataPipe
        # (was `List[DataPipe]`, which mistyped the element, not the varargs).
        self._pipes = pipes

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        for pipe in self._pipes:
            dataset = pipe(dataset)
        return dataset
def default_collate_fn(samples, padding_value: int=0):
    """
    Each item in **DynamicItemDataset** is a dict.
    This function pads (or turns into a numpy object array) a batch of dicts.

    Args:
        samples (List[dict]): Suppose each Container is in

        .. code-block:: yaml

            wav: a single waveform
            label: a single string

    Return:
        dict

        .. code-block:: yaml

            wav: padded waveforms
            label: np.array([a list of string labels])
    """
    assert isinstance(samples[0], dict)
    collated = dict()
    for key in samples[0].keys():
        column = [sample[key] for sample in samples]
        first = column[0]
        # Dispatch on the type of the first value; the batch is assumed homogeneous.
        if isinstance(first, int):
            collated[key] = torch.LongTensor(column)
        elif isinstance(first, float):
            collated[key] = torch.FloatTensor(column)
        elif isinstance(first, np.ndarray):
            tensors = [torch.from_numpy(value).float() for value in column]
            collated[key] = pad_sequence(tensors, batch_first=True, padding_value=padding_value)
        elif isinstance(first, torch.Tensor):
            collated[key] = pad_sequence(column, batch_first=True, padding_value=padding_value)
        else:
            # Fallback (e.g. strings): keep as a numpy object array.
            collated[key] = np.array(column, dtype='object')
    return collated
def _count_frames(data_len, size, step): return int((((data_len - size) + step) / step))
def _gen_frame_indices(data_length, size=2000, step=2000, use_last_samples=True):
    """Yield (start, end) windows of `size` frames with stride `step`.

    With ``use_last_samples``, a final shorter window covering the tail is
    appended when the last full window stops short of ``data_length``.
    """
    i = -1  # stays -1 when no full window fits, so the tail starts at frame 0
    for i in range(_count_frames(data_length, size, step)):
        yield i * step, i * step + size
    last_end = i * step + size
    if use_last_samples and last_end < data_length:
        tail_start = (i + 1) * step
        if data_length - tail_start > 0:
            yield tail_start, data_length
@dataclass
class UnfoldChunkByFrame(DataPipe):
    """
    Given a dataset with items containing `start_sec_name` and `end_sec_name`.
    For each item, produce `((end_sec_name - start_sec_name) * sample_rate / feat_frame_shift) / chunk_frames`
    items with the smaller durations between `min_chunk_frames` and `max_chunk_frames`

    Args:
        sample_rate (int): The sample_rate of the audio.
        feat_frame_shift (int): The target feature's frame_shift
        min_chunk_frames (int): The min frames for a chunk
        max_chunk_frames (int): The max frames for a chunk
        step_frames (int): The step frames for each sliding window
        use_last_samples (bool): whether to drop the last samples of an utterance which cannot form an window
        start_sec_name (str): The key name for the starting second of the audio
        end_sec_name (str): The key name for the ending second of the audio
    """
    sample_rate: int = 16000
    feat_frame_shift: int = 160
    min_chunk_frames: int = 2000
    max_chunk_frames: int = 2000
    step_frames: int = 2000
    use_last_samples: bool = True
    start_sec_name: str = 'start_sec'
    end_sec_name: str = 'end_sec'

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        # NOTE(review): only min_chunk_frames is passed to _gen_frame_indices
        # as the window size, and use_last_samples / max_chunk_frames are never
        # read here (the helper's own default use_last_samples=True applies) —
        # confirm this is intended.
        unfolded_items = OrderedDict()
        for item in dataset:
            key = item.pop('id')
            # Segment duration expressed in feature frames.
            data_len = int((((item[self.end_sec_name] - item[self.start_sec_name]) * self.sample_rate) / self.feat_frame_shift))
            for (unfold_index, (start, end)) in enumerate(_gen_frame_indices(data_len, self.min_chunk_frames, self.step_frames)):
                # Map frame indices back to absolute seconds.
                start_sec = (item[self.start_sec_name] + ((start * self.feat_frame_shift) / self.sample_rate))
                end_sec = (item[self.start_sec_name] + ((end * self.feat_frame_shift) / self.sample_rate))
                dur_sec = (end_sec - start_sec)
                utt_id = f'{key}_start-{start_sec}_end-{end_sec}_dur-{dur_sec}'
                subitem = deepcopy(item)
                subitem['unchunked_id'] = key
                subitem['chunk_index'] = unfold_index
                subitem[self.start_sec_name] = start_sec
                subitem[self.end_sec_name] = end_sec
                unfolded_items[utt_id] = subitem
        new_dataset = AugmentedDynamicItemDataset(unfolded_items)
        # Share (not deep-copy) the tools of the source dataset.
        new_dataset.add_tools(dataset.all_tools(False))
        return new_dataset
@dataclass
class UnfoldChunkBySec(DataPipe):
    """Unfold each item's [start_sec, end_sec] span into fixed-length chunks
    measured in seconds (second-based variant of :class:`UnfoldChunkByFrame`)."""
    use_last_samples: bool = True
    min_chunk_secs: float = 2.5
    max_chunk_secs: float = 2.5
    # Annotation fixed from `int`: the default and the arithmetic are fractional seconds.
    step_secs: float = 2.5
    start_sec_name: str = 'start_sec'
    end_sec_name: str = 'end_sec'

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        # NOTE(review): max_chunk_secs and use_last_samples are never read here;
        # only min_chunk_secs / step_secs reach _gen_frame_indices — confirm intended.
        unfolded_items = OrderedDict()
        for item in dataset:
            key = item.pop('id')
            for (unfold_index, (start, end)) in enumerate(_gen_frame_indices((item[self.end_sec_name] - item[self.start_sec_name]), self.min_chunk_secs, self.step_secs)):
                # start/end are offsets in seconds relative to the item's start.
                start_sec = (item[self.start_sec_name] + start)
                end_sec = (item[self.start_sec_name] + end)
                dur_sec = (end_sec - start_sec)
                utt_id = f'{key}_start-{start_sec}_end-{end_sec}_dur-{dur_sec}'
                subitem = deepcopy(item)
                subitem['unchunked_id'] = key
                subitem['chunk_index'] = unfold_index
                subitem[self.start_sec_name] = start_sec
                subitem[self.end_sec_name] = end_sec
                unfolded_items[utt_id] = subitem
        new_dataset = AugmentedDynamicItemDataset(unfolded_items)
        # Share (not deep-copy) the tools of the source dataset.
        new_dataset.add_tools(dataset.all_tools(False))
        return new_dataset
class SetOutputKeys(DataPipe):
    """Pipe that updates (rather than replaces) the dataset's output-keys mapping."""

    def __init__(self, output_keys: dict=None) -> None:
        super().__init__()
        self.output_keys = output_keys

    def forward(self, dataset: AugmentedDynamicItemDataset):
        dataset.update_output_keys(self.output_keys)
        return dataset
@dataclass
class LoadAudio(DataPipe):
    """Pipe that attaches waveform-loading dynamic items.

    Reads the path under `wav_path_name`, optionally cropping to
    [start_sec, end_sec] when both keys exist in the dataset, and provides
    `wav_name` (shape: num_samples x 1) plus `{wav_name}_len`.
    """
    audio_sample_rate: int = 16000
    audio_channel_reduction: str = 'first'  # 'first' keeps channel 0, 'mean' averages channels
    sox_effects: list = None
    wav_path_name: str = 'wav_path'
    wav_name: str = 'wav'
    start_sec_name: str = 'start_sec'
    end_sec_name: str = 'end_sec'

    def load_audio(self, wav_path, start_sec: float=None, end_sec: float=None):
        """Load one waveform; crop, apply sox effects, resample and reduce channels."""
        crop_segment = ((start_sec is not None) and (end_sec is not None))
        torchaudio.set_audio_backend('sox_io')
        # NOTE(review): the crop offsets are computed with self.audio_sample_rate,
        # which is only correct when the file's native sample rate matches it —
        # resampling happens after the crop. Confirm input files are already
        # at audio_sample_rate when cropping is used.
        (wav, sr) = torchaudio.load(wav_path, frame_offset=(round((start_sec * self.audio_sample_rate)) if crop_segment else 0), num_frames=(round(((end_sec - start_sec) * self.audio_sample_rate)) if crop_segment else (- 1)))
        if (self.sox_effects is not None):
            (wav, sr) = torchaudio.sox_effects.apply_effects_tensor(wav, sr, effects=self.sox_effects)
        if (sr != self.audio_sample_rate):
            resampler = torchaudio.transforms.Resample(sr, self.audio_sample_rate)
            wav = resampler(wav)
        # Collapse the channel dimension, then reshape to (num_samples, 1).
        if (self.audio_channel_reduction == 'first'):
            wav = wav[0]
        elif (self.audio_channel_reduction == 'mean'):
            wav = wav.mean(dim=0)
        wav = wav.view((- 1), 1)
        return wav

    def compute_length(self, wav):
        # Number of samples after channel reduction.
        return len(wav)

    def forward(self, dataset: AugmentedDynamicItemDataset):
        # Probe the first item to decide whether segment cropping applies.
        item = dataset[0]
        if ((self.start_sec_name in item) and (self.end_sec_name in item)):
            crop_segment = True
        else:
            crop_segment = False
        if (not crop_segment):
            dataset.add_dynamic_item(self.load_audio, takes=self.wav_path_name, provides=self.wav_name)
        else:
            dataset.add_dynamic_item(self.load_audio, takes=[self.wav_path_name, self.start_sec_name, self.end_sec_name], provides=self.wav_name)
        dataset.add_dynamic_item(self.compute_length, takes=self.wav_name, provides=f'{self.wav_name}_len')
        return dataset
@dataclass
class EncodeCategory(DataPipe):
    # Maps a single string label per item to an integer class id via a
    # CategoryEncoder stored as a dataset tool.
    train_category_encoder: bool = False  # fit the encoder from this dataset's labels
    label_name: str = 'label'  # item key holding the raw label
    category_encoder_name: str = 'category'  # tool name of the encoder
    encoded_target_name: str = 'class_id'  # provided key for the encoded id

    def prepare_category(self, labels):
        # Deterministic encoder: categories are the sorted unique labels.
        return CategoryEncoder(sorted(list(set(labels))))

    def encode_label(self, category, label):
        # Dynamic item: label string -> integer id.
        return category.encode(label)

    def forward(self, dataset: AugmentedDynamicItemDataset):
        if self.train_category_encoder:
            # Gather every label once to fit the encoder, then store it as a tool.
            with dataset.output_keys_as([self.label_name]):
                labels = [item[self.label_name] for item in dataset]
            category = self.prepare_category(labels)
            dataset.add_tool(self.category_encoder_name, category)
        # The encoder tool must exist by now (either trained above or added upstream).
        category = dataset.get_tool(self.category_encoder_name)
        dataset.add_tool('output_size', len(category))
        dataset.add_dynamic_item(self.encode_label, takes=[self.category_encoder_name, self.label_name], provides=self.encoded_target_name)
        return dataset
@dataclass
class EncodeMultipleCategory(EncodeCategory):
    # Variant of EncodeCategory for items carrying one label per label *type*;
    # each type gets its own encoder and the ids are packed into a LongTensor.
    train_category_encoder: bool = False  # fit the encoders from this dataset's labels
    label_name: str = 'labels'  # item key holding the list of labels
    category_encoder_name: str = 'categories'  # tool name of the encoder list
    encoded_target_name: str = 'class_ids'  # provided key for the encoded id tensor

    def encode_label(self, categories, labels):
        # One id per (encoder, label) pair, in label-type order.
        return torch.LongTensor([category.encode(label) for (category, label) in zip(categories, labels)])

    def forward(self, dataset: AugmentedDynamicItemDataset):
        if self.train_category_encoder:
            with dataset.output_keys_as([self.label_name]):
                labels = [item[self.label_name] for item in dataset]
            # Transpose item-major labels into type-major lists so each label
            # type is fitted independently.
            label_types = list(zip(*labels))
            categories = [self.prepare_category(label_type) for label_type in label_types]
            dataset.add_tool(self.category_encoder_name, categories)
            # NOTE(review): unlike EncodeCategory, the encoders are only
            # available here when train_category_encoder is True, so
            # 'output_size' is only registered in that case; otherwise both
            # tools are assumed to be provided upstream -- confirm.
            dataset.add_tool('output_size', sum([len(c) for c in categories]))
        dataset.add_dynamic_item(self.encode_label, takes=[self.category_encoder_name, self.label_name], provides=self.encoded_target_name)
        return dataset
@dataclass
class EncodeMultiLabel(DataPipe):
    # Encodes a variable-length list of labels per item into a fixed-size
    # multi-hot ("binary") target vector.
    label_name: str = 'labels'  # item key holding the list of raw labels
    category_encoder_name: str = 'category'  # tool name of the CategoryEncoder
    encoded_target_name: str = 'binary_labels'  # provided key for the multi-hot vector

    @staticmethod
    def label_to_binary_vector(label: List, num_labels: int) -> torch.Tensor:
        """Multi-hot encode the class indices in `label` into a float vector.

        Args:
            label: list of integer class indices (may be empty).
            num_labels: length of the output vector.

        Returns:
            Float tensor of shape (num_labels,) with ones at the given indices.
        """
        if (len(label) == 0):
            binary_labels = torch.zeros((num_labels,), dtype=torch.float)
        else:
            binary_labels = torch.zeros((num_labels,)).scatter(0, torch.tensor(label), 1.0)
            # Sanity check: the set bits match the requested indices exactly.
            assert (set(torch.where((binary_labels == 1.0))[0].numpy()) == set(label))
        return binary_labels

    def encode_label(self, category, labels):
        # Dynamic item: list of label strings -> multi-hot tensor.
        labels = [category.encode(label) for label in labels]
        binary_labels = self.label_to_binary_vector(labels, len(category))
        return binary_labels

    def forward(self, dataset: AugmentedDynamicItemDataset):
        if (not dataset.has_tool(self.category_encoder_name)):
            # First use: fit the encoder on every label observed in the dataset.
            with dataset.output_keys_as([self.label_name]):
                all_labels = []
                for item in dataset:
                    all_labels.extend(item[self.label_name])
            # Bug fix: the original passed an unordered set to CategoryEncoder,
            # so the label -> index assignment depended on Python's hash
            # randomization and could differ between runs. Sort the unique
            # labels instead (consistent with EncodeCategory.prepare_category).
            category = CategoryEncoder(sorted(set(all_labels)))
            dataset.add_tool(self.category_encoder_name, category)
        category = dataset.get_tool(self.category_encoder_name)
        dataset.add_tool('output_size', len(category))
        dataset.add_dynamic_item(self.encode_label, takes=[self.category_encoder_name, self.label_name], provides=self.encoded_target_name)
        return dataset
@dataclass
class GenerateTokenizer(DataPipe):
    # Ensures a tokenizer tool exists on the dataset: reuses an existing one,
    # loads one from a vocab file, or trains a new one from the dataset text.
    generate: bool = True  # allow generating a tokenizer when none exists
    tokenizer_name: str = 'tokenizer'  # tool name under which the tokenizer is stored
    text_name: str = 'transcription'  # item key holding the training text
    vocab_type: str = 'character'  # vocabulary type (e.g. character / subword)
    text_file: str = None  # optional text file to train the vocab from
    vocab_file: str = None  # optional pre-built vocab file to load
    slots_file: str = None  # optional slots definition file
    vocab_args: dict = None  # extra kwargs forwarded to generate_vocab

    def prepare_tokenizer(self, text_list: List[str] = None) -> Tokenizer:
        """Generates tokenizer from text data.

        Args:
            text_list (List[str], optional): List of text. Falls back to
                self.text_file when None. Defaults to None.

        Returns:
            Tokenizer: Generated tokenizer
        """
        vocab_args = (self.vocab_args or {})
        assert isinstance(vocab_args, dict)
        if (text_list is not None):
            vocab_result = generate_vocab(self.vocab_type, text_list=text_list, **vocab_args)
        else:
            vocab_result = generate_vocab(self.vocab_type, text_file=self.text_file, **vocab_args)
        # generate_vocab returns either an in-memory vocab list or a file path.
        vocab_list = (vocab_result if isinstance(vocab_result, list) else None)
        vocab_file = (vocab_result if isinstance(vocab_result, str) else None)
        tokenizer = load_tokenizer(self.vocab_type, vocab_file=vocab_file, vocab_list=vocab_list, slots_file=self.slots_file)
        return tokenizer

    def forward(self, dataset: AugmentedDynamicItemDataset):
        try:
            # Reuse an already-registered tokenizer if present.
            tokenizer = dataset.get_tool(self.tokenizer_name)
            logger.info(f'Tokenizer (name = {self.tokenizer_name}) exists in dataset, skip generation.')
        except KeyError:
            if self.generate:
                if ((self.vocab_file is not None) and os.path.exists(self.vocab_file)):
                    # Load directly from the prepared vocab file.
                    tokenizer = load_tokenizer(self.vocab_type, vocab_file=self.vocab_file, slots_file=self.slots_file)
                else:
                    # Train from the dataset's text unless a text_file was given.
                    text_list = None
                    if (self.text_file is None):
                        with dataset.output_keys_as([self.text_name]):
                            text_list = [item[self.text_name] for item in dataset]
                    tokenizer = self.prepare_tokenizer(text_list)
                dataset.add_tool(self.tokenizer_name, tokenizer)
            else:
                logger.warning('No tokenizer is found or generated. No-op for this DataPipe')
        return dataset
@dataclass
class EncodeText(DataPipe):
    """Tokenize a text field into a LongTensor of ids with a tokenizer tool."""

    text_name: str = 'transcription'  # item key holding the raw text
    output_text_name: str = 'tokenized_text'  # provided key for the id tensor
    tokenizer_name: str = 'tokenizer'  # tool name of the tokenizer

    def encode_text(self, tokenizer: Tokenizer, text: str) -> torch.LongTensor:
        # Dynamic item: raw text -> 1-D tensor of token ids.
        ids = tokenizer.encode(text)
        return torch.LongTensor(ids)

    def forward(self, dataset: AugmentedDynamicItemDataset):
        """Register the encoding step; the tokenizer tool must already exist."""
        try:
            tokenizer = dataset.get_tool(self.tokenizer_name)
        except KeyError:
            raise KeyError(f'Tokenizer (name = {self.tokenizer_name}) not found!')
        dataset.add_dynamic_item(
            self.encode_text,
            takes=[self.tokenizer_name, self.text_name],
            provides=self.output_text_name,
        )
        dataset.add_tool('output_size', tokenizer.vocab_size)
        return dataset
@dataclass
class Phonemize(DataPipe):
    # Converts text to a phoneme string (G2P) and then to token ids, installing
    # default G2P / tokenizer tools when the dataset does not provide them.
    text_name: str = 'transcription'  # item key holding the raw text
    phonemized_text_name: str = 'phonemized_text'  # provided key for the phoneme string
    output_text_name: str = 'tokenized_text'  # provided key for the id tensor
    g2p_name: str = 'g2p'  # tool name of the grapheme-to-phoneme converter
    tokenizer_name: str = 'tokenizer'  # tool name of the phoneme tokenizer

    def grapheme2phoneme(self, g2p: G2P, text: str) -> str:
        # Dynamic item: raw text -> phoneme string.
        return g2p.encode(text)

    def encode_text(self, tokenizer: Tokenizer, text: str) -> torch.LongTensor:
        # Dynamic item: phoneme string -> 1-D tensor of token ids.
        return torch.LongTensor(tokenizer.encode(text))

    def forward(self, dataset: AugmentedDynamicItemDataset):
        # Fall back to library defaults when tools are missing.
        if (not dataset.has_tool(self.g2p_name)):
            logger.warning(f'Cannot find {self.g2p_name} in dataset, use default G2P instead.')
            dataset.add_tool(self.g2p_name, G2P())
        if (not dataset.has_tool(self.tokenizer_name)):
            logger.warning(f'Cannot find {self.tokenizer_name} in dataset, use default tokenizer instead.')
            dataset.add_tool(self.tokenizer_name, default_phoneme_tokenizer())
        dataset.add_dynamic_item(self.grapheme2phoneme, takes=[self.g2p_name, self.text_name], provides=self.phonemized_text_name)
        dataset.add_dynamic_item(self.encode_text, takes=[self.tokenizer_name, self.phonemized_text_name], provides=self.output_text_name)
        tokenizer = dataset.get_tool(self.tokenizer_name)
        dataset.add_tool('output_size', tokenizer.vocab_size)
        return dataset
@dataclass
class RandomCrop(DataPipe):
    """
    Completely randomized for every batch even with the same datapoint id.
    Only suitable for training.
    """
    sample_rate: int = 16000  # samples per second of the input waveform
    max_secs: float = None  # maximum crop length in seconds; None disables cropping
    wav_name: str = 'wav'  # item key holding the waveform
    crop_name: str = 'wav_crop'  # provided key for the cropped waveform

    def crop_wav(self, wav):
        """Randomly crop `wav` (along dim 0) to at most `max_secs` seconds.

        Returns:
            (cropped_wav, num_samples) tuple; the waveform is returned
            unchanged when it is already short enough or max_secs is None.
        """
        if self.max_secs is not None:
            # Bug fix: the original passed float bounds straight into
            # random.randint (max_secs * sample_rate may be fractional), which
            # raises ValueError; float arguments are rejected outright since
            # Python 3.12. Round once to an integer sample count instead.
            max_len = round(self.max_secs * self.sample_rate)
            if wav.size(0) > max_len:
                start = random.randint(0, wav.size(0) - max_len)
                wav = wav[start:start + max_len]
        return (wav, wav.size(0))

    def forward(self, dataset: AugmentedDynamicItemDataset) -> AugmentedDynamicItemDataset:
        # Register the crop and its resulting length as dynamic items.
        dataset.add_dynamic_item(self.crop_wav, takes=[self.wav_name], provides=[self.crop_name, f'{self.crop_name}_len'])
        return dataset
@dataclass
class ExtractKaldiFeat(DataPipe):
    # Registers a kaldi-style feature extraction dynamic item built from the
    # given kaldi/delta/cmvn configs.
    kaldi: dict = None
    delta: dict = None
    cmvn: dict = None
    wav_name: str = 'wav'
    feat_name: str = 'feat'
    '\n    Args:\n        kaldi (dict): args for the kaldi extracter\n        delta (dict): args for applying delta on features\n        cmvn (dict): args for applying cmvn on features\n        wav_name (str): handle for the `takes` (input)\n        feat_name (str): handle for the `provides` (output)\n    '

    def extract_feat(self, extracter, wav):
        '\n        (wav_seq_len, 1) -> extracter -> (feat_seq_len, feat_dim)\n        '
        feat = extracter(wav)
        return feat

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        # Build the extracter once and publish it plus its metadata as tools.
        _audio_config = {'kaldi': self.kaldi, 'delta': self.delta, 'cmvn': self.cmvn}
        (extracter, feat_dim, frame_shift) = kaldi_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_tool('frame_shift', frame_shift)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
@dataclass
class ExtractOnlineFeat(DataPipe):
    # Registers an on-the-fly spectral feature extraction dynamic item
    # (mel / mfcc / spectrogram per the input/target configs).
    win_ms: int = 25
    hop_ms: int = 10
    n_freq: int = 201
    n_mels: int = 80
    n_mfcc: int = 13
    input: dict = None
    target: dict = None
    wav_name: str = 'wav'
    feat_name: str = 'feat'
    '\n    Args:\n        win_ms (int): window size in ms\n        hop_ms (int): hop size in ms\n        n_freq (int): number of frequency bins\n        n_mels (int): number of mel features\n        n_mfcc (int): number of mfcc features\n        input (dict): args for the input feat, example - {"channel": 0, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}\n        target (dict): args for the output feat, example - {"channel": 1, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}\n        wav_name (str): handle for the `takes` (input)\n        feat_name (str): handle for the `provides` (output)\n    '

    def extract_feat(self, extracter, wav):
        '\n        (wav_seq_len, 1) -> permute + unsqueeze ->\n        (1, 1, wav_seq_len) -> extracter -> (feat_seq_len, feat_dim)\n        '
        # Add batch/channel dims expected by the extracter, then strip them.
        wav = wav.permute(1, 0).unsqueeze(0)
        feat = extracter(wav)[0][0]
        return feat

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        # Build the extracter once and publish it plus feat_dim as tools.
        _audio_config = {'win_ms': self.win_ms, 'hop_ms': self.hop_ms, 'n_freq': self.n_freq, 'n_mels': self.n_mels, 'n_mfcc': self.n_mfcc, 'input': self.input, 'target': self.target}
        (extracter, feat_dim, _) = online_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
@dataclass
class ExtractApcFeat(DataPipe):
    # Registers the APC-style feature extraction dynamic item.
    feat_type: str = 'fbank'
    feat_dim: int = 80
    frame_length: int = 25
    frame_shift: int = 10
    decode_wav: bool = False
    cmvn: bool = True
    wav_name: str = 'wav'
    feat_name: str = 'feat'
    '\n    Args:\n        feat_type (str): feature type\n        feat_dim (int): feature dimension\n        frame_length (int): window size in ms\n        frame_shift (int): hop size in ms\n        decode_wav (bool): whether to decode wav\n        cmvn (bool): whether to apply uttr.-wised CMVN on feature\n        wav_name (str): handle for the `takes` (input)\n        feat_name (str): handle for the `provides` (output)\n    '

    def extract_feat(self, extracter, wav):
        '\n        (wav_seq_len, 1) -> permute ->\n        (1, wav_seq_len) -> extracter -> (feat_seq_len, feat_dim)\n        '
        feat = extracter(wav.permute(1, 0))
        return feat

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        # Build the APC extracter once and publish it plus feat_dim as tools.
        _audio_config = {'feat_type': self.feat_type, 'feat_dim': self.feat_dim, 'frame_length': self.frame_length, 'frame_shift': self.frame_shift, 'decode_wav': self.decode_wav, 'cmvn': self.cmvn}
        (extracter, feat_dim) = apc_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
@dataclass
class ExtractNpcFeat(DataPipe):
    # Registers the NPC-style feature extraction dynamic item (same shape
    # contract as ExtractApcFeat but backed by npc_feat_extracter).
    feat_type: str = 'fbank'
    feat_dim: int = 80
    frame_length: int = 25
    frame_shift: int = 10
    decode_wav: bool = False
    cmvn: bool = True
    wav_name: str = 'wav'
    feat_name: str = 'feat'
    '\n    Args:\n        feat_type (str): feature type\n        feat_dim (int): feature dimension\n        frame_length (int): window size in ms\n        frame_shift (int): hop size in ms\n        decode_wav (bool): whether to decode wav\n        cmvn (bool): whether to apply uttr.-wised CMVN on feature\n        wav_name (str): handle for the `takes` (input)\n        feat_name (str): handle for the `provides` (output)\n    '

    def extract_feat(self, extracter, wav):
        '\n        (wav_seq_len, 1) -> permute ->\n        (1, wav_seq_len) -> extracter -> (feat_seq_len, feat_dim)\n        '
        feat = extracter(wav.permute(1, 0))
        return feat

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        # Build the NPC extracter once and publish it plus feat_dim as tools.
        _audio_config = {'feat_type': self.feat_type, 'feat_dim': self.feat_dim, 'frame_length': self.frame_length, 'frame_shift': self.frame_shift, 'decode_wav': self.decode_wav, 'cmvn': self.cmvn}
        (extracter, feat_dim) = npc_feat_extracter(_audio_config)
        dataset.add_tool('extracter', extracter)
        dataset.add_tool('feat_dim', feat_dim)
        dataset.add_dynamic_item(self.extract_feat, takes=['extracter', self.wav_name], provides=self.feat_name)
        return dataset
class HearTimestampDatapipe(SequentialDataPipe):
    # HEAR timestamp-task pipeline: unfold audio into fixed 4-second chunks,
    # load the waveform, and build frame-level multi-class tags.

    def __init__(self, sample_rate: int = 16000, feat_frame_shift: int = 160):
        """
        Args:
            sample_rate (int): audio sample rate
            feat_frame_shift (int): hop size, in samples, of one tag frame
        """
        super().__init__(UnfoldChunkBySec(min_chunk_secs=4.0, max_chunk_secs=4.0, step_secs=4.0), LoadAudio(audio_sample_rate=sample_rate), BuildMultiClassTagging(sample_rate=sample_rate, feat_frame_shift=feat_frame_shift, intra_or_inter='inter', all_category_name='category'), SetOutputKeys(dict(x='wav', x_len='wav_len', y='multiclass_tag', y_len='tag_len', record_id='unchunked_id', chunk_id='chunk_index')))
@dataclass
class NoiseAugmentation(DataPipe):
    # With probability noise_proportion, adds Gaussian noise to the feature.
    noise_proportion: float = 0.0
    input_feat_name: str = 'input_feat'
    output_feat_name: str = 'output_feat'
    '\n    Args:\n        noise_proportion (float): for this percentage of the time, Gaussian noise will be applied on all frames during MAM training, set to 0 for no noise\n        input_feat_name (str): handle for the `takes` (input)\n        output_feat_name (str): handle for the `provides` (output)\n    '

    def apply_noise_on_data(self, input_feat):
        """Return a (possibly) noise-augmented float32 copy of input_feat.

        When noise_proportion is 0 the input tensor is returned unchanged
        (same object, original dtype).
        """
        with torch.no_grad():
            if (self.noise_proportion > 0):
                # Work on a copy so the original feature is never mutated.
                noised_feat = copy.deepcopy(input_feat)
                dice = random.random()
                if (dice < self.noise_proportion):
                    noise_sampler = torch.distributions.Normal(0, 0.2)
                    noised_feat += noise_sampler.sample(noised_feat.shape)
                noised_feat = noised_feat.to(dtype=torch.float32)
                return noised_feat
            else:
                return input_feat

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        dataset.add_dynamic_item(self.apply_noise_on_data, takes=self.input_feat_name, provides=self.output_feat_name)
        return dataset
@dataclass
class NormWavDecibel(DataPipe):
    # Normalizes the waveform's RMS level to a target decibel value.
    target_level: int = (- 25)  # target dB level; 0 disables normalization
    wav_name: str = 'wav'  # handle for the `takes` (input)
    norm_wav_name: str = 'wav'  # handle for the `provides` (output); overwrites input by default

    def normalize_wav_decibel(self, wav):
        """Scale `wav` so its RMS matches `target_level` dB.

        Args:
            wav: (num_samples, 1) waveform tensor.

        Returns:
            (num_samples, 1) tensor; unscaled when target_level == 0.
        """
        wav = wav.squeeze()
        if (self.target_level == 0):
            # Bug fix: the original returned the squeezed 1-D tensor here,
            # while the normal path below returns (num_samples, 1). Keep the
            # output layout consistent regardless of target_level.
            return wav.unsqueeze(1)
        rms = wav.pow(2).mean().pow(0.5)
        # 1e-10 guards against division by zero on silent input.
        scalar = ((10 ** (self.target_level / 20)) / (rms + 1e-10))
        wav = (wav * scalar)
        return wav.unsqueeze(1)

    def __call__(self, dataset: AugmentedDynamicItemDataset):
        dataset.add_dynamic_item(self.normalize_wav_decibel, takes=self.wav_name, provides=self.norm_wav_name)
        return dataset
class PretrainApcPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
    """

    def __init__(self, output_keys: dict = None, n_future: int = 5, feat_type: str = 'fbank', feat_dim: int = 80, frame_length: int = 25, frame_shift: int = 10, decode_wav: bool = False, cmvn: bool = True, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', n_jobs: int = 6):
        """
        Args:
            output_keys (dict): args for the output handle
            n_future (int): number of future steps for the autoregressive predictive task
            feat_type (str): feature type
            feat_dim (int): feature dimension
            frame_length (int): window size in ms
            frame_shift (int): hop size in ms
            decode_wav (bool): whether to decode wav
            cmvn (bool): whether to apply uttr.-wised CMVN on feature
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        output_keys = (output_keys or dict(x='source_feat', label='target_feat', x_len='feat_len', unique_name='id'))
        # Pipeline: load wav -> APC features -> shift target by n_future -> expose keys.
        super().__init__(LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction), ExtractApcFeat(feat_type=feat_type, feat_dim=feat_dim, frame_length=frame_length, frame_shift=frame_shift, decode_wav=decode_wav, cmvn=cmvn, feat_name='source_feat'), AutoregressivePrediction(n_future=n_future, source_feat_name='source_feat', target_feat_name='target_feat', source_feat_len_name='feat_len'), SetOutputKeys(output_keys=output_keys))
class PretrainAudioAlbertPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
    """

    # NOTE(review): `input` and `target` use mutable dict defaults; they are
    # only read and forwarded here, but callers should not mutate them.
    def __init__(self, output_keys: dict = None, position_encoding_size: int = 768, mask_proportion: float = 0.15, mask_consecutive_min: int = 7, mask_consecutive_max: int = 7, mask_allow_overlap: bool = True, mask_bucket_ratio: float = 1.5, mask_frequency: float = 0.2, win_ms: int = 25, hop_ms: int = 10, n_freq: int = 201, n_mels: int = 80, n_mfcc: int = 13, input: dict = {'channel': 0, 'cmvn': True, 'delta': 0, 'feat_type': 'mel', 'log': True}, target: dict = {'channel': 1, 'cmvn': True, 'delta': 0, 'feat_type': 'mel', 'log': True}, target_level: int = (- 25), audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', n_jobs: int = 6):
        """
        Args:
            output_keys (dict): args for the output handle
            position_encoding_size (int): this should be identical to `hidden_size`
            mask_proportion (float): mask this percentage of all spectrogram frames in each sequence at random during MAM training
            mask_consecutive_min (int): mask this amount of consecutive frames
            mask_consecutive_max (int): mask this amount of consecutive frames
            mask_allow_overlap (bool): allow overlap masking
            mask_bucket_ratio (float): only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
            mask_frequency (float): mask maximum this percentage of frequency bands, set to 0 for no frequency mask
            win_ms (int): window size in ms
            hop_ms (int): hop size in ms
            n_freq (int): number of frequency bins
            n_mels (int): number of mel features
            n_mfcc (int): number of mfcc features
            input (dict): args for the input feat, example - {"channel": 0, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}
            target (dict): args for the output feat, example - {"channel": 1, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}
            target_level (int): normalize the wav decibel level to the target value
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        output_keys = (output_keys or dict(x='masked_feat', label='target_feat', label_mask='label_mask', position_encoding='pos_enc', attention_mask='attn_mask', unique_name='id'))
        # Pipeline: load wav -> dB norm -> online feats -> copy target -> mask -> expose keys.
        super().__init__(LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction), NormWavDecibel(target_level=target_level), ExtractOnlineFeat(win_ms=win_ms, hop_ms=hop_ms, n_freq=n_freq, n_mels=n_mels, n_mfcc=n_mfcc, input=input, target=target, feat_name='source_feat'), PrepareTargetFeat(use_copy=True, source_feat_name='source_feat', target_feat_name='target_feat'), MaskedReconstruction(position_encoding_size=position_encoding_size, mask_proportion=mask_proportion, mask_consecutive_min=mask_consecutive_min, mask_consecutive_max=mask_consecutive_max, mask_allow_overlap=mask_allow_overlap, mask_bucket_ratio=mask_bucket_ratio, mask_frequency=mask_frequency, source_feat_name='source_feat', target_feat_name='target_feat', masked_feat_name='masked_feat', pos_enc_name='pos_enc', attn_mask_name='attn_mask', label_mask_name='label_mask'), SetOutputKeys(output_keys=output_keys))
class PretrainMockingjayPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
    """

    # NOTE(review): `kaldi`, `delta` and `cmvn` use mutable dict defaults; they
    # are only read and forwarded here, but callers should not mutate them.
    def __init__(self, output_keys: dict = None, position_encoding_size: int = 768, mask_proportion: float = 0.15, mask_consecutive_min: int = 7, mask_consecutive_max: int = 7, mask_allow_overlap: bool = True, mask_bucket_ratio: float = 1.5, mask_frequency: float = 0.2, kaldi: dict = {'feat_type': 'fbank', 'fbank': {'frame_length': 25.0, 'frame_shift': 10.0, 'num_mel_bins': 80, 'use_log_fbank': True}, 'mfcc': {'frame_length': 25.0, 'frame_shift': 10.0, 'num_ceps': 13}, 'spectrogram': {'frame_length': 25.0, 'frame_shift': 10.0}}, delta: dict = {'order': 2, 'win_length': 5}, cmvn: dict = {'use_cmvn': True}, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', n_jobs: int = 6):
        """
        Args:
            output_keys (dict): args for the output handle
            position_encoding_size (int): this should be identical to `hidden_size`
            mask_proportion (float): mask this percentage of all spectrogram frames in each sequence at random during MAM training
            mask_consecutive_min (int): mask this amount of consecutive frames
            mask_consecutive_max (int): mask this amount of consecutive frames
            mask_allow_overlap (bool): allow overlap masking
            mask_bucket_ratio (float): only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
            mask_frequency (float): mask maximum this percentage of frequency bands, set to 0 for no frequency mask
            kaldi (dict): args for the kaldi extracter
            delta (dict): args for applying delta on features
            cmvn (dict): args for applying cmvn on features
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        output_keys = (output_keys or dict(x='masked_feat', label='target_feat', label_mask='label_mask', position_encoding='pos_enc', attention_mask='attn_mask', unique_name='id'))
        # Pipeline: load wav -> kaldi feats -> copy target -> mask -> expose keys.
        super().__init__(LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction), ExtractKaldiFeat(kaldi=kaldi, delta=delta, cmvn=cmvn, feat_name='source_feat'), PrepareTargetFeat(use_copy=True, source_feat_name='source_feat', target_feat_name='target_feat'), MaskedReconstruction(position_encoding_size=position_encoding_size, mask_proportion=mask_proportion, mask_consecutive_min=mask_consecutive_min, mask_consecutive_max=mask_consecutive_max, mask_allow_overlap=mask_allow_overlap, mask_bucket_ratio=mask_bucket_ratio, mask_frequency=mask_frequency, source_feat_name='source_feat', target_feat_name='target_feat', masked_feat_name='masked_feat', pos_enc_name='pos_enc', attn_mask_name='attn_mask', label_mask_name='label_mask'), SetOutputKeys(output_keys=output_keys))
class PretrainNpcPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
    """

    def __init__(self, output_keys: dict = None, feat_type: str = 'fbank', feat_dim: int = 80, frame_length: int = 25, frame_shift: int = 10, decode_wav: bool = False, cmvn: bool = True, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', n_jobs: int = 6):
        """
        Args:
            output_keys (dict): args for the output handle
            feat_type (str): feature type
            feat_dim (int): feature dimension
            frame_length (int): window size in ms
            frame_shift (int): hop size in ms
            decode_wav (bool): whether to decode wav
            cmvn (bool): whether to apply uttr.-wised CMVN on feature
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        output_keys = (output_keys or dict(x='source_feat', label='target_feat', label_mask='label_mask', unique_name='id'))
        # Pipeline: load wav -> NPC feats -> label mask from length -> copy target -> expose keys.
        super().__init__(LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction), ExtractNpcFeat(feat_type=feat_type, feat_dim=feat_dim, frame_length=frame_length, frame_shift=frame_shift, decode_wav=decode_wav, cmvn=cmvn, feat_name='source_feat'), LabelMaskFromLen(target_feat_name='target_feat', label_mask_name='label_mask'), PrepareTargetFeat(use_copy=True, source_feat_name='source_feat', target_feat_name='target_feat'), SetOutputKeys(output_keys=output_keys))
class PretrainTeraPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
    """

    # NOTE(review): `input` and `target` use mutable dict defaults; they are
    # only read and forwarded here, but callers should not mutate them.
    def __init__(self, output_keys: dict = None, position_encoding_size: int = 768, mask_proportion: float = 0.15, mask_consecutive_min: int = 7, mask_consecutive_max: int = 7, mask_allow_overlap: bool = True, mask_bucket_ratio: float = 1.5, mask_frequency: float = 0.2, noise_proportion: float = 0.0, win_ms: int = 25, hop_ms: int = 10, n_freq: int = 201, n_mels: int = 80, n_mfcc: int = 13, input: dict = {'channel': 0, 'cmvn': True, 'delta': 0, 'feat_type': 'mel', 'log': True}, target: dict = {'channel': 1, 'cmvn': True, 'delta': 0, 'feat_type': 'mel', 'log': True}, target_level: int = (- 25), audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', n_jobs: int = 6):
        """
        Args:
            output_keys (dict): args for the output handle
            position_encoding_size (int): this should be identical to `hidden_size`
            mask_proportion (float): mask this percentage of all spectrogram frames in each sequence at random during MAM training
            mask_consecutive_min (int): mask this amount of consecutive frames
            mask_consecutive_max (int): mask this amount of consecutive frames
            mask_allow_overlap (bool): allow overlap masking
            mask_bucket_ratio (float): only used when overlap is not allowed. sample a mask from each bucket in size of [sampled mask_consecutive * mask_bucket_ratio]
            mask_frequency (float): mask maximum this percentage of frequency bands, set to 0 for no frequency mask
            noise_proportion (float): for this percentage of the time, Gaussian noise will be applied on all frames during MAM training, set to 0 for no noise
            win_ms (int): window size in ms
            hop_ms (int): hop size in ms
            n_freq (int): number of frequency bins
            n_mels (int): number of mel features
            n_mfcc (int): number of mfcc features
            input (dict): args for the input feat, example - {"channel": 0, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}
            target (dict): args for the output feat, example - {"channel": 1, "cmvn": True, "delta": 0, "feat_type": "mel", "log": True,}
            target_level (int): normalize the wav decibel level to the target value
            audio_sample_rate (int): audio sample rate
            audio_channel_reduction (str): "first" channel
            n_jobs (int): number of workers
        """
        output_keys = (output_keys or dict(x='masked_feat', label='target_feat', label_mask='label_mask', position_encoding='pos_enc', attention_mask='attn_mask', unique_name='id'))
        # Pipeline: load wav -> dB norm -> online feats -> copy target ->
        # noise-augment the source -> mask the noised feat -> expose keys.
        super().__init__(LoadAudio(n_jobs=n_jobs, audio_sample_rate=audio_sample_rate, audio_channel_reduction=audio_channel_reduction), NormWavDecibel(target_level=target_level), ExtractOnlineFeat(win_ms=win_ms, hop_ms=hop_ms, n_freq=n_freq, n_mels=n_mels, n_mfcc=n_mfcc, input=input, target=target, feat_name='source_feat'), PrepareTargetFeat(use_copy=True, source_feat_name='source_feat', target_feat_name='target_feat'), NoiseAugmentation(noise_proportion=noise_proportion, input_feat_name='source_feat', output_feat_name='noised_feat'), MaskedReconstruction(position_encoding_size=position_encoding_size, mask_proportion=mask_proportion, mask_consecutive_min=mask_consecutive_min, mask_consecutive_max=mask_consecutive_max, mask_allow_overlap=mask_allow_overlap, mask_bucket_ratio=mask_bucket_ratio, mask_frequency=mask_frequency, source_feat_name='noised_feat', target_feat_name='target_feat', masked_feat_name='masked_feat', pos_enc_name='pos_enc', attn_mask_name='attn_mask', label_mask_name='label_mask'), SetOutputKeys(output_keys=output_keys))
class SpeakerVerificationPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
        label: str
    """

    def __init__(self, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', random_crop_secs: float = (- 1), sox_effects: List[List] = None):
        # Start with audio loading; optionally append a random crop step.
        steps = [
            LoadAudio(
                audio_sample_rate=audio_sample_rate,
                audio_channel_reduction=audio_channel_reduction,
                sox_effects=sox_effects,
            )
        ]
        output_keys = dict(x='wav', x_len='wav_len', label='label', unique_name='id')
        if random_crop_secs != (- 1):
            steps.append(RandomCrop(sample_rate=audio_sample_rate, max_secs=random_crop_secs))
            # When cropping, expose the cropped waveform instead of the full one.
            output_keys['x'] = 'wav_crop'
            output_keys['x_len'] = 'wav_crop_len'
        steps.append(SetOutputKeys(output_keys))
        super().__init__(*steps)
class Speech2PhonemePipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
        transcription: str
    """

    def __init__(self):
        # Load audio, phonemize the transcription, then expose the fields.
        super().__init__(
            LoadAudio(),
            Phonemize(),
            SetOutputKeys(
                output_keys=dict(
                    x='wav',
                    x_len='wav_len',
                    labels='phonemized_text',
                    class_ids='tokenized_text',
                    unique_name='id',
                )
            ),
        )
class Speech2TextPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
        transcription: str
    """

    def __init__(self, generate_tokenizer: bool = False, vocab_type: str = 'character', text_file: str = None, vocab_file: str = None, slots_file: str = None, vocab_args: dict = None):
        # Prepare (or reuse) the tokenizer, then encode transcriptions with it.
        tokenizer_step = GenerateTokenizer(
            generate=generate_tokenizer,
            vocab_type=vocab_type,
            text_file=text_file,
            vocab_file=vocab_file,
            slots_file=slots_file,
            vocab_args=vocab_args,
        )
        super().__init__(
            LoadAudio(),
            tokenizer_step,
            EncodeText(),
            SetOutputKeys(
                output_keys=dict(
                    x='wav',
                    x_len='wav_len',
                    labels='transcription',
                    class_ids='tokenized_text',
                    unique_name='id',
                )
            ),
        )
class UtteranceClassificationPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
        label: str
    """

    def __init__(self, output_keys: dict = None, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', sox_effects: list = None, train_category_encoder: bool = False):
        # Default key mapping used when the caller does not supply one.
        output_keys = output_keys or dict(
            x='wav',
            x_len='wav_len',
            class_id='class_id',
            label='label',
            unique_name='id',
        )
        super().__init__(
            LoadAudio(
                audio_sample_rate=audio_sample_rate,
                audio_channel_reduction=audio_channel_reduction,
                sox_effects=sox_effects,
            ),
            EncodeCategory(train_category_encoder=train_category_encoder),
            SetOutputKeys(output_keys=output_keys),
        )
class UtteranceMultipleCategoryClassificationPipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
        labels: List[str]
    """

    def __init__(self, output_keys: dict = None, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first', sox_effects: list = None, train_category_encoder: bool = False):
        # Default key mapping used when the caller does not supply one.
        output_keys = output_keys or dict(
            x='wav',
            x_len='wav_len',
            class_ids='class_ids',
            labels='labels',
            unique_name='id',
        )
        super().__init__(
            LoadAudio(
                audio_sample_rate=audio_sample_rate,
                audio_channel_reduction=audio_channel_reduction,
                sox_effects=sox_effects,
            ),
            EncodeMultipleCategory(train_category_encoder=train_category_encoder),
            SetOutputKeys(output_keys=output_keys),
        )
class HearScenePipe(SequentialDataPipe):
    """
    each item in the input dataset should have:
        wav_path: str
        labels: List[str]
    """

    def __init__(self, output_keys: dict = None, audio_sample_rate: int = 16000, audio_channel_reduction: str = 'first'):
        # Default key mapping used when the caller does not supply one.
        output_keys = output_keys or dict(
            x='wav',
            x_len='wav_len',
            y='binary_labels',
            labels='labels',
            unique_name='id',
        )
        super().__init__(
            LoadAudio(
                audio_sample_rate=audio_sample_rate,
                audio_channel_reduction=audio_channel_reduction,
            ),
            EncodeMultiLabel(),
            SetOutputKeys(output_keys=output_keys),
        )
def generate_eval_pairs(file_list, train_file_list, eval_data_root, num_samples):
    """Build evaluation pairs: [source utterance, num_samples reference wavs].

    For every target speaker in TRGSPKS_TASK1, collect that speaker's
    available reference wavs, then pair each source utterance (every speaker
    in SRCSPKS x every number in file_list) with a freshly shuffled sample of
    the references.
    """
    pairs = []
    for trgspk in TRGSPKS_TASK1:
        # Reference wavs of the target speaker that actually exist on disk.
        candidates = [
            os.path.join(eval_data_root, trgspk, number + '.wav')
            for number in train_file_list
        ]
        candidates = [p for p in candidates if os.path.isfile(p)]
        for srcspk in SRCSPKS:
            for number in file_list:
                # Reshuffle so each source utterance draws its own references.
                random.shuffle(candidates)
                pairs.append(
                    [os.path.join(eval_data_root, srcspk, number + '.wav')]
                    + candidates[:num_samples]
                )
    return pairs
class VCTK_VCC2020Dataset(Dataset):
    """Any-to-any voice conversion dataset over VCTK (train/dev) and VCC2020 (test).

    Splits:
        train / dev: single waveforms listed in ``<lists_root>/<split>_list.txt``.
        test: (source wav, N reference wavs) pairs; pair lists are cached in
            ``<lists_root>/eval_<N>sample_list.txt`` and regenerated when absent.

    When ``spk_emb_source == 'external'``, speaker embeddings are extracted with
    a Resemblyzer ``VoiceEncoder`` and cached as HDF5 files under ``spk_embs_root``.
    """

    def __init__(self, split, trdev_data_root, eval_data_root, spk_embs_root,
                 lists_root, eval_lists_root, fbank_config, spk_emb_source,
                 num_ref_samples, train_dev_seed=1337, **kwargs):
        super(VCTK_VCC2020Dataset, self).__init__()
        self.split = split
        self.fbank_config = fbank_config
        self.spk_emb_source = spk_emb_source
        self.spk_embs_root = spk_embs_root
        os.makedirs(spk_embs_root, exist_ok=True)

        X = []
        if (split == 'train') or (split == 'dev'):
            file_list = open(os.path.join(lists_root, (split + '_list.txt'))).read().splitlines()
            for fname in file_list:
                (spk, number) = fname.split('_')
                wav_path = os.path.join(trdev_data_root, spk, (fname + '.wav'))
                X.append(wav_path)
            # deterministic shuffle so train/dev order is reproducible
            random.seed(train_dev_seed)
            random.shuffle(X)
        elif split == 'test':
            for num_samples in num_ref_samples:
                eval_pair_list_file = os.path.join(lists_root, 'eval_{}sample_list.txt'.format(num_samples))
                if os.path.isfile(eval_pair_list_file):
                    print('[Dataset] eval pair list file exists: {}'.format(eval_pair_list_file))
                    with open(eval_pair_list_file, 'r') as f:
                        lines = f.read().splitlines()
                    X += [line.split(',') for line in lines]
                else:
                    print('[Dataset] eval pair list file does not exist: {}'.format(eval_pair_list_file))
                    file_list = open(os.path.join(eval_lists_root, 'eval_list.txt')).read().splitlines()
                    train_file_list = open(os.path.join(eval_lists_root, 'E_train_list.txt')).read().splitlines()
                    eval_pairs = generate_eval_pairs(file_list, train_file_list, eval_data_root, num_samples)
                    # cache the generated pairs for future runs
                    with open(eval_pair_list_file, 'w') as f:
                        for line in eval_pairs:
                            f.write((','.join(line) + '\n'))
                    X += eval_pairs
        else:
            raise ValueError("Invalid 'split' argument for dataset: VCTK_VCC2020Dataset!")
        print((('[Dataset] - number of data for ' + split) + ': ') + str(len(X)))
        self.X = X

        if spk_emb_source == 'external':
            print('[Dataset] Extracting speaker emebddings')
            self.extract_spk_embs()
        else:
            # BUG FIX: the original evaluated the bare expression
            # `NotImplementedError` (a no-op) instead of raising it,
            # silently continuing without speaker embeddings.
            raise NotImplementedError

    def extract_spk_embs(self):
        """Extract and cache speaker embeddings for every item in ``self.X``.

        train/dev: ``self.X`` becomes a list of (wav_path, spk_emb_path) tuples.
        test: the reference wav paths of each pair are replaced by their
        embedding paths (the source wav path is kept unchanged).
        """
        spk_encoder = VoiceEncoder()
        if (self.split == 'train') or (self.split == 'dev'):
            spk_emb_paths = [
                os.path.join(self.spk_embs_root, os.path.basename(wav_path).replace('.wav', '.h5'))
                for wav_path in self.X
            ]
            self.X = list(zip(self.X, spk_emb_paths))
            for (wav_path, spk_emb_path) in tqdm(self.X, dynamic_ncols=True, desc='Extracting speaker embedding'):
                if not os.path.isfile(spk_emb_path):
                    wav = preprocess_wav(wav_path)
                    embedding = spk_encoder.embed_utterance(wav)
                    write_hdf5(spk_emb_path, 'spk_emb', embedding.astype(np.float32))
        elif self.split == 'test':
            new_X = []
            for wav_paths in self.X:
                source_wav_path = wav_paths[0]
                new_tuple = [source_wav_path]
                for wav_path in wav_paths[1:]:
                    # embedding file name is "<spk>_<number>.h5"
                    (spk, number) = wav_path.split(os.sep)[-2:]
                    spk_emb_path = os.path.join(self.spk_embs_root, ((spk + '_') + number.replace('.wav', '.h5')))
                    new_tuple.append(spk_emb_path)
                    if not os.path.isfile(spk_emb_path):
                        wav = preprocess_wav(wav_path)
                        embedding = spk_encoder.embed_utterance(wav)
                        write_hdf5(spk_emb_path, 'spk_emb', embedding.astype(np.float32))
                new_X.append(new_tuple)
            self.X = new_X

    def _load_wav(self, wav_path, fs):
        # fs=None keeps the file's native sampling rate
        (wav, sr) = librosa.load(wav_path, sr=fs)
        return (wav, sr)

    def __len__(self):
        return len(self.X)

    def get_all_lmspcs(self):
        """Compute log-mel spectrograms for every (source) waveform at its native rate."""
        lmspcs = []
        for xs in tqdm(self.X, dynamic_ncols=True, desc='Extracting target acoustic features'):
            input_wav_path = xs[0]
            (input_wav_original, fs_original) = self._load_wav(input_wav_path, fs=None)
            lmspc = logmelspectrogram(
                x=input_wav_original,
                fs=fs_original,
                n_mels=self.fbank_config['n_mels'],
                n_fft=self.fbank_config['n_fft'],
                n_shift=self.fbank_config['n_shift'],
                win_length=self.fbank_config['win_length'],
                window=self.fbank_config['window'],
                fmin=self.fbank_config['fmin'],
                fmax=self.fbank_config['fmax'],
            )
            lmspcs.append(lmspc)
        return lmspcs

    def __getitem__(self, index):
        input_wav_path = self.X[index][0]
        spk_emb_paths = self.X[index][1:]
        # reference speaker name is the prefix of the first embedding file
        ref_spk_name = os.path.basename(spk_emb_paths[0]).split('_')[0]
        # waveform at the fbank sampling rate (for spectrogram extraction) ...
        (input_wav_original, _) = self._load_wav(input_wav_path, fs=self.fbank_config['fs'])
        # ... and at the fixed upstream-model sampling rate FS
        (input_wav_resample, fs_resample) = self._load_wav(input_wav_path, fs=FS)
        lmspc = logmelspectrogram(
            x=input_wav_original,
            fs=self.fbank_config['fs'],
            n_mels=self.fbank_config['n_mels'],
            n_fft=self.fbank_config['n_fft'],
            n_shift=self.fbank_config['n_shift'],
            win_length=self.fbank_config['win_length'],
            window=self.fbank_config['window'],
            fmin=self.fbank_config['fmin'],
            fmax=self.fbank_config['fmax'],
        )
        if self.spk_emb_source == 'external':
            ref_spk_embs = [read_hdf5(spk_emb_path, 'spk_emb') for spk_emb_path in spk_emb_paths]
            ref_spk_embs = np.stack(ref_spk_embs, axis=0)
            # average the reference embeddings into one speaker embedding
            ref_spk_emb = np.mean(ref_spk_embs, axis=0)
        else:
            ref_spk_emb = None
        if self.split == 'test':
            # encode the number of reference samples into the output wav name
            input_wav_name = input_wav_path.replace('.wav', '')
            input_wav_path = input_wav_name + '_{}samples.wav'.format(len(spk_emb_paths))
        return (input_wav_resample, input_wav_original, lmspc, ref_spk_emb, input_wav_path, ref_spk_name)

    def collate_fn(self, batch):
        """Sort by descending original-waveform length and pad acoustic features."""
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        wavs = [torch.from_numpy(sorted_batch[i][0]) for i in range(bs)]
        wavs_2 = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        acoustic_features = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        acoustic_features_padded = pad_sequence(acoustic_features, batch_first=True)
        acoustic_feature_lengths = torch.from_numpy(
            np.array([acoustic_feature.size(0) for acoustic_feature in acoustic_features])
        )
        ref_spk_embs = torch.from_numpy(np.array([sorted_batch[i][3] for i in range(bs)]))
        wav_paths = [sorted_batch[i][4] for i in range(bs)]
        ref_spk_names = [sorted_batch[i][5] for i in range(bs)]
        return (wavs, wavs_2, acoustic_features, acoustic_features_padded,
                acoustic_feature_lengths, wav_paths, ref_spk_embs, ref_spk_names, None)
class CustomDataset(Dataset):
    """Evaluation dataset driven by a YAML file describing conversion pairs.

    Each YAML entry maps an output wav name to a dict with at least:
        src: path of the source waveform
        ref: list of reference waveform paths
        ref_spk_name: name of the reference speaker
    """

    def __init__(self, eval_pair_list_file, spk_emb_source, **kwargs):
        super(CustomDataset, self).__init__()
        self.spk_emb_source = spk_emb_source
        if os.path.isfile(eval_pair_list_file):
            print('[Dataset] Reading custom eval pair list file: {}'.format(eval_pair_list_file))
            with open(eval_pair_list_file, 'r') as f:
                infos = yaml.load(f, Loader=yaml.FullLoader)
            # flatten {wav_name: {...}} into a list of per-item dicts
            X = [{'wav_name': k, **v} for (k, v) in infos.items()]
        else:
            raise ValueError('[Dataset] eval pair list file does not exist: {}'.format(eval_pair_list_file))
        print('[Dataset] - number of data for custom test: ' + str(len(X)))
        self.X = X

        if spk_emb_source == 'external':
            print('[Dataset] Extracting speaker emebddings')
            self.extract_spk_embs()
        else:
            # BUG FIX: the original evaluated the bare expression
            # `NotImplementedError` (a no-op) instead of raising it.
            raise NotImplementedError

    def extract_spk_embs(self):
        """Attach a 'ref_spk_embs' list of embeddings to every item in ``self.X``."""
        spk_encoder = VoiceEncoder()
        new_X = []
        for item in self.X:
            new_item = item
            new_item['ref_spk_embs'] = []
            for wav_path in new_item['ref']:
                wav = preprocess_wav(wav_path)
                embedding = spk_encoder.embed_utterance(wav)
                new_item['ref_spk_embs'].append(embedding)
            new_X.append(new_item)
        self.X = new_X

    def _load_wav(self, wav_path, fs):
        # fs=None keeps the file's native sampling rate
        (wav, sr) = librosa.load(wav_path, sr=fs)
        return (wav, sr)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        wav_name = self.X[index]['wav_name']
        input_wav_path = self.X[index]['src']
        ref_spk_embs = self.X[index]['ref_spk_embs']
        ref_spk_name = self.X[index]['ref_spk_name']
        (input_wav_original, _) = self._load_wav(input_wav_path, fs=None)
        (input_wav_resample, fs_resample) = self._load_wav(input_wav_path, fs=FS)
        if self.spk_emb_source == 'external':
            ref_spk_embs = np.stack(ref_spk_embs, axis=0)
            # average the reference embeddings into one speaker embedding
            ref_spk_emb = np.mean(ref_spk_embs, axis=0)
        else:
            ref_spk_emb = None
        return (input_wav_resample, input_wav_original, ref_spk_emb,
                input_wav_path, ref_spk_name, wav_name)

    def collate_fn(self, batch):
        """Sort by descending original-waveform length; no acoustic features here."""
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        wavs = [torch.from_numpy(sorted_batch[i][0]) for i in range(bs)]
        wavs_2 = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        ref_spk_embs = torch.from_numpy(np.array([sorted_batch[i][2] for i in range(bs)]))
        wav_paths = [sorted_batch[i][3] for i in range(bs)]
        ref_spk_names = [sorted_batch[i][4] for i in range(bs)]
        save_wav_names = [sorted_batch[i][5] for i in range(bs)]
        return (wavs, wavs_2, None, None, None, wav_paths,
                ref_spk_embs, ref_spk_names, save_wav_names)
def get_basename(path):
    """Return the file name of *path* without directory or extension."""
    filename = os.path.split(path)[-1]
    stem, _ext = os.path.splitext(filename)
    return stem
def get_trgspk_and_number(basename):
    """Split a converted-file basename into (target speaker, utterance number).

    Basenames are expected to look like ``<trgspk>_<srcspk>_<number>[...]``.
    A basename without underscores is returned unchanged.
    NOTE(review): callers unpack two values, so the no-underscore fallback
    only works for length-2 strings — confirm it is ever hit.
    """
    if '_' not in basename:
        return basename
    trgspk, _srcspk, number = basename.split('_')[:3]
    return (trgspk, number)
def _calculate_asv_score(model, file_list, gt_root, threshold):
    """Compute per-file ASV accept decisions and the overall accept rate (%)."""
    results = {}
    for cvt_wav_path in tqdm(file_list):
        basename = get_basename(cvt_wav_path)
        trgspk, number = get_trgspk_and_number(basename)
        gt_wav_path = os.path.join(gt_root, trgspk, number + '.wav')
        results[basename] = calculate_accept(cvt_wav_path, gt_wav_path, model, threshold)
    accept_rate = 100.0 * float(np.mean(np.array(list(results.values()))))
    return results, accept_rate
def _calculate_asr_score(model, device, file_list, groundtruths):
    """Transcribe converted files and accumulate character/word error rates."""

    def _error_rate(counts):
        # (S + D + I) / (S + D + H) * 100
        errors = counts['substitutions'] + counts['deletions'] + counts['insertions']
        total = counts['substitutions'] + counts['deletions'] + counts['hits']
        return float(errors) / float(total) * 100.0

    keys = ['hits', 'substitutions', 'deletions', 'insertions']
    ers = {}
    c_results = dict.fromkeys(keys, 0)
    w_results = dict.fromkeys(keys, 0)
    for cvt_wav_path in tqdm(file_list):
        basename = get_basename(cvt_wav_path)
        _, number = get_trgspk_and_number(basename)
        # ground-truth keys drop the leading character of the number
        groundtruth = groundtruths[number[1:]]
        wav, _ = librosa.load(cvt_wav_path, sr=16000)
        transcription = transcribe(model, device, wav)
        c_result, w_result, norm_groundtruth, norm_transcription = calculate_measures(groundtruth, transcription)
        ers[basename] = [c_result['cer'] * 100.0, w_result['wer'] * 100.0,
                         norm_transcription, norm_groundtruth]
        for k in keys:
            c_results[k] += c_result[k]
            w_results[k] += w_result[k]
    return ers, _error_rate(c_results), _error_rate(w_results)
def _calculate_mcd_f0(file_list, gt_root, f0_all, results):
    """Append [basename, MCD, f0RMSE, f0CORR, DDUR] per file to *results*.

    *results* is a shared (multiprocessing) list; this worker only appends.
    """
    for cvt_wav_path in file_list:
        basename = get_basename(cvt_wav_path)
        trgspk, number = get_trgspk_and_number(basename)
        f0min = f0_all[trgspk]['f0min']
        f0max = f0_all[trgspk]['f0max']
        gt_wav_path = os.path.join(gt_root, trgspk, number + '.wav')
        gt_wav, gt_fs = librosa.load(gt_wav_path, sr=None)
        # load the converted wav at the ground-truth rate before scoring
        cvt_wav, _ = librosa.load(cvt_wav_path, sr=gt_fs)
        mcd, f0rmse, f0corr, ddur = calculate_mcd_f0(cvt_wav, gt_wav, gt_fs, f0min, f0max)
        results.append([basename, mcd, f0rmse, f0corr, ddur])
def get_parser():
    """Build the CLI argument parser for the objective evaluation script."""
    parser = argparse.ArgumentParser(description='objective evaluation script.')
    add = parser.add_argument
    add('--wavdir', required=True, type=str, help='directory for converted waveforms')
    add('--task', required=True, type=str, choices=['task1', 'task2'], help='task 1 or task 2')
    add('--samples', required=True, type=int, help='number of reference samples')
    add('--data_root', type=str, default='./data', help='directory of data')
    add('--log_path', type=str, default=None,
        help='path of output log. If not specified, output to <wavdir>/obj.log')
    add('--n_jobs', default=10, type=int, help='number of parallel jobs')
    return parser
def main():
    """Objective evaluation entry point: ASV, ASR and (task1) MCD/f0 metrics."""
    args = get_parser().parse_args()
    task = args.task
    gt_root = os.path.join(args.data_root, 'vcc2020')
    f0_path = os.path.join(args.data_root, 'f0.yaml')
    threshold_path = os.path.join(args.data_root, 'thresholds.yaml')
    transcription_path = os.path.join(args.data_root, 'vcc2020', 'prompts', 'Eng_transcriptions.txt')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # speaker-dependent f0 search ranges
    with open(f0_path, 'r') as f:
        f0_all = yaml.load(f, Loader=yaml.FullLoader)

    # ground-truth transcriptions keyed by utterance number
    with open(transcription_path, 'r') as f:
        lines = f.read().splitlines()
    groundtruths = {line.split(' ')[0]: ' '.join(line.split(' ')[1:]) for line in lines}

    # locate the converted waveforms
    if args.task == 'task1':
        query_string = f'TE*300??_{args.samples}samples*.wav'
    elif args.task == 'task2':
        # BUG FIX: the original was a plain string missing the f-prefix, so
        # the literal text "{args.samples}" never matched any file.
        query_string = f'T[FGM]*300??_{args.samples}samples*.wav'
    converted_files = sorted(find_files(args.wavdir, query=query_string))
    print('number of reference samples = {}'.format(args.samples))
    print('number of utterances = {}'.format(len(converted_files)))

    # ASV threshold: load the cached value or compute and cache it
    threshold = None
    threshold_all = {}
    if os.path.exists(threshold_path):
        with open(threshold_path, 'r') as f:
            threshold_all = yaml.load(f, Loader=yaml.FullLoader)
        if threshold_all and (task in threshold_all):
            (equal_error_rate, threshold) = threshold_all[task]
    if not threshold:
        (equal_error_rate, threshold) = calculate_threshold(gt_root, task, device)
        if threshold_all:
            threshold_all[task] = [equal_error_rate, threshold]
        else:
            threshold_all = {task: [equal_error_rate, threshold]}
        with open(threshold_path, 'w') as f:
            yaml.safe_dump(threshold_all, f)
    print(f'[INFO]: Equal error rate: {equal_error_rate}')
    print(f'[INFO]: Threshold: {threshold}')

    print('Calculating ASV-based score...')
    asv_model = load_asv_model(device)
    (accept_results, accept_rate) = _calculate_asv_score(asv_model, converted_files, gt_root, threshold)

    print('Calculating ASR-based score...')
    asr_model = load_asr_model(device)
    (ers, cer, wer) = _calculate_asr_score(asr_model, device, converted_files, groundtruths)

    if task == 'task1':
        print('Calculating MCD and f0-related scores...')
        # fan the MCD/f0 computation out over n_jobs worker processes
        file_lists = np.array_split(converted_files, args.n_jobs)
        file_lists = [f_list.tolist() for f_list in file_lists]
        with mp.Manager() as manager:
            results = manager.list()
            processes = []
            for f in file_lists:
                p = mp.Process(target=_calculate_mcd_f0, args=(f, gt_root, f0_all, results))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
            results = sorted(results, key=(lambda x: x[0]))
            results = [(result + ers[result[0]]) + [accept_results[result[0]]] for result in results]
    else:
        results = []
        for f in converted_files:
            basename = get_basename(f)
            results.append(([basename] + ers[basename]) + [accept_results[basename]])

    # BUG FIX: mCER/mWER/mACCEPT were previously assigned only inside the
    # task1 branch, raising NameError when summarizing task2 results.
    mCER = cer
    mWER = wer
    mACCEPT = accept_rate
    log_path = args.log_path if args.log_path else os.path.join(args.wavdir, f'obj_{args.samples}samples.log')
    with open(log_path, 'w') as f:
        if task == 'task1':
            mMCD = np.mean(np.array([result[1] for result in results]))
            mf0RMSE = np.mean(np.array([result[2] for result in results]))
            mf0CORR = np.mean(np.array([result[3] for result in results]))
            mDDUR = np.mean(np.array([result[4] for result in results]))
        for result in results:
            if task == 'task1':
                f.write('{} {:.2f} {:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
            elif task == 'task2':
                f.write('{} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
        if task == 'task1':
            print('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
            f.write('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
        elif task == 'task2':
            print('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
            f.write('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
def get_parser():
    """Build the CLI argument parser for the result-extraction script."""
    parser = argparse.ArgumentParser(
        description='Extract results.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    add = parser.add_argument
    add('--upstream', type=str, required=True, help='upstream')
    add('--task', type=str, required=True, help='task')
    add('--tag', type=str, required=True, help='tag')
    add('--vocoder', type=str, required=True, help='vocoder name')
    add('--expdir', type=str, default='../../result/downstream', help='expdir')
    add('--num_samples', default=10, type=int)
    add('--start_epoch', default=10000, type=int)
    add('--end_epoch', default=50000, type=int)
    add('--step_epoch', default=1000, type=int)
    add('--out', '-O', type=str, help='The output filename. If omitted, then output to sys.stdout')
    return parser
def grep(filepath, query):
    """Return every line of *filepath* that contains *query*, trailing whitespace stripped."""
    with open(filepath, 'r') as f:
        return [line.rstrip() for line in f if query in line]
def encoder_init(m):
    """Initialize encoder parameters: Xavier-uniform (ReLU gain) for Conv1d weights."""
    if not isinstance(m, torch.nn.Conv1d):
        return
    gain = torch.nn.init.calculate_gain('relu')
    torch.nn.init.xavier_uniform_(m.weight, gain)
class Taco2Encoder(torch.nn.Module):
    """Encoder module of the Tacotron2 TTS model.

    Reference:
        "Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions"
        https://arxiv.org/abs/1712.05884
    """

    def __init__(self, idim, elayers=1, eunits=512, econv_layers=3, econv_chans=512,
                 econv_filts=5, use_batch_norm=True, use_residual=False, dropout_rate=0.5):
        """Initialize Tacotron2 encoder module.

        Args:
            idim (int): Dimension of the inputs.
            elayers (int, optional): Number of encoder BLSTM layers.
            eunits (int, optional): Number of encoder BLSTM units.
            econv_layers (int, optional): Number of encoder conv layers.
            econv_chans (int, optional): Number of encoder conv filter channels.
            econv_filts (int, optional): Encoder conv filter size.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            use_residual (bool, optional): Whether to use residual connections.
            dropout_rate (float, optional): Dropout rate.
        """
        super(Taco2Encoder, self).__init__()
        self.idim = idim
        self.use_residual = use_residual

        # project inputs to the conv-channel dimension
        self.input_layer = torch.nn.Linear(idim, econv_chans)
        if econv_layers > 0:
            self.convs = torch.nn.ModuleList()
            for layer in range(econv_layers):
                ichans = econv_chans
                modules = [torch.nn.Conv1d(ichans, econv_chans, econv_filts, stride=1,
                                           padding=(econv_filts - 1) // 2, bias=False)]
                if use_batch_norm:
                    modules.append(torch.nn.BatchNorm1d(econv_chans))
                modules += [torch.nn.ReLU(), torch.nn.Dropout(dropout_rate)]
                self.convs += [torch.nn.Sequential(*modules)]
        else:
            self.convs = None
        if elayers > 0:
            # BUG FIX: the original fallback was the undefined name `embed_dim`
            # (NameError when econv_layers == 0). The BLSTM input is always
            # econv_chans because input_layer projects to that dimension
            # regardless of whether the conv stack exists.
            iunits = econv_chans
            self.blstm = torch.nn.LSTM(iunits, eunits // 2, elayers,
                                       batch_first=True, bidirectional=True)
        else:
            self.blstm = None
        self.apply(encoder_init)

    def forward(self, xs, ilens=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Padded acoustic feature batch (B, Lmax, idim).
            ilens (LongTensor or list, optional): Valid lengths per sequence;
                required when the BLSTM is used.

        Returns:
            Tensor (B, Lmax, eunits) and lengths when the BLSTM is used;
            the conv output (B, Lmax, econv_chans) alone otherwise.
        """
        xs = self.input_layer(xs).transpose(1, 2)
        if self.convs is not None:
            for i in range(len(self.convs)):
                if self.use_residual:
                    # NOTE(review): in-place += on the conv input; kept as in
                    # the original — confirm autograd safety if use_residual
                    # is ever enabled.
                    xs += self.convs[i](xs)
                else:
                    xs = self.convs[i](xs)
        if self.blstm is None:
            return xs.transpose(1, 2)
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        xs = pack_padded_sequence(xs.transpose(1, 2), ilens.cpu(), batch_first=True)
        self.blstm.flatten_parameters()
        xs, _ = self.blstm(xs)
        xs, hlens = pad_packed_sequence(xs, batch_first=True)
        return xs, hlens
class Taco2Prenet(torch.nn.Module):
    """Prenet module for the decoder of Tacotron2.

    A stack of Linear+ReLU layers, each followed by dropout, applied to the
    previous output frame before the autoregressive LSTM.

    Note:
        Dropout stays active even in evaluation mode; this is intentional
        (see "Natural TTS Synthesis by Conditioning WaveNet on Mel
        Spectrogram Predictions", https://arxiv.org/abs/1712.05884) and
        helps alleviate the exposure-bias problem.
    """

    def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5):
        super(Taco2Prenet, self).__init__()
        self.dropout_rate = dropout_rate
        self.prenet = torch.nn.ModuleList()
        # first layer maps idim -> n_units, the rest n_units -> n_units
        in_dims = ([idim] + [n_units] * (n_layers - 1))[:n_layers]
        for in_dim in in_dims:
            self.prenet.append(
                torch.nn.Sequential(torch.nn.Linear(in_dim, n_units), torch.nn.ReLU())
            )

    def forward(self, x):
        # F.dropout defaults to training=True, so dropout is always applied
        if not len(self.prenet):
            return F.dropout(x, self.dropout_rate)
        for layer in self.prenet:
            x = F.dropout(layer(x), self.dropout_rate)
        return x
class RNNLayer(nn.Module):
    """Whole-sequence RNN wrapper with optional layer norm, dropout,
    projection and time-downsampling."""

    def __init__(self, input_dim, module, bidirection, dim, dropout, layer_norm, sample_rate, proj):
        super(RNNLayer, self).__init__()
        self.out_dim = 2 * dim if bidirection else dim
        self.dropout = dropout
        self.layer_norm = layer_norm
        self.sample_rate = sample_rate
        self.proj = proj

        # e.g. module='lstm' -> nn.LSTM
        rnn_cls = getattr(nn, module.upper())
        self.layer = rnn_cls(input_dim, dim, bidirectional=bidirection,
                             num_layers=1, batch_first=True)
        if self.layer_norm:
            self.ln = nn.LayerNorm(self.out_dim)
        if self.dropout > 0:
            self.dp = nn.Dropout(p=dropout)
        if self.proj:
            self.pj = nn.Linear(self.out_dim, self.out_dim)

    def forward(self, input_x, x_len):
        if not self.training:
            self.layer.flatten_parameters()
        packed = pack_padded_sequence(input_x, x_len, batch_first=True, enforce_sorted=False)
        output, _ = self.layer(packed)
        output, x_len = pad_packed_sequence(output, batch_first=True)
        if self.layer_norm:
            output = self.ln(output)
        if self.dropout > 0:
            output = self.dp(output)
        if self.sample_rate > 1:
            # time-downsample by dropping frames
            output, x_len = downsample(output, x_len, self.sample_rate, 'drop')
        if self.proj:
            output = torch.tanh(self.pj(output))
        return output, x_len
class RNNCell(nn.Module):
    """Single-step RNN cell wrapper with optional layer norm, dropout and projection."""

    def __init__(self, input_dim, module, dim, dropout, layer_norm, proj):
        super(RNNCell, self).__init__()
        self.out_dim = dim
        self.dropout = dropout
        self.layer_norm = layer_norm
        self.proj = proj
        # e.g. module='lstm' -> nn.LSTMCell
        cell_cls = getattr(nn, module.upper() + 'Cell')
        self.cell = cell_cls(input_dim, dim)
        if self.layer_norm:
            self.ln = nn.LayerNorm(dim)
        if self.dropout > 0:
            self.dp = nn.Dropout(p=dropout)
        if self.proj:
            self.pj = nn.Linear(dim, dim)

    def forward(self, input_x, z, c):
        # z/c: previous hidden and cell state of this cell
        new_z, new_c = self.cell(input_x, (z, c))
        if self.layer_norm:
            new_z = self.ln(new_z)
        if self.dropout > 0:
            new_z = self.dp(new_z)
        if self.proj:
            new_z = torch.tanh(self.pj(new_z))
        return new_z, new_c
class Model(nn.Module):
    """Voice-conversion model mapping upstream features to target acoustic features.

    An encoder (Tacotron2 conv/BLSTM or FFN) encodes time-resampled upstream
    features, a reference speaker embedding is fused in ('add' or 'concat'),
    and a stack of LSTMP layers — autoregressive cells (``ar=True``) or
    whole-sequence layers — predicts the output frames.

    NOTE(review): the ``lstmp_proj_dim`` argument is accepted but unused.
    """

    def __init__(self, input_dim, output_dim, resample_ratio, stats, ar, encoder_type, hidden_dim, lstmp_layers, lstmp_dropout_rate, lstmp_proj_dim, lstmp_layernorm, spk_emb_integration_type, spk_emb_dim, prenet_layers=2, prenet_dim=256, prenet_dropout_rate=0.5, **kwargs):
        super(Model, self).__init__()
        self.ar = ar
        self.encoder_type = encoder_type
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.resample_ratio = resample_ratio
        self.spk_emb_integration_type = spk_emb_integration_type
        self.spk_emb_dim = spk_emb_dim
        # 'add' fusion requires matching dimensions
        if (spk_emb_integration_type == 'add'):
            assert (spk_emb_dim == hidden_dim)
        # target feature statistics for normalization; `stats` is presumably a
        # fitted sklearn StandardScaler (mean_/scale_) — TODO confirm
        self.register_buffer('target_mean', torch.from_numpy(stats.mean_).float())
        self.register_buffer('target_scale', torch.from_numpy(stats.scale_).float())
        if (encoder_type == 'taco2'):
            self.encoder = Taco2Encoder(input_dim, eunits=hidden_dim)
        elif (encoder_type == 'ffn'):
            self.encoder = torch.nn.Sequential(torch.nn.Linear(input_dim, hidden_dim), torch.nn.ReLU())
        else:
            raise ValueError('Encoder type not supported.')
        if (self.spk_emb_integration_type == 'add'):
            self.spk_emb_projection = torch.nn.Linear(spk_emb_dim, hidden_dim)
        elif (self.spk_emb_integration_type == 'concat'):
            self.spk_emb_projection = torch.nn.Linear((hidden_dim + spk_emb_dim), hidden_dim)
        else:
            raise ValueError('Integration type not supported.')
        # prenet processes the previous output frame during AR decoding
        self.prenet = Taco2Prenet(idim=output_dim, n_layers=prenet_layers, n_units=prenet_dim, dropout_rate=prenet_dropout_rate)
        self.lstmps = nn.ModuleList()
        for i in range(lstmp_layers):
            if ar:
                # the first AR cell also consumes the prenet-processed previous frame
                prev_dim = (output_dim if (prenet_layers == 0) else prenet_dim)
                rnn_input_dim = ((hidden_dim + prev_dim) if (i == 0) else hidden_dim)
                rnn_layer = RNNCell(rnn_input_dim, 'LSTM', hidden_dim, lstmp_dropout_rate, lstmp_layernorm, proj=True)
            else:
                rnn_input_dim = hidden_dim
                rnn_layer = RNNLayer(rnn_input_dim, 'LSTM', False, hidden_dim, lstmp_dropout_rate, lstmp_layernorm, sample_rate=1, proj=True)
            self.lstmps.append(rnn_layer)
        # final projection from hidden states to output features
        self.proj = torch.nn.Linear(hidden_dim, output_dim)

    def normalize(self, x):
        # standardize x with the registered target statistics
        return ((x - self.target_mean) / self.target_scale)

    def _integrate_with_spk_emb(self, hs, spembs):
        """Integrate a speaker embedding with the hidden state sequence.

        Args:
            hs (Tensor): Batch of hidden state sequences (B, Lmax, hdim).
            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).

        Returns:
            Tensor: Speaker-conditioned hidden states (B, Lmax, hdim).
        """
        if (self.spk_emb_integration_type == 'add'):
            # project the L2-normalized embedding and add it to every frame
            spembs = self.spk_emb_projection(F.normalize(spembs))
            hs = (hs + spembs.unsqueeze(1))
        elif (self.spk_emb_integration_type == 'concat'):
            # broadcast the embedding over time, concatenate, project back
            spembs = F.normalize(spembs).unsqueeze(1).expand((- 1), hs.size(1), (- 1))
            hs = self.spk_emb_projection(torch.cat([hs, spembs], dim=(- 1)))
        else:
            raise NotImplementedError('support only add or concat.')
        return hs

    def forward(self, features, lens, ref_spk_embs, targets=None):
        """Calculate forward propagation.

        Args:
            features: Batch of input feature sequences (B, Lmax, idim).
            lens: Valid lengths of each sequence in the batch.
            ref_spk_embs: Reference speaker embeddings (B, spk_emb_dim).
            targets: Padded target feature sequences (B, Lmax, odim); when
                given, the AR loop uses them for teacher forcing instead of
                its own predictions.

        Returns:
            Tuple of predicted features (B, Lmax, odim) and the lengths.
        """
        B = features.shape[0]
        # resample along time so upstream frames match the target frame rate
        features = features.permute(0, 2, 1)
        resampled_features = F.interpolate(features, scale_factor=self.resample_ratio)
        resampled_features = resampled_features.permute(0, 2, 1)
        lens = (lens * self.resample_ratio)
        if (self.encoder_type == 'taco2'):
            (encoder_states, lens) = self.encoder(resampled_features, lens)
        elif (self.encoder_type == 'ffn'):
            encoder_states = self.encoder(resampled_features)
        encoder_states = self._integrate_with_spk_emb(encoder_states, ref_spk_embs)
        if self.ar:
            # autoregressive decoding, one frame at a time
            if (targets is not None):
                # time-first layout for per-step teacher forcing
                targets = targets.transpose(0, 1)
            predicted_list = []
            # per-layer hidden (z) and cell (c) states, zero-initialized
            c_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            z_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            for _ in range(1, len(self.lstmps)):
                c_list += [encoder_states.new_zeros(B, self.hidden_dim)]
                z_list += [encoder_states.new_zeros(B, self.hidden_dim)]
            prev_out = encoder_states.new_zeros(B, self.output_dim)
            for (t, encoder_state) in enumerate(encoder_states.transpose(0, 1)):
                # condition on the encoder frame plus the prenet-processed previous output
                concat = torch.cat([encoder_state, self.prenet(prev_out)], dim=1)
                for (i, lstmp) in enumerate(self.lstmps):
                    lstmp_input = (concat if (i == 0) else z_list[(i - 1)])
                    (z_list[i], c_list[i]) = lstmp(lstmp_input, z_list[i], c_list[i])
                predicted_list += [self.proj(z_list[(- 1)]).view(B, self.output_dim, (- 1))]
                # teacher forcing when targets are given; otherwise feed back the prediction
                prev_out = (targets[t] if (targets is not None) else predicted_list[(- 1)].squeeze((- 1)))
                prev_out = self.normalize(prev_out)
            predicted = torch.cat(predicted_list, dim=2)
            predicted = predicted.transpose(1, 2)
        else:
            # non-AR path: run the whole sequence through each LSTMP layer
            predicted = encoder_states
            for (i, lstmp) in enumerate(self.lstmps):
                (predicted, lens) = lstmp(predicted, lens)
            predicted = self.proj(predicted)
        return (predicted, lens)
class VCC2020Dataset(Dataset):
    """VCC2020 dataset for one target speaker.

    train/dev: the target speaker's own waveforms (lists are per-language).
    test: source-speaker evaluation waveforms to be converted.
    """

    def __init__(self, split, trgspk, data_root, lists_root, fbank_config, train_dev_seed=1337, **kwargs):
        super(VCC2020Dataset, self).__init__()
        self.trgspk = trgspk
        # second character of the speaker ID encodes the target language
        self.trg_lang = trgspk[1]
        self.fbank_config = fbank_config
        X = []
        if ((split == 'train') or (split == 'dev')):
            file_list = open(os.path.join(lists_root, (((self.trg_lang + '_') + split) + '_list.txt'))).read().splitlines()
            for number in file_list:
                wav_path = os.path.join(data_root, trgspk, (number + '.wav'))
                # keep only utterances that actually exist on disk
                if os.path.isfile(wav_path):
                    X.append(wav_path)
            # deterministic shuffle so train/dev order is reproducible
            random.seed(train_dev_seed)
            random.shuffle(X)
        elif (split == 'test'):
            file_list = open(os.path.join(lists_root, 'eval_list.txt')).read().splitlines()
            # every (source speaker, utterance number) combination
            X = [os.path.join(data_root, srcspk, (number + '.wav')) for number in file_list for srcspk in SRCSPKS]
        else:
            raise ValueError("Invalid 'split' argument for dataset: VCC2020Dataset!")
        print(((('[Dataset] - number of data for ' + split) + ': ') + str(len(X))))
        self.X = X

    def _load_wav(self, wav_path, fs):
        # fs=None keeps the file's native sampling rate
        (wav, sr) = librosa.load(wav_path, sr=fs)
        return (wav, sr)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        wav_path = self.X[index]
        # native-rate waveform for feature extraction ...
        (wav_original, fs_original) = self._load_wav(wav_path, fs=None)
        # ... and a resampled copy at the fixed upstream-model rate FS
        (wav_resample, fs_resample) = self._load_wav(wav_path, fs=FS)
        lmspc = logmelspectrogram(x=wav_original, fs=fs_original, n_mels=self.fbank_config['n_mels'], n_fft=self.fbank_config['n_fft'], n_shift=self.fbank_config['n_shift'], win_length=self.fbank_config['win_length'], window=self.fbank_config['window'], fmin=self.fbank_config['fmin'], fmax=self.fbank_config['fmax'])
        return (wav_resample, wav_original, lmspc, wav_path)

    def collate_fn(self, batch):
        """Sort by descending original-waveform length and pad acoustic features."""
        sorted_batch = sorted(batch, key=(lambda x: (- x[1].shape[0])))
        bs = len(sorted_batch)
        wavs = [torch.from_numpy(sorted_batch[i][0]) for i in range(bs)]
        wavs_2 = [torch.from_numpy(sorted_batch[i][1]) for i in range(bs)]
        acoustic_features = [torch.from_numpy(sorted_batch[i][2]) for i in range(bs)]
        acoustic_features_padded = pad_sequence(acoustic_features, batch_first=True)
        acoustic_feature_lengths = torch.from_numpy(np.array([acoustic_feature.size(0) for acoustic_feature in acoustic_features]))
        wav_paths = [sorted_batch[i][3] for i in range(bs)]
        return (wavs, wavs_2, acoustic_features, acoustic_features_padded, acoustic_feature_lengths, wav_paths)
class CustomDataset(Dataset):
    """Evaluation dataset over a plain text file listing one wav path per line."""

    def __init__(self, eval_list_file, **kwargs):
        super(CustomDataset, self).__init__()
        if not os.path.isfile(eval_list_file):
            raise ValueError('[Dataset] eval list file does not exist: {}'.format(eval_list_file))
        print('[Dataset] Reading custom eval list file: {}'.format(eval_list_file))
        X = open(eval_list_file, 'r').read().splitlines()
        print('[Dataset] - number of data for custom test: ' + str(len(X)))
        self.X = X

    def _load_wav(self, wav_path, fs):
        # fs=None keeps the file's native sampling rate
        wav, sr = librosa.load(wav_path, sr=fs)
        return wav, sr

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        wav_path = self.X[index]
        wav_original, _ = self._load_wav(wav_path, fs=None)
        wav_resample, _ = self._load_wav(wav_path, fs=FS)
        return wav_resample, wav_original, wav_path

    def collate_fn(self, batch):
        """Sort by descending original-waveform length; placeholders keep the
        tuple layout aligned with the feature-producing datasets."""
        ordered = sorted(batch, key=lambda item: -item[1].shape[0])
        wavs = [torch.from_numpy(item[0]) for item in ordered]
        wavs_2 = [torch.from_numpy(item[1]) for item in ordered]
        wav_paths = [item[2] for item in ordered]
        return wavs, wavs_2, None, None, None, wav_paths
def get_basename(path):
    """Strip directory and extension from *path*, returning the bare name."""
    _, filename = os.path.split(path)
    return os.path.splitext(filename)[0]
def get_number(basename):
    """Extract the utterance number from a converted-file basename.

    Basenames look like ``<spk>_<number>[...]``; a basename without an
    underscore is assumed to be the number itself.
    """
    if '_' not in basename:
        return basename
    return basename.split('_')[1]
def _calculate_asv_score(model, file_list, gt_root, trgspk, threshold):
    """Per-file ASV accept decisions against *trgspk* ground truth, plus accept rate (%)."""
    results = {}
    for cvt_wav_path in tqdm(file_list):
        basename = get_basename(cvt_wav_path)
        number = get_number(basename)
        gt_wav_path = os.path.join(gt_root, trgspk, number + '.wav')
        results[basename] = calculate_accept(cvt_wav_path, gt_wav_path, model, threshold)
    accept_rate = 100.0 * float(np.mean(np.array(list(results.values()))))
    return results, accept_rate
def _calculate_asr_score(model, device, file_list, groundtruths):
    """Transcribe converted files and accumulate character/word error rates."""

    def _error_rate(counts):
        # (S + D + I) / (S + D + H) * 100
        errors = counts['substitutions'] + counts['deletions'] + counts['insertions']
        total = counts['substitutions'] + counts['deletions'] + counts['hits']
        return float(errors) / float(total) * 100.0

    keys = ['hits', 'substitutions', 'deletions', 'insertions']
    ers = {}
    c_results = dict.fromkeys(keys, 0)
    w_results = dict.fromkeys(keys, 0)
    for cvt_wav_path in tqdm(file_list):
        basename = get_basename(cvt_wav_path)
        number = get_number(basename)
        # ground-truth keys drop the leading character of the number
        groundtruth = groundtruths[number[1:]]
        wav, _ = librosa.load(cvt_wav_path, sr=16000)
        transcription = transcribe(model, device, wav)
        c_result, w_result, norm_groundtruth, norm_transcription = calculate_measures(groundtruth, transcription)
        ers[basename] = [c_result['cer'] * 100.0, w_result['wer'] * 100.0,
                         norm_transcription, norm_groundtruth]
        for k in keys:
            c_results[k] += c_result[k]
            w_results[k] += w_result[k]
    return ers, _error_rate(c_results), _error_rate(w_results)
def _calculate_mcd_f0(file_list, gt_root, trgspk, f0min, f0max, results):
    """Append [basename, MCD, f0RMSE, f0CORR, DDUR] per file to *results*.

    *results* is a shared (multiprocessing) list; this worker only appends.
    """
    for cvt_wav_path in file_list:
        basename = get_basename(cvt_wav_path)
        number = get_number(basename)
        gt_wav_path = os.path.join(gt_root, trgspk, number + '.wav')
        cvt_wav, cvt_fs = librosa.load(cvt_wav_path, sr=None)
        gt_wav, gt_fs = librosa.load(gt_wav_path, sr=None)
        # both waveforms must share a sampling rate for the metrics below
        assert cvt_fs == gt_fs
        mcd, f0rmse, f0corr, ddur = calculate_mcd_f0(cvt_wav, gt_wav, gt_fs, f0min, f0max)
        results.append([basename, mcd, f0rmse, f0corr, ddur])
def get_parser():
    """Build the CLI argument parser for the objective evaluation script."""
    parser = argparse.ArgumentParser(description='objective evaluation script.')
    add = parser.add_argument
    add('--wavdir', required=True, type=str, help='directory for converted waveforms')
    add('--trgspk', required=True, type=str, help='target speaker')
    add('--data_root', type=str, default='./data', help='directory of data')
    add('--log_path', type=str, default=None,
        help='path of output log. If not specified, output to <wavdir>/obj.log')
    add('--n_jobs', default=10, type=int, help='number of parallel jobs')
    return parser
def main():
    """Run the objective evaluation over a directory of converted waveforms.

    Computes ASV accept rate, ASR CER/WER and (for task1 only) MCD/f0
    metrics, then writes a per-utterance and summary log.
    """
    args = get_parser().parse_args()
    trgspk = args.trgspk
    # VCC2020 convention: a speaker code like 'TEF1' has 'E' in position 1
    # for English targets (task1); everything else is cross-lingual (task2).
    task = 'task1' if trgspk[1] == 'E' else 'task2'
    gt_root = os.path.join(args.data_root, 'vcc2020')
    f0_path = os.path.join(args.data_root, 'f0.yaml')
    threshold_path = os.path.join(args.data_root, 'thresholds.yaml')
    transcription_path = os.path.join(args.data_root, 'vcc2020', 'prompts', 'Eng_transcriptions.txt')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # f0 search range of the target speaker
    with open(f0_path, 'r') as f:
        f0_all = yaml.load(f, Loader=yaml.FullLoader)
    f0min = f0_all[trgspk]['f0min']
    f0max = f0_all[trgspk]['f0max']

    # ground-truth transcriptions keyed by utterance number
    with open(transcription_path, 'r') as f:
        lines = f.read().splitlines()
    groundtruths = {line.split(' ')[0]: ' '.join(line.split(' ')[1:]) for line in lines}

    converted_files = sorted(find_files(args.wavdir, query='*300??*.wav'))
    print('number of utterances = {}'.format(len(converted_files)))

    # Load the cached ASV decision threshold for this task, or compute it
    # once and persist it back to thresholds.yaml.
    threshold = None
    threshold_all = {}
    if os.path.exists(threshold_path):
        with open(threshold_path, 'r') as f:
            threshold_all = yaml.load(f, Loader=yaml.FullLoader)
        if threshold_all and task in threshold_all:
            equal_error_rate, threshold = threshold_all[task]
    if not threshold:
        equal_error_rate, threshold = calculate_threshold(gt_root, task, device)
        if threshold_all:
            threshold_all[task] = [equal_error_rate, threshold]
        else:
            threshold_all = {task: [equal_error_rate, threshold]}
        with open(threshold_path, 'w') as f:
            yaml.safe_dump(threshold_all, f)
    print(f'[INFO]: Equal error rate: {equal_error_rate}')
    print(f'[INFO]: Threshold: {threshold}')

    print('Calculating ASV-based score...')
    asv_model = load_asv_model(device)
    accept_results, accept_rate = _calculate_asv_score(asv_model, converted_files, gt_root, trgspk, threshold)

    print('Calculating ASR-based score...')
    asr_model = load_asr_model(device)
    ers, cer, wer = _calculate_asr_score(asr_model, device, converted_files, groundtruths)

    if task == 'task1':
        print('Calculating MCD and f0-related scores...')
        # Fan the files out over n_jobs worker processes; results accumulate
        # in a manager list and are re-sorted by basename afterwards.
        file_lists = np.array_split(converted_files, args.n_jobs)
        file_lists = [f_list.tolist() for f_list in file_lists]
        with mp.Manager() as manager:
            results = manager.list()
            processes = []
            for f in file_lists:
                p = mp.Process(target=_calculate_mcd_f0, args=(f, gt_root, trgspk, f0min, f0max, results))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
            results = sorted(results, key=lambda x: x[0])
            results = [result + ers[result[0]] + [accept_results[result[0]]] for result in results]
    else:
        results = []
        for f in converted_files:
            basename = get_basename(f)
            results.append([basename] + ers[basename] + [accept_results[basename]])

    log_path = args.log_path if args.log_path else os.path.join(args.wavdir, 'obj.log')
    with open(log_path, 'w') as f:
        if task == 'task1':
            mMCD = np.mean(np.array([result[1] for result in results]))
            mf0RMSE = np.mean(np.array([result[2] for result in results]))
            mf0CORR = np.mean(np.array([result[3] for result in results]))
            mDDUR = np.mean(np.array([result[4] for result in results]))
        # BUGFIX: these three were previously assigned only inside the task1
        # branch, so the task2 summary below crashed with a NameError.
        mCER = cer
        mWER = wer
        mACCEPT = accept_rate
        for result in results:
            if task == 'task1':
                f.write('{} {:.2f} {:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
            elif task == 'task2':
                f.write('{} {:.1f} {:.1f} {} \t{} | {}\n'.format(*result))
        if task == 'task1':
            print('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
            f.write('Mean MCD, f0RMSE, f0CORR, DDUR, CER, WER, accept rate: {:.2f} {:.2f} {:.3f} {:.3f} {:.1f} {:.1f} {:.2f}'.format(mMCD, mf0RMSE, mf0CORR, mDDUR, mCER, mWER, mACCEPT))
        elif task == 'task2':
            print('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
            f.write('Mean CER, WER, accept rate: {:.1f} {:.1f} {:.2f}'.format(mCER, mWER, mACCEPT))
def get_parser():
    """Build the CLI argument parser for the result-extraction script."""
    parser = argparse.ArgumentParser(
        description='Extract results.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add = parser.add_argument
    add('--upstream', type=str, required=True, help='upstream')
    add('--task', type=str, required=True, choices=['task1', 'task2'], help='task')
    add('--tag', type=str, required=True, help='tag')
    add('--vocoder', type=str, required=True, help='vocoder name')
    add('--expdir', type=str, default='result/downstream', help='expdir')
    add('--start_epoch', default=4000, type=int)
    add('--end_epoch', default=10000, type=int)
    add('--step_epoch', default=1000, type=int)
    add('--out', '-O', type=str, help='The output filename. If omitted, then output to sys.stdout')
    return parser
def grep(filepath, query):
    """Return all lines of *filepath* (right-stripped) that contain *query*."""
    with open(filepath, 'r') as f:
        return [line.rstrip() for line in f if query in line]
def encoder_init(m):
    """Initialize encoder parameters: Xavier-uniform (ReLU gain) on Conv1d
    weights; all other module types are left untouched."""
    if not isinstance(m, torch.nn.Conv1d):
        return
    gain = torch.nn.init.calculate_gain('relu')
    torch.nn.init.xavier_uniform_(m.weight, gain)
class Taco2Encoder(torch.nn.Module):
    """Encoder module of the Tacotron2 TTS model.

    Reference:
    _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_
    https://arxiv.org/abs/1712.05884
    """

    def __init__(self, idim, elayers=1, eunits=512, econv_layers=3, econv_chans=512,
                 econv_filts=5, use_batch_norm=True, use_residual=False, dropout_rate=0.5):
        """Initialize Tacotron2 encoder module.

        Args:
            idim (int): Dimension of the inputs.
            elayers (int, optional): The number of encoder blstm layers.
            eunits (int, optional): The number of encoder blstm units.
            econv_layers (int, optional): The number of encoder conv layers.
            econv_filts (int, optional): The number of encoder conv filter size.
            econv_chans (int, optional): The number of encoder conv filter channels.
            use_batch_norm (bool, optional): Whether to use batch normalization.
            use_residual (bool, optional): Whether to use residual connection.
            dropout_rate (float, optional): Dropout rate.
        """
        super(Taco2Encoder, self).__init__()
        self.idim = idim
        self.use_residual = use_residual
        # Unlike the character-embedding Tacotron2, the input here is a
        # continuous feature, so a linear layer projects it to the conv
        # channel size first.
        self.input_layer = torch.nn.Linear(idim, econv_chans)
        if econv_layers > 0:
            self.convs = torch.nn.ModuleList()
            for layer in range(econv_layers):
                ichans = econv_chans
                conv = torch.nn.Conv1d(ichans, econv_chans, econv_filts, stride=1,
                                       padding=(econv_filts - 1) // 2, bias=False)
                if use_batch_norm:
                    self.convs += [torch.nn.Sequential(
                        conv,
                        torch.nn.BatchNorm1d(econv_chans),
                        torch.nn.ReLU(),
                        torch.nn.Dropout(dropout_rate),
                    )]
                else:
                    self.convs += [torch.nn.Sequential(
                        conv,
                        torch.nn.ReLU(),
                        torch.nn.Dropout(dropout_rate),
                    )]
        else:
            self.convs = None
        if elayers > 0:
            # BUGFIX: the original computed
            #   iunits = econv_chans if econv_layers != 0 else embed_dim
            # but ``embed_dim`` is undefined in this file (a leftover from the
            # embedding-based espnet encoder), raising NameError whenever
            # econv_layers == 0.  ``input_layer`` always projects the input to
            # ``econv_chans``, so the BLSTM input width is econv_chans in
            # every configuration.
            iunits = econv_chans
            self.blstm = torch.nn.LSTM(iunits, eunits // 2, elayers,
                                       batch_first=True, bidirectional=True)
        else:
            self.blstm = None
        self.apply(encoder_init)

    def forward(self, xs, ilens=None):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of padded acoustic features (B, Lmax, idim).
            ilens (LongTensor or list, optional): Input lengths (B,);
                required when the BLSTM is configured.

        Returns:
            Tensor (B, Lmax, econv_chans) when no BLSTM is configured,
            otherwise a tuple of the BLSTM states (B, Lmax, eunits) and
            the output lengths.
        """
        xs = self.input_layer(xs).transpose(1, 2)
        if self.convs is not None:
            for i in range(len(self.convs)):
                if self.use_residual:
                    xs += self.convs[i](xs)
                else:
                    xs = self.convs[i](xs)
        if self.blstm is None:
            return xs.transpose(1, 2)
        if not isinstance(ilens, torch.Tensor):
            ilens = torch.tensor(ilens)
        xs = pack_padded_sequence(xs.transpose(1, 2), ilens.cpu(), batch_first=True)
        self.blstm.flatten_parameters()
        xs, _ = self.blstm(xs)
        xs, hlens = pad_packed_sequence(xs, batch_first=True)
        return xs, hlens
class Taco2Prenet(torch.nn.Module): 'Prenet module for decoder of Tacotron2.\n\n The Prenet preforms nonlinear conversion\n of inputs before input to auto-regressive lstm,\n which helps alleviate the exposure bias problem.\n\n Note:\n This module alway applies dropout even in evaluation.\n See the detail in `Natural TTS Synthesis by\n Conditioning WaveNet on Mel Spectrogram Predictions`_.\n\n _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_\n https://arxiv.org/abs/1712.05884\n\n ' def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5): super(Taco2Prenet, self).__init__() self.dropout_rate = dropout_rate self.prenet = torch.nn.ModuleList() for layer in range(n_layers): n_inputs = (idim if (layer == 0) else n_units) self.prenet += [torch.nn.Sequential(torch.nn.Linear(n_inputs, n_units), torch.nn.ReLU())] def forward(self, x): if (len(self.prenet) == 0): return F.dropout(x, self.dropout_rate) for i in range(len(self.prenet)): x = F.dropout(self.prenet[i](x), self.dropout_rate) return x
class RNNLayer(nn.Module): ' RNN wrapper, includes time-downsampling' def __init__(self, input_dim, module, bidirection, dim, dropout, layer_norm, sample_rate, proj): super(RNNLayer, self).__init__() rnn_out_dim = ((2 * dim) if bidirection else dim) self.out_dim = rnn_out_dim self.dropout = dropout self.layer_norm = layer_norm self.sample_rate = sample_rate self.proj = proj self.layer = getattr(nn, module.upper())(input_dim, dim, bidirectional=bidirection, num_layers=1, batch_first=True) if self.layer_norm: self.ln = nn.LayerNorm(rnn_out_dim) if (self.dropout > 0): self.dp = nn.Dropout(p=dropout) if self.proj: self.pj = nn.Linear(rnn_out_dim, rnn_out_dim) def forward(self, input_x, x_len): if (not self.training): self.layer.flatten_parameters() input_x = pack_padded_sequence(input_x, x_len, batch_first=True, enforce_sorted=False) (output, _) = self.layer(input_x) (output, x_len) = pad_packed_sequence(output, batch_first=True) if self.layer_norm: output = self.ln(output) if (self.dropout > 0): output = self.dp(output) if (self.sample_rate > 1): (output, x_len) = downsample(output, x_len, self.sample_rate, 'drop') if self.proj: output = torch.tanh(self.pj(output)) return (output, x_len)
class RNNCell(nn.Module): ' RNN cell wrapper' def __init__(self, input_dim, module, dim, dropout, layer_norm, proj): super(RNNCell, self).__init__() rnn_out_dim = dim self.out_dim = rnn_out_dim self.dropout = dropout self.layer_norm = layer_norm self.proj = proj self.cell = getattr(nn, (module.upper() + 'Cell'))(input_dim, dim) if self.layer_norm: self.ln = nn.LayerNorm(rnn_out_dim) if (self.dropout > 0): self.dp = nn.Dropout(p=dropout) if self.proj: self.pj = nn.Linear(rnn_out_dim, rnn_out_dim) def forward(self, input_x, z, c): (new_z, new_c) = self.cell(input_x, (z, c)) if self.layer_norm: new_z = self.ln(new_z) if (self.dropout > 0): new_z = self.dp(new_z) if self.proj: new_z = torch.tanh(self.pj(new_z)) return (new_z, new_c)
class Model(nn.Module):
    """Acoustic-feature conversion model.

    An encoder (Tacotron2-style conv+BLSTM stack, or a simple feed-forward
    layer) followed by a stack of projected LSTM (LSTMP) layers.  When ``ar``
    is true, the LSTMP stack is unrolled autoregressively one frame at a
    time, feeding the previous output frame back through a Tacotron2 prenet.
    """

    def __init__(self, input_dim, output_dim, resample_ratio, stats, ar, encoder_type, hidden_dim, lstmp_layers, lstmp_dropout_rate, lstmp_proj_dim, lstmp_layernorm, prenet_layers=2, prenet_dim=256, prenet_dropout_rate=0.5, **kwargs):
        """Initialize the model.

        Args:
            input_dim (int): Dimension of the input features.
            output_dim (int): Dimension of the target acoustic features.
            resample_ratio (float): Temporal scale factor applied to the
                input sequence before encoding.
            stats: Object exposing ``mean_`` and ``scale_`` numpy arrays
                (presumably a fitted sklearn ``StandardScaler`` — TODO
                confirm) used to normalize the autoregressive feedback.
            ar (bool): Whether to run the LSTMP stack autoregressively.
            encoder_type (str): Either ``'taco2'`` or ``'ffn'``.
            hidden_dim (int): Hidden size shared by encoder and LSTMP layers.
            lstmp_layers (int): Number of LSTMP layers.
            lstmp_dropout_rate (float): Dropout rate in the LSTMP layers.
            lstmp_proj_dim: NOTE(review): accepted but currently unused; the
                projection size is effectively ``hidden_dim``.
            lstmp_layernorm (bool): Whether to layer-normalize LSTMP outputs.
            prenet_layers (int, optional): Number of prenet layers.
            prenet_dim (int, optional): Prenet hidden size.
            prenet_dropout_rate (float, optional): Prenet dropout rate.

        Raises:
            ValueError: If ``encoder_type`` is not supported.
        """
        super(Model, self).__init__()
        self.ar = ar
        self.encoder_type = encoder_type
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.resample_ratio = resample_ratio
        # Target-feature statistics are registered as buffers so they follow
        # the module across devices and are saved in the state dict.
        self.register_buffer('target_mean', torch.from_numpy(stats.mean_).float())
        self.register_buffer('target_scale', torch.from_numpy(stats.scale_).float())
        if (encoder_type == 'taco2'):
            self.encoder = Taco2Encoder(input_dim, eunits=hidden_dim)
        elif (encoder_type == 'ffn'):
            self.encoder = torch.nn.Sequential(torch.nn.Linear(input_dim, hidden_dim), torch.nn.ReLU())
        else:
            raise ValueError('Encoder type not supported.')
        # Prenet for the autoregressive feedback path (only used when ar=True).
        self.prenet = Taco2Prenet(idim=output_dim, n_layers=prenet_layers, n_units=prenet_dim, dropout_rate=prenet_dropout_rate)
        self.lstmps = nn.ModuleList()
        for i in range(lstmp_layers):
            if ar:
                # The first AR layer consumes [encoder state; prenet(prev frame)];
                # with no prenet layers the raw previous frame is fed back.
                prev_dim = (output_dim if (prenet_layers == 0) else prenet_dim)
                rnn_input_dim = ((hidden_dim + prev_dim) if (i == 0) else hidden_dim)
                rnn_layer = RNNCell(rnn_input_dim, 'LSTM', hidden_dim, lstmp_dropout_rate, lstmp_layernorm, proj=True)
            else:
                rnn_input_dim = hidden_dim
                rnn_layer = RNNLayer(rnn_input_dim, 'LSTM', False, hidden_dim, lstmp_dropout_rate, lstmp_layernorm, sample_rate=1, proj=True)
            self.lstmps.append(rnn_layer)
        # Final projection from hidden state to output features.
        self.proj = torch.nn.Linear(hidden_dim, output_dim)

    def normalize(self, x):
        """Standardize *x* with the registered target mean/scale buffers."""
        return ((x - self.target_mean) / self.target_scale)

    def forward(self, features, lens, targets=None):
        """Calculate forward propagation.

        Args:
            features: Batch of padded input feature sequences (B, Lmax, idim).
            lens: Lengths of each input sequence (B,).
            targets: Batch of padded target feature sequences (B, Lmax, odim);
                when given (training), they are used as teacher-forced
                feedback in the autoregressive loop.

        Returns:
            Tuple of predicted features (B, Lmax', odim) and output lengths.
        """
        B = features.shape[0]
        # Resample along time; F.interpolate expects (B, C, L).
        features = features.permute(0, 2, 1)
        resampled_features = F.interpolate(features, scale_factor=self.resample_ratio)
        resampled_features = resampled_features.permute(0, 2, 1)
        lens = (lens * self.resample_ratio)
        if (self.encoder_type == 'taco2'):
            (encoder_states, lens) = self.encoder(resampled_features, lens)
        elif (self.encoder_type == 'ffn'):
            encoder_states = self.encoder(resampled_features)
        if self.ar:
            # Time-major targets so targets[t] is the frame at step t.
            if (targets is not None):
                targets = targets.transpose(0, 1)
            predicted_list = []
            # Per-layer cell/hidden states, initialized to zeros.
            c_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            z_list = [encoder_states.new_zeros(B, self.hidden_dim)]
            for _ in range(1, len(self.lstmps)):
                c_list += [encoder_states.new_zeros(B, self.hidden_dim)]
                z_list += [encoder_states.new_zeros(B, self.hidden_dim)]
            prev_out = encoder_states.new_zeros(B, self.output_dim)
            # Unroll one encoder frame at a time.
            for (t, encoder_state) in enumerate(encoder_states.transpose(0, 1)):
                concat = torch.cat([encoder_state, self.prenet(prev_out)], dim=1)
                for (i, lstmp) in enumerate(self.lstmps):
                    lstmp_input = (concat if (i == 0) else z_list[(i - 1)])
                    (z_list[i], c_list[i]) = lstmp(lstmp_input, z_list[i], c_list[i])
                predicted_list += [self.proj(z_list[(- 1)]).view(B, self.output_dim, (- 1))]
                # Teacher forcing when targets are available; otherwise feed
                # back the model's own prediction.  Feedback is normalized.
                prev_out = (targets[t] if (targets is not None) else predicted_list[(- 1)].squeeze((- 1)))
                prev_out = self.normalize(prev_out)
            predicted = torch.cat(predicted_list, dim=2)
            predicted = predicted.transpose(1, 2)
        else:
            # Non-autoregressive: run the whole sequence through each layer.
            predicted = encoder_states
            for (i, lstmp) in enumerate(self.lstmps):
                (predicted, lens) = lstmp(predicted, lens)
            predicted = self.proj(predicted)
        return (predicted, lens)
def low_cut_filter(x, fs, cutoff=70):
    """Apply a low-cut (high-pass) FIR filter to a waveform.

    Args:
        x (ndarray): Waveform sequence.
        fs (int): Sampling frequency.
        cutoff (float): Cutoff frequency of the low-cut filter in Hz.

    Returns:
        ndarray: Low-cut-filtered waveform sequence.
    """
    nyquist = fs // 2
    norm_cutoff = cutoff / nyquist
    # 255-tap FIR; pass_zero=False makes it high-pass (blocks DC)
    fir = firwin(255, norm_cutoff, pass_zero=False)
    return lfilter(fir, 1, x)
def spc2npow(spectrogram):
    """Calculate a normalized power sequence from a spectrogram.

    Parameters
    ----------
    spectrogram : array, shape (T, fftlen / 2 + 1)
        Array of spectrum envelope.

    Returns
    -------
    npow : array, shape (T,)
        Normalized power sequence in dB (relative to the mean frame power).
    """
    frame_pows = np.apply_along_axis(_spvec2pow, 1, spectrogram)
    # normalize by the mean frame power, then convert to dB
    return 10.0 * np.log10(frame_pows / np.mean(frame_pows))
def _spvec2pow(specvec): 'Convert a spectrum envelope into a power\n\n Parameters\n ----------\n specvec : vector, shape (`fftlen / 2 + 1`)\n Vector of specturm envelope |H(w)|^2\n\n Return\n ------\n power : scala,\n Power of a frame\n\n ' fftl2 = (len(specvec) - 1) fftl = (fftl2 * 2) power = (specvec[0] + specvec[fftl2]) for k in range(1, fftl2): power += (2.0 * specvec[k]) power /= fftl return power
def extfrm(data, npow, power_threshold=(- 20)):
    """Extract frames whose normalized power exceeds a threshold.

    Parameters
    ----------
    data : array, shape (T, dim)
        Array of input data.
    npow : array, shape (T,)
        Normalized power sequence [dB].
    power_threshold : float, optional
        Power threshold [dB]. Default is -20.

    Returns
    -------
    array, shape (T_ext, dim)
        Frames above the threshold, with T_ext <= T.

    Raises
    ------
    ValueError
        If *data* and *npow* have different lengths.
    """
    T = data.shape[0]
    if T != len(npow):
        # BUGFIX: the original did ``raise 'Length...'``, which raises a
        # TypeError in Python 3 (exceptions must derive from BaseException).
        raise ValueError('Length of two vectors is different.')
    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= T
    return extdata
def world_extract(x, fs, f0min, f0max):
    """Extract WORLD features from a waveform.

    Args:
        x (ndarray): Waveform in [-1, 1].
        fs (int): Sampling frequency.
        f0min (float): Lower bound of the f0 search range.
        f0max (float): Upper bound of the f0 search range.

    Returns:
        dict: spectrum ``sp``, mel-cepstrum ``mcep``, aperiodicity ``ap``,
        ``f0`` contour and normalized power ``npow``.
    """
    # WORLD expects int16-range float64 samples
    x = np.array(x * np.iinfo(np.int16).max, dtype=np.float64)
    x = low_cut_filter(x, fs)
    f0, time_axis = pw.harvest(x, fs, f0_floor=f0min, f0_ceil=f0max,
                               frame_period=MCEP_SHIFT)
    sp = pw.cheaptrick(x, f0, time_axis, fs, fft_size=MCEP_FFTL)
    ap = pw.d4c(x, f0, time_axis, fs, fft_size=MCEP_FFTL)
    mcep = pysptk.sp2mc(sp, MCEP_DIM, MCEP_ALPHA)
    return {'sp': sp, 'mcep': mcep, 'ap': ap, 'f0': f0, 'npow': spc2npow(sp)}
def calculate_mcd_f0(x, y, fs, f0min, f0max):
    """Compute MCD, f0 RMSE, f0 correlation and duration difference.

    Args:
        x (ndarray): Reference waveform in [-1, 1].
        y (ndarray): Converted waveform in [-1, 1].
        fs (int): Sampling frequency shared by both waveforms.
        f0min (float): Lower bound of the f0 search range.
        f0max (float): Upper bound of the f0 search range.

    Returns:
        tuple: (mcd [dB], f0rmse, f0corr, ddur [s]); f0rmse and f0corr are
        NaN when no voiced frames are found.
    """
    gt_feats = world_extract(x, fs, f0min, f0max)
    cvt_feats = world_extract(y, fs, f0min, f0max)
    # --- MCD over non-silent frames (power-based selection), DTW-aligned ---
    gt_mcep_nonsil_pow = extfrm(gt_feats['mcep'], gt_feats['npow'])
    cvt_mcep_nonsil_pow = extfrm(cvt_feats['mcep'], cvt_feats['npow'])
    (_, path) = fastdtw(cvt_mcep_nonsil_pow, gt_mcep_nonsil_pow, dist=scipy.spatial.distance.euclidean)
    # path rows: (converted index, ground-truth index)
    twf_pow = np.array(path).T
    cvt_mcep_dtw_pow = cvt_mcep_nonsil_pow[twf_pow[0]]
    gt_mcep_dtw_pow = gt_mcep_nonsil_pow[twf_pow[1]]
    diff2sum = np.sum(((cvt_mcep_dtw_pow - gt_mcep_dtw_pow) ** 2), 1)
    # mel-cepstral distortion: (10 / ln 10) * sqrt(2 * sum of squared diffs)
    mcd = np.mean(((10.0 / np.log(10.0)) * np.sqrt((2 * diff2sum))), 0)
    # --- f0 metrics over voiced frames (f0 > 0), re-aligned by a second DTW
    # computed on the corresponding mcep frames ---
    gt_nonsil_f0_idx = np.where((gt_feats['f0'] > 0))[0]
    cvt_nonsil_f0_idx = np.where((cvt_feats['f0'] > 0))[0]
    try:
        gt_mcep_nonsil_f0 = gt_feats['mcep'][gt_nonsil_f0_idx]
        cvt_mcep_nonsil_f0 = cvt_feats['mcep'][cvt_nonsil_f0_idx]
        (_, path) = fastdtw(cvt_mcep_nonsil_f0, gt_mcep_nonsil_f0, dist=scipy.spatial.distance.euclidean)
        twf_f0 = np.array(path).T
        cvt_f0_dtw = cvt_feats['f0'][cvt_nonsil_f0_idx][twf_f0[0]]
        gt_f0_dtw = gt_feats['f0'][gt_nonsil_f0_idx][twf_f0[1]]
        f0rmse = np.sqrt(np.mean(((cvt_f0_dtw - gt_f0_dtw) ** 2)))
        f0corr = scipy.stats.pearsonr(cvt_f0_dtw, gt_f0_dtw)[0]
    except ValueError:
        # pearsonr / DTW raise ValueError on empty voiced-frame sets
        logging.warning('No nonzero f0 is found. Skip f0rmse f0corr computation and set them to NaN. This might due to unconverge training. Please tune the training time and hypers.')
        f0rmse = np.nan
        f0corr = np.nan
    # --- duration difference after trimming leading/trailing silence ---
    (x_trim, _) = librosa.effects.trim(y=x)
    (y_trim, _) = librosa.effects.trim(y=y)
    ddur = float((abs((len(x_trim) - len(y_trim))) / fs))
    return (mcd, f0rmse, f0corr, ddur)
def load_asr_model(device):
    """Load the pre-trained wav2vec2 ASR model and its tokenizer.

    Returns:
        dict: ``{'model': Wav2Vec2ForCTC, 'tokenizer': Wav2Vec2Tokenizer}``.
    """
    print(f'[INFO]: Load the pre-trained ASR by {ASR_PRETRAINED_MODEL}.')
    return {
        'model': Wav2Vec2ForCTC.from_pretrained(ASR_PRETRAINED_MODEL).to(device),
        'tokenizer': Wav2Vec2Tokenizer.from_pretrained(ASR_PRETRAINED_MODEL),
    }
def normalize_sentence(sentence):
    """Normalize a sentence for error-rate scoring.

    Uppercases the text, strips punctuation, and collapses all whitespace to
    single spaces with no leading/trailing space.
    """
    sentence = sentence.upper()
    sentence = jiwer.RemovePunctuation()(sentence)
    sentence = jiwer.RemoveWhiteSpace(replace_by_space=True)(sentence)
    sentence = jiwer.RemoveMultipleSpaces()(sentence)
    sentence = jiwer.Strip()(sentence)
    # NOTE: a trailing second .upper() call was removed as redundant — the
    # text is uppercased first and none of the jiwer transforms change case.
    return sentence
def calculate_measures(groundtruth, transcription):
    """Calculate character/word measures (hits, subs, inserts, deletes) for
    one sentence pair; also returns the normalized strings."""
    norm_gt = normalize_sentence(groundtruth)
    norm_trn = normalize_sentence(transcription)
    c_result = jiwer.cer(norm_gt, norm_trn, return_dict=True)
    w_result = jiwer.compute_measures(norm_gt, norm_trn)
    return c_result, w_result, norm_gt, norm_trn
def transcribe(model, device, wav):
    """Run ASR on a single 16 kHz waveform and return the decoded string."""
    inputs = model['tokenizer'](wav, sampling_rate=16000, return_tensors='pt',
                                padding='longest')
    logits = model['model'](inputs.input_values.to(device),
                            attention_mask=inputs.attention_mask.to(device)).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    return model['tokenizer'].batch_decode(predicted_ids)[0]
def load_asv_model(device):
    """Load the speaker-verification voice encoder on *device*."""
    return VoiceEncoder().to(device)
def get_embedding(wav_path, encoder):
    """Compute a speaker embedding for the waveform stored at *wav_path*."""
    return encoder.embed_utterance(preprocess_wav(wav_path))
def get_cosine_similarity(x_emb, y_emb):
    """Return the cosine similarity between two embedding vectors."""
    denom = np.linalg.norm(x_emb) * np.linalg.norm(y_emb)
    return np.inner(x_emb, y_emb) / denom