from __future__ import annotations

import os
import random
from collections import defaultdict
from importlib.resources import files

import torch
from torch.nn.utils.rnn import pad_sequence

def seed_everything(seed=0):
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

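# Illustrative usage (a sketch, not part of the module's API): call once at startup,
# before building dataloaders or models. Note that cudnn.deterministic trades some
# speed for reproducibility, and numpy's RNG is not seeded here.
#
#   seed_everything(42)
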
def exists(v):
    return v is not None


def default(v, d):
    return v if exists(v) else d

# tensor helpers
# note: annotations like int["b"] / bool["b n"] are shape-annotation shorthand
# (batch "b", sequence "n"); they are never evaluated at runtime because of
# `from __future__ import annotations`


def lens_to_mask(t: int["b"], length: int | None = None) -> bool["b n"]:
    if not exists(length):
        length = t.amax()

    seq = torch.arange(length, device=t.device)
    return seq[None, :] < t[:, None]

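# Illustrative usage of lens_to_mask (a sketch; values are made up):
#
#   lens = torch.tensor([2, 3, 1])
#   lens_to_mask(lens)
#   # tensor([[ True,  True, False],
#   #         [ True,  True,  True],
#   #         [ True, False, False]])
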
def mask_from_start_end_indices(seq_len: int["b"], start: int["b"], end: int["b"]):
    max_seq_len = seq_len.max().item()
    seq = torch.arange(max_seq_len, device=start.device).long()
    start_mask = seq[None, :] >= start[:, None]
    end_mask = seq[None, :] < end[:, None]
    return start_mask & end_mask

def mask_from_frac_lengths(seq_len: int["b"], frac_lengths: float["b"]):
    lengths = (frac_lengths * seq_len).long()
    max_start = seq_len - lengths

    rand = torch.rand_like(frac_lengths)
    start = (max_start * rand).long().clamp(min=0)
    end = start + lengths

    return mask_from_start_end_indices(seq_len, start, end)

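# Illustrative usage of mask_from_frac_lengths (a sketch; start offsets are random):
# each row gets a contiguous True span covering roughly `frac_lengths` of its sequence,
# placed at a random start position.
#
#   seq_len = torch.tensor([8, 6])
#   frac = torch.tensor([0.5, 0.5])
#   mask_from_frac_lengths(seq_len, frac)
#   # e.g. tensor([[False,  True,  True,  True,  True, False, False, False],
#   #              [ True,  True,  True, False, False, False, False, False]])
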
def maybe_masked_mean(t: float["b n d"], mask: bool["b n"] = None) -> float["b d"]:
    if not exists(mask):
        return t.mean(dim=1)

    t = torch.where(mask[:, :, None], t, torch.tensor(0.0, device=t.device))
    num = t.sum(dim=1)
    den = mask.float().sum(dim=1)

    return num / den.clamp(min=1.0)

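# Illustrative usage of maybe_masked_mean (a sketch): averages over the sequence
# dimension, counting only positions where the mask is True.
#
#   t = torch.arange(6.0).reshape(1, 3, 2)   # [[[0., 1.], [2., 3.], [4., 5.]]]
#   mask = torch.tensor([[True, True, False]])
#   maybe_masked_mean(t, mask)
#   # tensor([[1., 2.]])
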
def get_tokenizer(dataset_name, tokenizer: str = "pinyin"):
    """
    tokenizer   - "pinyin": run g2p on Chinese characters only; needs a .txt vocab_file
                - "char": character-wise tokenizer; needs a .txt vocab_file
                - "byte": utf-8 byte tokenizer
                - "custom": dataset_name is a direct path to the vocab.txt you want to use
    vocab_size  - "pinyin": all available pinyin types, common alphabets (including accented ones) and symbols
                - "char": derived from the unfiltered character & symbol counts of the custom dataset
                - "byte": 256 (unicode byte range)
    """
    if tokenizer in ["pinyin", "char"]:
        tokenizer_path = os.path.join(files("f5_tts").joinpath("../../data"), f"{dataset_name}_{tokenizer}/vocab.txt")
        with open(tokenizer_path, "r", encoding="utf-8") as f:
            vocab_char_map = {}
            for i, char in enumerate(f):
                vocab_char_map[char[:-1]] = i  # strip the trailing newline
        vocab_size = len(vocab_char_map)
        assert vocab_char_map[" "] == 0, "make sure space is at index 0 in vocab.txt, since 0 is used for unknown chars"

    elif tokenizer == "byte":
        vocab_char_map = None
        vocab_size = 256

    elif tokenizer == "custom":
        with open(dataset_name, "r", encoding="utf-8") as f:
            vocab_char_map = {}
            for i, char in enumerate(f):
                vocab_char_map[char[:-1]] = i  # strip the trailing newline
        vocab_size = len(vocab_char_map)

    return vocab_char_map, vocab_size

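# Illustrative usage of get_tokenizer (a sketch; the dataset name below is a placeholder
# and assumes data/<dataset_name>_<tokenizer>/vocab.txt exists two levels above the
# installed f5_tts package):
#
#   vocab_char_map, vocab_size = get_tokenizer("my_dataset", "char")
#   # vocab_char_map maps each vocab line to its index (space -> 0)
#
#   vocab_char_map, vocab_size = get_tokenizer("unused", "byte")
#   # vocab_char_map is None, vocab_size == 256
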
def repetition_found(text, length=2, tolerance=10):
    # flag texts in which any substring of `length` chars occurs more than `tolerance` times
    pattern_count = defaultdict(int)
    for i in range(len(text) - length + 1):
        pattern = text[i : i + length]
        pattern_count[pattern] += 1
    for pattern, count in pattern_count.items():
        if count > tolerance:
            return True
    return False
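
# Illustrative usage of repetition_found (a sketch): can be used to flag texts with
# pathological repetition, e.g. transcripts stuck on the same bigram.
#
#   repetition_found("ab" * 11, length=2, tolerance=10)       # True  ("ab" occurs 11 times)
#   repetition_found("hello world", length=2, tolerance=10)   # False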