from __future__ import annotations

import os
import random
from collections import defaultdict
from importlib.resources import files

import jieba
import torch
from pypinyin import Style, lazy_pinyin
from torch.nn.utils.rnn import pad_sequence

# seed everything


def seed_everything(seed=0):
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
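
# Usage sketch (not part of the original module): call once at startup,
# before building models or dataloaders, e.g. seed_everything(42).
# Note cudnn.deterministic=True trades throughput for reproducibility.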

# helpers


def exists(v):
    return v is not None


def default(v, d):
    return v if exists(v) else d

# tensor helpers


def lens_to_mask(t: int["b"], length: int | None = None) -> bool["b n"]:
    if not exists(length):
        length = t.amax()

    seq = torch.arange(length, device=t.device)
    return seq[None, :] < t[:, None]
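
# Example (a minimal sketch): lens_to_mask(torch.tensor([2, 3])) gives
#     tensor([[ True,  True, False],
#             [ True,  True,  True]])
# True marks each sequence's valid positions, False marks padding.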


def mask_from_start_end_indices(seq_len: int["b"], start: int["b"], end: int["b"]):
    max_seq_len = seq_len.max().item()
    seq = torch.arange(max_seq_len, device=start.device).long()
    start_mask = seq[None, :] >= start[:, None]
    end_mask = seq[None, :] < end[:, None]
    return start_mask & end_mask
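
# Example (a minimal sketch): with seq_len=torch.tensor([5]),
# start=torch.tensor([1]), end=torch.tensor([3]), the mask is
#     tensor([[False,  True,  True, False, False]])
# i.e. True over the half-open span [start, end).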


def mask_from_frac_lengths(seq_len: int["b"], frac_lengths: float["b"]):
    lengths = (frac_lengths * seq_len).long()
    max_start = seq_len - lengths

    rand = torch.rand_like(frac_lengths)
    start = (max_start * rand).long().clamp(min=0)
    end = start + lengths

    return mask_from_start_end_indices(seq_len, start, end)
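
# Example (a minimal sketch): mask_from_frac_lengths(torch.tensor([10]),
# torch.tensor([0.5])) masks a random contiguous 5-frame span out of 10;
# the start offset is drawn uniformly, so repeated calls give different spans.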


def maybe_masked_mean(t: float["b n d"], mask: bool["b n"] = None) -> float["b d"]:
    if not exists(mask):
        return t.mean(dim=1)

    t = torch.where(mask[:, :, None], t, torch.tensor(0.0, device=t.device))
    num = t.sum(dim=1)
    den = mask.float().sum(dim=1)

    return num / den.clamp(min=1.0)
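
# Example (a minimal sketch): for t = torch.tensor([[[1.0], [2.0], [3.0]]])
# and mask = torch.tensor([[True, True, False]]), the result is
# tensor([[1.5000]]): masked positions are zeroed out and excluded from
# the denominator.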

# simple utf-8 tokenizer: map each string to its raw byte values


def list_str_to_tensor(text: list[str], padding_value=-1) -> int["b nt"]:
    list_tensors = [torch.tensor([*bytes(t, "UTF-8")]) for t in text]
    text = pad_sequence(list_tensors, padding_value=padding_value, batch_first=True)
    return text
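
# Example (a minimal sketch): list_str_to_tensor(["ab", "abc"]) gives
#     tensor([[97, 98, -1],
#             [97, 98, 99]])
# i.e. raw utf-8 byte values, right-padded with -1 to the longest string.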

# char tokenizer, built from a custom dataset's extracted vocab file


def list_str_to_idx(
    text: list[str] | list[list[str]],
    vocab_char_map: dict[str, int],
    padding_value=-1,
) -> int["b nt"]:
    list_idx_tensors = [torch.tensor([vocab_char_map.get(c, 0) for c in t]) for t in text]
    text = pad_sequence(list_idx_tensors, padding_value=padding_value, batch_first=True)
    return text
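
# Example (a minimal sketch), assuming a toy vocab {" ": 0, "a": 1, "b": 2}:
# list_str_to_idx(["ab", "ba b"], vocab) gives
#     tensor([[ 1,  2, -1, -1],
#             [ 2,  1,  0,  2]])
# Characters absent from the vocab fall back to index 0 (the unknown slot).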

# get tokenizer


def get_tokenizer(dataset_name, tokenizer: str = "pinyin"):
    """
    tokenizer - "pinyin": g2p applied to Chinese characters only; requires a .txt vocab file
              - "char": character-wise tokenizer; requires a .txt vocab file
              - "byte": utf-8 byte tokenizer; no vocab file needed
              - "custom": pass the path of the vocab.txt you want to use as dataset_name
    vocab_size - for "pinyin": all available pinyin types, plus common alphabets (including accented ones) and symbols
               - for "char": derived from the unfiltered character & symbol counts of the custom dataset
               - for "byte": fixed at 256 (the utf-8 byte range)
    """
    if tokenizer in ["pinyin", "char"]:
        tokenizer_path = os.path.join(files("f5_tts").joinpath("../../data"), f"{dataset_name}_{tokenizer}/vocab.txt")
        with open(tokenizer_path, "r", encoding="utf-8") as f:
            vocab_char_map = {}
            for i, char in enumerate(f):
                vocab_char_map[char[:-1]] = i  # strip the trailing newline
        vocab_size = len(vocab_char_map)
        assert vocab_char_map[" "] == 0, "make sure space is of idx 0 in vocab.txt, because 0 is used for unknown char"

    elif tokenizer == "byte":
        vocab_char_map = None
        vocab_size = 256

    elif tokenizer == "custom":
        with open(dataset_name, "r", encoding="utf-8") as f:
            vocab_char_map = {}
            for i, char in enumerate(f):
                vocab_char_map[char[:-1]] = i
        vocab_size = len(vocab_char_map)

    return vocab_char_map, vocab_size
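
# Usage sketch (dataset name and paths are illustrative): for a dataset
# prepared under data/MyDataset_pinyin/vocab.txt,
#
#     vocab_char_map, vocab_size = get_tokenizer("MyDataset", "pinyin")
#
# or pass an explicit vocab path with the "custom" tokenizer:
#
#     vocab_char_map, vocab_size = get_tokenizer("/path/to/vocab.txt", "custom")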

# convert char to pinyin


def convert_char_to_pinyin(text_list, polyphone=True):
    if jieba.dt.initialized is False:
        jieba.default_logger.setLevel(50)  # CRITICAL, silence jieba's init logging
        jieba.initialize()

    final_text_list = []
    custom_trans = str.maketrans(
        {";": ",", "“": '"', "”": '"', "‘": "'", "’": "'"}
    )  # normalize punctuation variants to address oov

    def is_chinese(c):
        return (
            "\u3100" <= c <= "\u9fff"  # common chinese characters
        )

    for text in text_list:
        char_list = []
        text = text.translate(custom_trans)
        for seg in jieba.cut(text):
            seg_byte_len = len(bytes(seg, "UTF-8"))
            if seg_byte_len == len(seg):  # pure alphabets and symbols
                if char_list and seg_byte_len > 1 and char_list[-1] not in " :'\"":
                    char_list.append(" ")
                char_list.extend(seg)
            elif polyphone and seg_byte_len == 3 * len(seg):  # pure east asian characters
                seg_ = lazy_pinyin(seg, style=Style.TONE3, tone_sandhi=True)
                for i, c in enumerate(seg):
                    if is_chinese(c):
                        char_list.append(" ")
                    char_list.append(seg_[i])
            else:  # mixed characters, alphabets and symbols
                for c in seg:
                    if ord(c) < 256:
                        char_list.extend(c)
                    elif is_chinese(c):
                        char_list.append(" ")
                        char_list.extend(lazy_pinyin(c, style=Style.TONE3, tone_sandhi=True))
                    else:
                        char_list.append(c)
        final_text_list.append(char_list)

    return final_text_list
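
# Example (a minimal sketch; exact segmentation and tone digits depend on
# jieba and pypinyin): convert_char_to_pinyin(["你好, world"]) yields roughly
#     [[" ", "ni2", " ", "hao3", ",", " ", "w", "o", "r", "l", "d"]]
# Chinese characters become space-prefixed TONE3 pinyin (tone sandhi applied),
# while ASCII text passes through character by character.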

# filter func for dirty data with many repetitions


def repetition_found(text, length=2, tolerance=10):
    pattern_count = defaultdict(int)
    for i in range(len(text) - length + 1):
        pattern = text[i : i + length]
        pattern_count[pattern] += 1
    for pattern, count in pattern_count.items():
        if count > tolerance:
            return True
    return False
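
# Example (a minimal sketch): repetition_found("ab" * 12) is True, since the
# 2-gram "ab" occurs 12 times, exceeding the default tolerance of 10;
# repetition_found("abcdefg") is False.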

# predefined timesteps for empirically pruned step sampling (EPSS)


def get_epss_timesteps(n, device, dtype):
    dt = 1 / 32
    predefined_timesteps = {
        5: [0, 2, 4, 8, 16, 32],
        6: [0, 2, 4, 6, 8, 16, 32],
        7: [0, 2, 4, 6, 8, 16, 24, 32],
        10: [0, 2, 4, 6, 8, 12, 16, 20, 24, 28, 32],
        12: [0, 2, 4, 6, 8, 10, 12, 14, 16, 20, 24, 28, 32],
        16: [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32],
    }
    t = predefined_timesteps.get(n, [])
    if not t:
        return torch.linspace(0, 1, n + 1, device=device, dtype=dtype)
    return dt * torch.tensor(t, device=device, dtype=dtype)
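
# Example (a minimal sketch): get_epss_timesteps(5, "cpu", torch.float32) gives
# tensor([0.0000, 0.0625, 0.1250, 0.2500, 0.5000, 1.0000]): n + 1 timesteps on
# [0, 1], denser near t = 0. For an n with no predefined schedule, the function
# falls back to a uniform torch.linspace grid.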