liumaolin committed
Commit 6ea5c75 · 1 Parent(s): 037f445

Sync files.

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. AR/__init__.py +0 -0
  2. AR/data/__init__.py +0 -0
  3. AR/data/bucket_sampler.py +163 -0
  4. AR/data/data_module.py +76 -0
  5. AR/data/dataset.py +323 -0
  6. AR/models/__init__.py +0 -0
  7. AR/models/t2s_lightning_module.py +144 -0
  8. AR/models/t2s_lightning_module_onnx.py +107 -0
  9. AR/models/t2s_model.py +902 -0
  10. AR/models/t2s_model_onnx.py +338 -0
  11. AR/models/utils.py +229 -0
  12. AR/modules/__init__.py +0 -0
  13. AR/modules/activation.py +429 -0
  14. AR/modules/activation_onnx.py +178 -0
  15. AR/modules/embedding.py +81 -0
  16. AR/modules/embedding_onnx.py +63 -0
  17. AR/modules/lr_schedulers.py +83 -0
  18. AR/modules/optim.py +622 -0
  19. AR/modules/patched_mha_with_cache.py +465 -0
  20. AR/modules/patched_mha_with_cache_onnx.py +92 -0
  21. AR/modules/scaling.py +335 -0
  22. AR/modules/transformer.py +379 -0
  23. AR/modules/transformer_onnx.py +292 -0
  24. AR/text_processing/__init__.py +0 -0
  25. AR/text_processing/phonemizer.py +79 -0
  26. AR/text_processing/symbols.py +10 -0
  27. AR/utils/__init__.py +37 -0
  28. AR/utils/initialize.py +38 -0
  29. AR/utils/io.py +34 -0
  30. README.md +348 -0
  31. TTS_infer_pack/TTS.py +1047 -0
  32. TTS_infer_pack/TTS_mid.py +1043 -0
  33. TTS_infer_pack/TextPreprocessor.py +247 -0
  34. TTS_infer_pack/__init__.py +1 -0
  35. TTS_infer_pack/text_segmentation_method.py +173 -0
  36. TTS_infer_pack/tts_infer_module.py +150 -0
  37. __init__.py +9 -0
  38. configs/.gitignore +1 -0
  39. configs/s2.json +3 -0
  40. download.py +5 -0
  41. export_torch_script.py +831 -0
  42. feature_extractor/__init__.py +6 -0
  43. feature_extractor/cnhubert.py +108 -0
  44. feature_extractor/whisper_enc.py +25 -0
  45. inference_gui.py +310 -0
  46. inference_webui.py +778 -0
  47. inference_webui_fast.py +336 -0
  48. module/__init__.py +0 -0
  49. module/attentions.py +709 -0
  50. module/attentions_onnx.py +406 -0
AR/__init__.py ADDED
File without changes
AR/data/__init__.py ADDED
File without changes
AR/data/bucket_sampler.py ADDED
@@ -0,0 +1,163 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/bucket_sampler.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import math
import random
from random import shuffle
from typing import Iterator
from typing import Optional
from typing import TypeVar

import torch
import torch.distributed as dist
from torch.utils.data import Dataset
from torch.utils.data import Sampler

__all__ = [
    "DistributedBucketSampler",
]

T_co = TypeVar("T_co", covariant=True)


class DistributedBucketSampler(Sampler[T_co]):
    r"""
    Sort the dataset w.r.t. input length,
    divide samples into buckets,
    sort within buckets,
    divide buckets into batches,
    sort batches.
    """

    def __init__(
        self,
        dataset: Dataset,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        shuffle: bool = True,
        seed: int = 0,
        drop_last: bool = False,
        batch_size: int = 32,
    ) -> None:
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank() if torch.cuda.is_available() else 0
        if torch.cuda.is_available():
            torch.cuda.set_device(rank)
        if rank >= num_replicas or rank < 0:
            raise ValueError(
                "Invalid rank {}, rank should be in the interval"
                " [0, {}]".format(rank, num_replicas - 1)
            )
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last
        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        if (
            self.drop_last and len(self.dataset) % self.num_replicas != 0
        ):  # type: ignore[arg-type]
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil(
                (len(self.dataset) - self.num_replicas)
                / self.num_replicas  # type: ignore[arg-type]
            )
        else:
            self.num_samples = math.ceil(
                len(self.dataset) / self.num_replicas
            )  # type: ignore[arg-type]
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.batch_size = batch_size
        self.id_with_length = self._get_sample_lengths()
        self.id_buckets = self.make_buckets(bucket_width=2.0)

    def _get_sample_lengths(self):
        id_with_lengths = []
        for i in range(len(self.dataset)):
            id_with_lengths.append((i, self.dataset.get_sample_length(i)))
        id_with_lengths.sort(key=lambda x: x[1])
        return id_with_lengths

    def make_buckets(self, bucket_width: float = 2.0):
        buckets = []
        cur = []
        max_sec = bucket_width
        for id, sec in self.id_with_length:
            if sec < max_sec:
                cur.append(id)
            else:
                buckets.append(cur)
                cur = [id]
                max_sec += bucket_width
        if len(cur) > 0:
            buckets.append(cur)
        return buckets

    def __iter__(self) -> Iterator[T_co]:
        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            random.seed(self.epoch + self.seed)
            shuffled_bucket = []
            for buc in self.id_buckets:
                buc_copy = buc.copy()
                shuffle(buc_copy)
                shuffled_bucket.append(buc_copy)
            grouped_batch_size = self.batch_size * self.num_replicas
            shuffled_bucket = list(itertools.chain(*shuffled_bucket))
            n_batch = int(math.ceil(len(shuffled_bucket) / grouped_batch_size))
            batches = [
                shuffled_bucket[b * grouped_batch_size : (b + 1) * grouped_batch_size]
                for b in range(n_batch)
            ]
            shuffle(batches)
            indices = list(itertools.chain(*batches))
        else:
            # type: ignore[arg-type]
            indices = list(range(len(self.dataset)))

        if not self.drop_last:
            # add extra samples to make it evenly divisible
            padding_size = self.total_size - len(indices)
            if padding_size <= len(indices):
                indices += indices[:padding_size]
            else:
                indices += (indices * math.ceil(padding_size / len(indices)))[
                    :padding_size
                ]
        else:
            # remove tail of data to make it evenly divisible.
            indices = indices[: self.total_size]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        r"""
        Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch
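For orientation, a minimal single-process sketch of driving DistributedBucketSampler. The ToyDataset and its lengths are hypothetical; when CUDA is unavailable the sampler falls back to num_replicas=1, rank=0, so this runs on a plain CPU machine:

import torch
from torch.utils.data import Dataset

class ToyDataset(Dataset):
    """Hypothetical dataset exposing get_sample_length(), which the sampler requires."""
    def __init__(self, lengths):
        self.lengths = lengths  # clip duration in seconds, one per sample

    def __len__(self):
        return len(self.lengths)

    def __getitem__(self, i):
        return i

    def get_sample_length(self, i):
        return self.lengths[i]

ds = ToyDataset([0.5, 3.2, 1.1, 7.8, 2.0, 4.4, 0.9, 6.1])
sampler = DistributedBucketSampler(ds, batch_size=2, shuffle=True)
sampler.set_epoch(0)  # seed the per-epoch shuffle
print(list(iter(sampler)))  # indices grouped so similar lengths land in the same batch

Because indices are emitted bucket by bucket before the grouped batches are shuffled, each batch tends to contain clips of similar duration, which reduces padding waste in the collate step.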
AR/data/data_module.py ADDED
@@ -0,0 +1,76 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/data_module.py
# reference: https://github.com/lifeiteng/vall-e
from pytorch_lightning import LightningDataModule
from moyoyo_tts.AR.data.bucket_sampler import DistributedBucketSampler
from moyoyo_tts.AR.data.dataset import Text2SemanticDataset
from torch.utils.data import DataLoader


class Text2SemanticDataModule(LightningDataModule):
    def __init__(
        self,
        config,
        train_semantic_path,
        train_phoneme_path,
        dev_semantic_path=None,
        dev_phoneme_path=None,
    ):
        super().__init__()
        self.config = config
        self.train_semantic_path = train_semantic_path
        self.train_phoneme_path = train_phoneme_path
        self.dev_semantic_path = dev_semantic_path
        self.dev_phoneme_path = dev_phoneme_path
        self.num_workers = self.config["data"]["num_workers"]

    def prepare_data(self):
        pass

    def setup(self, stage=None, output_logs=False):
        self._train_dataset = Text2SemanticDataset(
            phoneme_path=self.train_phoneme_path,
            semantic_path=self.train_semantic_path,
            max_sec=self.config["data"]["max_sec"],
            pad_val=self.config["data"]["pad_val"],
        )
        self._dev_dataset = self._train_dataset
        # self._dev_dataset = Text2SemanticDataset(
        #     phoneme_path=self.dev_phoneme_path,
        #     semantic_path=self.dev_semantic_path,
        #     max_sample=self.config['data']['max_eval_sample'],
        #     max_sec=self.config['data']['max_sec'],
        #     pad_val=self.config['data']['pad_val'])

    def train_dataloader(self):
        # halve the batch size when DPO training is enabled (it runs an extra
        # rejected-sample forward pass; see t2s_model.forward)
        batch_size = (
            self.config["train"]["batch_size"] // 2
            if self.config["train"].get("if_dpo", False)
            else self.config["train"]["batch_size"]
        )
        # cap so there are at least a few batches per epoch; otherwise no checkpoint gets saved
        batch_size = max(min(batch_size, len(self._train_dataset) // 4), 1)
        sampler = DistributedBucketSampler(self._train_dataset, batch_size=batch_size)
        return DataLoader(
            self._train_dataset,
            batch_size=batch_size,
            sampler=sampler,
            collate_fn=self._train_dataset.collate,
            num_workers=self.num_workers,
            persistent_workers=True,
            prefetch_factor=16,
        )

    def val_dataloader(self):
        return DataLoader(
            self._dev_dataset,
            batch_size=1,
            shuffle=False,
            collate_fn=self._train_dataset.collate,
            num_workers=max(self.num_workers, 12),
            persistent_workers=True,
            prefetch_factor=16,
        )

    # Is this ever used?
    def test_dataloader(self):
        return DataLoader(
            self._dev_dataset,
            batch_size=1,
            shuffle=False,
            collate_fn=self._train_dataset.collate,
        )
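The module reads only a handful of config keys. A hypothetical skeleton covering exactly those keys (values are illustrative; the paths follow the 2-name2text.txt / 6-name2semantic.tsv naming referenced in dataset.py, not a fixed API):

config = {
    "data": {
        "num_workers": 4,   # DataLoader worker processes
        "max_sec": 54,      # drop clips longer than this (seconds)
        "pad_val": 1024,    # padding id for semantic tokens
    },
    "train": {
        "batch_size": 32,
        "if_dpo": False,    # when True, batch_size is halved (extra DPO forward pass)
    },
}

dm = Text2SemanticDataModule(
    config,
    train_semantic_path="logs/exp/6-name2semantic.tsv",
    train_phoneme_path="logs/exp/2-name2text.txt",
)
dm.setup()
loader = dm.train_dataloader()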
AR/data/dataset.py ADDED
@@ -0,0 +1,323 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/dataset.py
# reference: https://github.com/lifeiteng/vall-e
import pdb
import sys

# sys.path.append("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert")
import traceback, os
from typing import Dict
from typing import List

import numpy as np
import pandas as pd
import torch, json
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from transformers import AutoTokenizer

version = os.environ.get('version', None)

from moyoyo_tts.text import cleaned_text_to_sequence

# from config import exp_dir


def batch_sequences(sequences: List[np.array], axis: int = 0, pad_value: int = 0):
    seq = sequences[0]
    ndim = seq.ndim
    if axis < 0:
        axis += ndim
    dtype = seq.dtype
    pad_value = dtype.type(pad_value)
    seq_lengths = [seq.shape[axis] for seq in sequences]
    max_length = np.max(seq_lengths)

    padded_sequences = []
    for seq, length in zip(sequences, seq_lengths):
        padding = (
            [(0, 0)] * axis + [(0, max_length - length)] + [(0, 0)] * (ndim - axis - 1)
        )
        padded_seq = np.pad(seq, padding, mode="constant", constant_values=pad_value)
        padded_sequences.append(padded_seq)
    batch = np.stack(padded_sequences)
    return batch


class Text2SemanticDataset(Dataset):
    """dataset class for text tokens to semantic model training."""

    def __init__(
        self,
        phoneme_path: str,
        semantic_path: str,
        max_sample: int = None,
        max_sec: int = 100,
        pad_val: int = 1024,
        # min value of phoneme/sec
        min_ps_ratio: int = 3,
        # max value of phoneme/sec
        max_ps_ratio: int = 25,
    ) -> None:
        super().__init__()

        self.semantic_data = pd.read_csv(
            semantic_path, delimiter="\t", encoding="utf-8"
        )
        # get dict
        self.path2 = phoneme_path  # "%s/2-name2text.txt"%exp_dir#phoneme_path
        self.path3 = "%s/3-bert" % (
            os.path.dirname(phoneme_path)
        )  # "%s/3-bert"%exp_dir#bert_dir
        self.path6 = semantic_path  # "%s/6-name2semantic.tsv"%exp_dir#semantic_path
        assert os.path.exists(self.path2)
        assert os.path.exists(self.path6)
        self.phoneme_data = {}
        with open(self.path2, "r", encoding="utf8") as f:
            lines = f.read().strip("\n").split("\n")

        for line in lines:
            tmp = line.split("\t")
            if len(tmp) != 4:
                continue
            self.phoneme_data[tmp[0]] = [tmp[1], tmp[2], tmp[3]]

        # self.phoneme_data = np.load(phoneme_path, allow_pickle=True).item()
        # pad for semantic tokens
        self.PAD: int = pad_val
        # self.hz = 25
        # with open("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert/configs/s2.json", "r") as f:data = f.read()
        # data=json.loads(data)["model"]["semantic_frame_rate"]#50hz
        # self.hz=int(data[:-2])#
        self.hz = int(os.environ.get("hz", "25hz")[:-2])

        # max seconds of semantic token
        self.max_sec = max_sec
        self.min_ps_ratio = min_ps_ratio
        self.max_ps_ratio = max_ps_ratio

        if max_sample is not None:
            self.semantic_data = self.semantic_data[:max_sample]

        # {idx: (semantic, phoneme)}
        # semantic list, phoneme list
        self.semantic_phoneme = []
        self.item_names = []

        self.inited = False

        if not self.inited:
            # run the one-time initialization
            self.init_batch()
            self.inited = True
            del self.semantic_data
            del self.phoneme_data
        # self.tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
        # self.tokenizer = AutoTokenizer.from_pretrained("/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large")

    def init_batch(self):
        semantic_data_len = len(self.semantic_data)
        phoneme_data_len = len(self.phoneme_data.keys())
        print("semantic_data_len:", semantic_data_len)
        print("phoneme_data_len:", phoneme_data_len)
        print(self.semantic_data)
        idx = 0
        num_not_in = 0
        num_deleted_bigger = 0
        num_deleted_ps = 0
        for i in range(semantic_data_len):
            # iterate over the entries in order
            # get str
            item_name = self.semantic_data.iloc[i, 0]
            # print(self.phoneme_data)
            try:
                phoneme, word2ph, text = self.phoneme_data[item_name]
            except Exception:
                traceback.print_exc()
                # print(f"{item_name} not in self.phoneme_data !")
                num_not_in += 1
                continue

            semantic_str = self.semantic_data.iloc[i, 1]
            # get token list
            semantic_ids = [int(idx) for idx in semantic_str.split(" ")]
            # shape (T,); no need to reshape to (1, T) since we still need len()
            # filter out samples that are too long
            if (
                len(semantic_ids) > self.max_sec * self.hz
            ):  # (1) estimate total duration from the token count and filter clips longer than max_sec (set in config); e.g. 40s * 25Hz = 1k tokens
                num_deleted_bigger += 1
                continue
            # (T,); this is fast, so do it once up front instead of per item in __getitem__
            phoneme = phoneme.split(" ")

            try:
                phoneme_ids = cleaned_text_to_sequence(phoneme, version)
            except Exception:
                traceback.print_exc()
                # print(f"{item_name} not in self.phoneme_data !")
                num_not_in += 1
                continue
            # if len(phoneme_ids) > 400:  # (2) replaced by a fixed limit of semantic length / 2.5
            if (
                len(phoneme_ids) > self.max_sec * self.hz / 2.5
            ):  # (2) fixed limit: semantic length / 2.5
                num_deleted_ps += 1
                continue
            # if len(semantic_ids) > 1000:  # (3)
            #     num_deleted_bigger += 1
            #     continue

            ps_ratio = len(phoneme_ids) / (len(semantic_ids) / self.hz)

            if (
                ps_ratio > self.max_ps_ratio or ps_ratio < self.min_ps_ratio
            ):  # (4) keep 3~25 phonemes per second
                num_deleted_ps += 1
                # print(item_name)
                continue

            self.semantic_phoneme.append((semantic_ids, phoneme_ids))
            idx += 1
            self.item_names.append(item_name)

        min_num = 100  # empirically: at 20, no duplication is applied; at 30, checkpoints still failed to save even with duplication
        leng = len(self.semantic_phoneme)
        if leng < min_num:
            tmp1 = self.semantic_phoneme
            tmp2 = self.item_names
            self.semantic_phoneme = []
            self.item_names = []
            for _ in range(max(2, int(min_num / leng))):
                self.semantic_phoneme += tmp1
                self.item_names += tmp2
        if num_not_in > 0:
            print(f"there are {num_not_in} semantic entries not in the phoneme data")
        if num_deleted_bigger > 0:
            print(
                f"deleted {num_deleted_bigger} audios whose duration is longer than {self.max_sec} seconds"
            )
        if num_deleted_ps > 0:
            # 4702 for LibriTTS; LibriTTS is annotated data. Does it need filtering? => Yes, it has extreme values of 100
            print(
                f"deleted {num_deleted_ps} audios whose phoneme/sec ratio is greater than {self.max_ps_ratio} or smaller than {self.min_ps_ratio}"
            )
        """
        there are 31 semantic entries not in the phoneme data
        deleted 34 audios whose duration is longer than 54 seconds
        deleted 3190 audios whose phoneme/sec ratio is greater than 25 or smaller than 3
        dataset.__len__(): 366463

        """
        # 345410 for LibriTTS
        print("dataset.__len__():", self.__len__())

    def __get_item_names__(self) -> List[str]:
        return self.item_names

    def __len__(self) -> int:
        return len(self.semantic_phoneme)

    def __getitem__(self, idx: int) -> Dict:
        semantic_ids, phoneme_ids = self.semantic_phoneme[idx]
        item_name = self.item_names[idx]
        phoneme_ids_len = len(phoneme_ids)
        # semantic tokens target
        semantic_ids_len = len(semantic_ids)

        flag = 0
        path_bert = "%s/%s.pt" % (self.path3, item_name)
        if os.path.exists(path_bert):
            bert_feature = torch.load(path_bert, map_location="cpu")
        else:
            flag = 1
        if flag == 1:
            # bert_feature=torch.zeros_like(phoneme_ids,dtype=torch.float32)
            bert_feature = None
        else:
            assert bert_feature.shape[-1] == len(phoneme_ids)
        return {
            "idx": idx,
            "phoneme_ids": phoneme_ids,
            "phoneme_ids_len": phoneme_ids_len,
            "semantic_ids": semantic_ids,
            "semantic_ids_len": semantic_ids_len,
            "bert_feature": bert_feature,
        }

    def get_sample_length(self, idx: int):
        semantic_ids = self.semantic_phoneme[idx][0]
        sec = 1.0 * len(semantic_ids) / self.hz
        return sec

    def collate(self, examples: List[Dict]) -> Dict:
        sample_index: List[int] = []
        phoneme_ids: List[torch.Tensor] = []
        phoneme_ids_lens: List[int] = []
        semantic_ids: List[torch.Tensor] = []
        semantic_ids_lens: List[int] = []
        # return

        for item in examples:
            sample_index.append(item["idx"])
            phoneme_ids.append(np.array(item["phoneme_ids"], dtype=np.int64))
            semantic_ids.append(np.array(item["semantic_ids"], dtype=np.int64))
            phoneme_ids_lens.append(item["phoneme_ids_len"])
            semantic_ids_lens.append(item["semantic_ids_len"])

        # pad 0
        phoneme_ids = batch_sequences(phoneme_ids)
        semantic_ids = batch_sequences(semantic_ids, pad_value=self.PAD)

        # convert each batch to torch.tensor
        phoneme_ids = torch.tensor(phoneme_ids)
        semantic_ids = torch.tensor(semantic_ids)
        phoneme_ids_lens = torch.tensor(phoneme_ids_lens)
        semantic_ids_lens = torch.tensor(semantic_ids_lens)
        bert_padded = torch.FloatTensor(len(examples), 1024, max(phoneme_ids_lens))
        bert_padded.zero_()

        for idx, item in enumerate(examples):
            bert = item["bert_feature"]
            if bert is not None:
                bert_padded[idx, :, : bert.shape[-1]] = bert

        return {
            # List[int]
            "ids": sample_index,
            # torch.Tensor (B, max_phoneme_length)
            "phoneme_ids": phoneme_ids,
            # torch.Tensor (B)
            "phoneme_ids_len": phoneme_ids_lens,
            # torch.Tensor (B, max_semantic_ids_length)
            "semantic_ids": semantic_ids,
            # torch.Tensor (B)
            "semantic_ids_len": semantic_ids_lens,
            # torch.Tensor (B, 1024, max_phoneme_length)
            "bert_feature": bert_padded,
        }


if __name__ == "__main__":
    root_dir = "/data/docker/liujing04/gpt-vits/prepare/dump_mix/"
    dataset = Text2SemanticDataset(
        phoneme_path=root_dir + "phoneme_train.npy",
        semantic_path=root_dir + "semantic_train.tsv",
    )

    batch_size = 12
    dataloader = DataLoader(
        dataset, batch_size=batch_size, collate_fn=dataset.collate, shuffle=False
    )
    for i, batch in enumerate(dataloader):
        if i % 1000 == 0:
            print(i)
        # if i == 0:
        #     print('batch["ids"]:', batch["ids"])
        #     print('batch["phoneme_ids"]:', batch["phoneme_ids"],
        #           batch["phoneme_ids"].shape)
        #     print('batch["phoneme_ids_len"]:', batch["phoneme_ids_len"],
        #           batch["phoneme_ids_len"].shape)
        #     print('batch["semantic_ids"]:', batch["semantic_ids"],
        #           batch["semantic_ids"].shape)
        #     print('batch["semantic_ids_len"]:', batch["semantic_ids_len"],
        #           batch["semantic_ids_len"].shape)
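A quick sanity check of batch_sequences, which right-pads a list of variable-length arrays along the given axis and stacks them. Toy arrays, assumed to run with the function above in scope:

import numpy as np

a = np.array([1, 2, 3], dtype=np.int64)
b = np.array([4, 5], dtype=np.int64)
print(batch_sequences([a, b], pad_value=1024))
# [[   1    2    3]
#  [   4    5 1024]]

This is how collate() builds the (B, max_len) phoneme and semantic batches, with pad_value=self.PAD for the semantic tokens so padding is distinguishable from real codebook ids.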
AR/models/__init__.py ADDED
File without changes
AR/models/t2s_lightning_module.py ADDED
@@ -0,0 +1,144 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os
import sys
from typing import Dict

import torch
from pytorch_lightning import LightningModule

now_dir = os.getcwd()
sys.path.append(now_dir)

from moyoyo_tts.AR.models.t2s_model import Text2SemanticDecoder
from moyoyo_tts.AR.modules.lr_schedulers import WarmupCosineLRSchedule
from moyoyo_tts.AR.modules.optim import ScaledAdam


class Text2SemanticLightningModule(LightningModule):
    def __init__(self, config, output_dir, is_train=True):
        super().__init__()
        self.config = config
        self.top_k = 3
        self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
        pretrained_s1 = config.get("pretrained_s1")
        if pretrained_s1 and is_train:
            # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
            print(
                self.load_state_dict(
                    torch.load(pretrained_s1, map_location="cpu")["weight"]
                )
            )
        if is_train:
            self.automatic_optimization = False
            self.save_hyperparameters()
            self.eval_dir = output_dir / "eval"
            self.eval_dir.mkdir(parents=True, exist_ok=True)

    def training_step(self, batch: Dict, batch_idx: int):
        opt = self.optimizers()
        scheduler = self.lr_schedulers()
        # use the DPO forward pass when enabled, otherwise the plain cross-entropy forward
        forward = (
            self.model.forward
            if self.config["train"].get("if_dpo", False)
            else self.model.forward_old
        )
        loss, acc = forward(
            batch["phoneme_ids"],
            batch["phoneme_ids_len"],
            batch["semantic_ids"],
            batch["semantic_ids_len"],
            batch["bert_feature"],
        )
        self.manual_backward(loss)
        # manual gradient accumulation: step the optimizer every 4th batch
        if batch_idx > 0 and batch_idx % 4 == 0:
            opt.step()
            opt.zero_grad()
            scheduler.step()

        self.log(
            "total_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            "lr",
            scheduler.get_last_lr()[0],
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            f"top_{self.top_k}_acc",
            acc,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )

    def validation_step(self, batch: Dict, batch_idx: int):
        return

    # # get loss
    # loss, acc = self.model.forward(
    #     batch['phoneme_ids'], batch['phoneme_ids_len'],
    #     batch['semantic_ids'], batch['semantic_ids_len'],
    #     batch['bert_feature']
    # )
    #
    # self.log(
    #     "val_total_loss",
    #     loss,
    #     on_step=True,
    #     on_epoch=True,
    #     prog_bar=True,
    #     sync_dist=True)
    # self.log(
    #     f"val_top_{self.top_k}_acc",
    #     acc,
    #     on_step=True,
    #     on_epoch=True,
    #     prog_bar=True,
    #     sync_dist=True)
    #
    # # get infer output
    # semantic_len = batch['semantic_ids'].size(1)
    # prompt_len = min(int(semantic_len * 0.5), 150)
    # prompt = batch['semantic_ids'][:, :prompt_len]
    # pred_semantic = self.model.infer(batch['phoneme_ids'],
    #                                  batch['phoneme_ids_len'], prompt,
    #                                  batch['bert_feature']
    #                                  )
    # save_name = f'semantic_toks_{batch_idx}.pt'
    # save_path = os.path.join(self.eval_dir, save_name)
    # torch.save(pred_semantic.detach().cpu(), save_path)

    def configure_optimizers(self):
        model_parameters = self.model.parameters()
        parameters_names = []
        parameters_names.append(
            [name_param_pair[0] for name_param_pair in self.model.named_parameters()]
        )
        lm_opt = ScaledAdam(
            model_parameters,
            lr=0.01,
            betas=(0.9, 0.95),
            clipping_scale=2.0,
            parameters_names=parameters_names,
            show_dominant_parameters=False,
            clipping_update_period=1000,
        )

        return {
            "optimizer": lm_opt,
            "lr_scheduler": {
                "scheduler": WarmupCosineLRSchedule(
                    lm_opt,
                    init_lr=self.config["optimizer"]["lr_init"],
                    peak_lr=self.config["optimizer"]["lr"],
                    end_lr=self.config["optimizer"]["lr_end"],
                    warmup_steps=self.config["optimizer"]["warmup_steps"],
                    total_steps=self.config["optimizer"]["decay_steps"],
                )
            },
        }
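Note that the optimizer steps only every 4th batch (manual gradient accumulation), so the effective batch size is four times the loader's batch size. A hypothetical "optimizer" config section showing just the keys configure_optimizers reads; the values are illustrative, not the project's shipped defaults:

optimizer_cfg = {
    "lr_init": 1e-5,       # warmup start LR (init_lr)
    "lr": 4e-4,            # peak LR reached after warmup (peak_lr)
    "lr_end": 1e-4,        # final LR of the cosine decay (end_lr)
    "warmup_steps": 2000,  # linear warmup length
    "decay_steps": 40000,  # total_steps for the cosine schedule
}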
AR/models/t2s_lightning_module_onnx.py ADDED
@@ -0,0 +1,107 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os, sys

now_dir = os.getcwd()
sys.path.append(now_dir)
from typing import Dict

import torch
from pytorch_lightning import LightningModule
from moyoyo_tts.AR.models.t2s_model_onnx import Text2SemanticDecoder
from moyoyo_tts.AR.modules.lr_schedulers import WarmupCosineLRSchedule
from moyoyo_tts.AR.modules.optim import ScaledAdam


class Text2SemanticLightningModule(LightningModule):
    def __init__(self, config, output_dir, is_train=True):
        super().__init__()
        self.config = config
        self.top_k = 3
        self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
        pretrained_s1 = config.get("pretrained_s1")
        if pretrained_s1 and is_train:
            # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
            print(
                self.load_state_dict(
                    torch.load(pretrained_s1, map_location="cpu")["weight"]
                )
            )
        if is_train:
            self.automatic_optimization = False
            self.save_hyperparameters()
            self.eval_dir = output_dir / "eval"
            self.eval_dir.mkdir(parents=True, exist_ok=True)

    def training_step(self, batch: Dict, batch_idx: int):
        opt = self.optimizers()
        scheduler = self.lr_schedulers()
        loss, acc = self.model.forward(
            batch["phoneme_ids"],
            batch["phoneme_ids_len"],
            batch["semantic_ids"],
            batch["semantic_ids_len"],
            batch["bert_feature"],
        )
        self.manual_backward(loss)
        if batch_idx > 0 and batch_idx % 4 == 0:
            opt.step()
            opt.zero_grad()
            scheduler.step()

        self.log(
            "total_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            "lr",
            scheduler.get_last_lr()[0],
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            f"top_{self.top_k}_acc",
            acc,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )

    def validation_step(self, batch: Dict, batch_idx: int):
        return

    def configure_optimizers(self):
        model_parameters = self.model.parameters()
        parameters_names = []
        parameters_names.append(
            [name_param_pair[0] for name_param_pair in self.model.named_parameters()]
        )
        lm_opt = ScaledAdam(
            model_parameters,
            lr=0.01,
            betas=(0.9, 0.95),
            clipping_scale=2.0,
            parameters_names=parameters_names,
            show_dominant_parameters=False,
            clipping_update_period=1000,
        )

        return {
            "optimizer": lm_opt,
            "lr_scheduler": {
                "scheduler": WarmupCosineLRSchedule(
                    lm_opt,
                    init_lr=self.config["optimizer"]["lr_init"],
                    peak_lr=self.config["optimizer"]["lr"],
                    end_lr=self.config["optimizer"]["lr_end"],
                    warmup_steps=self.config["optimizer"]["warmup_steps"],
                    total_steps=self.config["optimizer"]["decay_steps"],
                )
            },
        }
AR/models/t2s_model.py ADDED
@@ -0,0 +1,902 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import math
4
+ from typing import List, Optional
5
+
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+ from torchmetrics.classification import MulticlassAccuracy
10
+ from tqdm import tqdm
11
+
12
+ from moyoyo_tts.AR.models.utils import make_pad_mask
13
+ from moyoyo_tts.AR.models.utils import (
14
+ topk_sampling,
15
+ sample,
16
+ dpo_loss,
17
+ make_reject_y,
18
+ get_batch_logps
19
+ )
20
+ from moyoyo_tts.AR.modules.embedding import SinePositionalEmbedding
21
+ from moyoyo_tts.AR.modules.embedding import TokenEmbedding
22
+ from moyoyo_tts.AR.modules.transformer import LayerNorm
23
+ from moyoyo_tts.AR.modules.transformer import TransformerEncoder
24
+ from moyoyo_tts.AR.modules.transformer import TransformerEncoderLayer
25
+
26
+ default_config = {
27
+ "embedding_dim": 512,
28
+ "hidden_dim": 512,
29
+ "num_head": 8,
30
+ "num_layers": 12,
31
+ "num_codebook": 8,
32
+ "p_dropout": 0.0,
33
+ "vocab_size": 1024 + 1,
34
+ "phoneme_vocab_size": 512,
35
+ "EOS": 1024,
36
+ }
37
+
38
+ # @torch.jit.script ## 使用的话首次推理会非常慢,而且推理速度不稳定
39
+ # Efficient implementation equivalent to the following:
40
+ def scaled_dot_product_attention(query:torch.Tensor, key:torch.Tensor, value:torch.Tensor, attn_mask:Optional[torch.Tensor]=None, scale:Optional[torch.Tensor]=None) -> torch.Tensor:
41
+ B, H, L, S =query.size(0), query.size(1), query.size(-2), key.size(-2)
42
+ if scale is None:
43
+ scale_factor = torch.tensor(1 / math.sqrt(query.size(-1)))
44
+ else:
45
+ scale_factor = scale
46
+ attn_bias = torch.zeros(B, H, L, S, dtype=query.dtype, device=query.device)
47
+
48
+ if attn_mask is not None:
49
+ if attn_mask.dtype == torch.bool:
50
+ attn_bias.masked_fill_(attn_mask, float("-inf"))
51
+ else:
52
+ attn_bias += attn_mask
53
+ attn_weight = query @ key.transpose(-2, -1) * scale_factor
54
+ attn_weight += attn_bias
55
+ attn_weight = torch.softmax(attn_weight, dim=-1)
56
+
57
+ if attn_mask is not None:
58
+ if attn_mask.dtype == torch.bool:
59
+ attn_weight.masked_fill_(attn_mask, 0)
60
+ else:
61
+ attn_mask[attn_mask!=float("-inf")] =0
62
+ attn_mask[attn_mask==float("-inf")] =1
63
+ attn_weight.masked_fill_(attn_mask, 0)
64
+
65
+ return attn_weight @ value
66
+
67
+ @torch.jit.script
68
+ class T2SMLP:
69
+ def __init__(self, w1, b1, w2, b2):
70
+ self.w1 = w1
71
+ self.b1 = b1
72
+ self.w2 = w2
73
+ self.b2 = b2
74
+
75
+ def forward(self, x):
76
+ x = F.relu(F.linear(x, self.w1, self.b1))
77
+ x = F.linear(x, self.w2, self.b2)
78
+ return x
79
+
80
+
81
+ @torch.jit.script
82
+ class T2SBlock:
83
+ def __init__(
84
+ self,
85
+ num_heads,
86
+ hidden_dim: int,
87
+ mlp: T2SMLP,
88
+ qkv_w,
89
+ qkv_b,
90
+ out_w,
91
+ out_b,
92
+ norm_w1,
93
+ norm_b1,
94
+ norm_eps1,
95
+ norm_w2,
96
+ norm_b2,
97
+ norm_eps2,
98
+ ):
99
+ self.num_heads = num_heads
100
+ self.mlp = mlp
101
+ self.hidden_dim: int = hidden_dim
102
+ self.qkv_w = qkv_w
103
+ self.qkv_b = qkv_b
104
+ self.out_w = out_w
105
+ self.out_b = out_b
106
+ self.norm_w1 = norm_w1
107
+ self.norm_b1 = norm_b1
108
+ self.norm_eps1 = norm_eps1
109
+ self.norm_w2 = norm_w2
110
+ self.norm_b2 = norm_b2
111
+ self.norm_eps2 = norm_eps2
112
+
113
+ self.false = torch.tensor(False, dtype=torch.bool)
114
+
115
+ @torch.jit.ignore
116
+ def to_mask(self, x:torch.Tensor, padding_mask:Optional[torch.Tensor]):
117
+ if padding_mask is None:
118
+ return x
119
+
120
+ if padding_mask.dtype == torch.bool:
121
+ return x.masked_fill(padding_mask, 0)
122
+ else:
123
+ return x * padding_mask
124
+
125
+ def process_prompt(self, x:torch.Tensor, attn_mask : torch.Tensor, padding_mask:Optional[torch.Tensor]=None, torch_sdpa:bool=True):
126
+
127
+
128
+ q, k, v = F.linear(self.to_mask(x, padding_mask), self.qkv_w, self.qkv_b).chunk(3, dim=-1)
129
+
130
+ batch_size = q.shape[0]
131
+ q_len = q.shape[1]
132
+ kv_len = k.shape[1]
133
+
134
+ q = self.to_mask(q, padding_mask)
135
+ k_cache = self.to_mask(k, padding_mask)
136
+ v_cache = self.to_mask(v, padding_mask)
137
+
138
+ q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
139
+ k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
140
+ v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
141
+
142
+ if torch_sdpa:
143
+ attn = F.scaled_dot_product_attention(q, k, v, ~attn_mask)
144
+ else:
145
+ attn = scaled_dot_product_attention(q, k, v, attn_mask)
146
+
147
+ attn = attn.permute(2, 0, 1, 3).reshape(batch_size*q_len, self.hidden_dim)
148
+ attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0)
149
+ attn = F.linear(self.to_mask(attn, padding_mask), self.out_w, self.out_b)
150
+
151
+ if padding_mask is not None:
152
+ for i in range(batch_size):
153
+ # mask = padding_mask[i,:,0]
154
+ if self.false.device!= padding_mask.device:
155
+ self.false = self.false.to(padding_mask.device)
156
+ idx = torch.where(padding_mask[i,:,0]==self.false)[0]
157
+ x_item = x[i,idx,:].unsqueeze(0)
158
+ attn_item = attn[i,idx,:].unsqueeze(0)
159
+ x_item = x_item + attn_item
160
+ x_item = F.layer_norm(
161
+ x_item, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
162
+ )
163
+ x_item = x_item + self.mlp.forward(x_item)
164
+ x_item = F.layer_norm(
165
+ x_item,
166
+ [self.hidden_dim],
167
+ self.norm_w2,
168
+ self.norm_b2,
169
+ self.norm_eps2,
170
+ )
171
+ x[i,idx,:] = x_item.squeeze(0)
172
+ x = self.to_mask(x, padding_mask)
173
+ else:
174
+ x = x + attn
175
+ x = F.layer_norm(
176
+ x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
177
+ )
178
+ x = x + self.mlp.forward(x)
179
+ x = F.layer_norm(
180
+ x,
181
+ [self.hidden_dim],
182
+ self.norm_w2,
183
+ self.norm_b2,
184
+ self.norm_eps2,
185
+ )
186
+ return x, k_cache, v_cache
187
+
188
+ def decode_next_token(self, x:torch.Tensor, k_cache:torch.Tensor, v_cache:torch.Tensor, attn_mask:Optional[torch.Tensor]=None, torch_sdpa:bool=True):
189
+ q, k, v = F.linear(x, self.qkv_w, self.qkv_b).chunk(3, dim=-1)
190
+
191
+ k_cache = torch.cat([k_cache, k], dim=1)
192
+ v_cache = torch.cat([v_cache, v], dim=1)
193
+
194
+ batch_size = q.shape[0]
195
+ q_len = q.shape[1]
196
+ kv_len = k_cache.shape[1]
197
+
198
+ q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
199
+ k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
200
+ v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
201
+
202
+
203
+ if torch_sdpa:
204
+ attn = F.scaled_dot_product_attention(q, k, v)
205
+ else:
206
+ attn = scaled_dot_product_attention(q, k, v, attn_mask)
207
+
208
+ attn = attn.permute(2, 0, 1, 3).reshape(batch_size*q_len, self.hidden_dim)
209
+ attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0)
210
+ attn = F.linear(attn, self.out_w, self.out_b)
211
+
212
+ x = x + attn
213
+ x = F.layer_norm(
214
+ x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
215
+ )
216
+ x = x + self.mlp.forward(x)
217
+ x = F.layer_norm(
218
+ x,
219
+ [self.hidden_dim],
220
+ self.norm_w2,
221
+ self.norm_b2,
222
+ self.norm_eps2,
223
+ )
224
+ return x, k_cache, v_cache
225
+
226
+
227
+ @torch.jit.script
228
+ class T2STransformer:
229
+ def __init__(self, num_blocks : int, blocks: List[T2SBlock]):
230
+ self.num_blocks : int = num_blocks
231
+ self.blocks = blocks
232
+
233
+ def process_prompt(
234
+ self, x:torch.Tensor, attn_mask : torch.Tensor,
235
+ padding_mask : Optional[torch.Tensor]=None,
236
+ torch_sdpa:bool=True
237
+ ):
238
+ k_cache : List[torch.Tensor] = []
239
+ v_cache : List[torch.Tensor] = []
240
+ for i in range(self.num_blocks):
241
+ x, k_cache_, v_cache_ = self.blocks[i].process_prompt(x, attn_mask, padding_mask, torch_sdpa)
242
+ k_cache.append(k_cache_)
243
+ v_cache.append(v_cache_)
244
+ return x, k_cache, v_cache
245
+
246
+ def decode_next_token(
247
+ self, x:torch.Tensor,
248
+ k_cache: List[torch.Tensor],
249
+ v_cache: List[torch.Tensor],
250
+ attn_mask : Optional[torch.Tensor]=None,
251
+ torch_sdpa:bool=True
252
+ ):
253
+ for i in range(self.num_blocks):
254
+ x, k_cache[i], v_cache[i] = self.blocks[i].decode_next_token(x, k_cache[i], v_cache[i], attn_mask, torch_sdpa)
255
+ return x, k_cache, v_cache
256
+
257
+
258
+ class Text2SemanticDecoder(nn.Module):
259
+ def __init__(self, config, norm_first=False, top_k=3):
260
+ super(Text2SemanticDecoder, self).__init__()
261
+ self.model_dim = config["model"]["hidden_dim"]
262
+ self.embedding_dim = config["model"]["embedding_dim"]
263
+ self.num_head = config["model"]["head"]
264
+ self.num_layers = config["model"]["n_layer"]
265
+ self.norm_first = norm_first
266
+ self.vocab_size = config["model"]["vocab_size"]
267
+ self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
268
+ self.p_dropout = config["model"]["dropout"]
269
+ self.EOS = config["model"]["EOS"]
270
+ self.norm_first = norm_first
271
+ assert self.EOS == self.vocab_size - 1
272
+ # should be same as num of kmeans bin
273
+ # assert self.EOS == 1024
274
+ self.bert_proj = nn.Linear(1024, self.embedding_dim)
275
+ self.ar_text_embedding = TokenEmbedding(
276
+ self.embedding_dim, self.phoneme_vocab_size, self.p_dropout
277
+ )
278
+ self.ar_text_position = SinePositionalEmbedding(
279
+ self.embedding_dim, dropout=0.1, scale=False, alpha=True
280
+ )
281
+ self.ar_audio_embedding = TokenEmbedding(
282
+ self.embedding_dim, self.vocab_size, self.p_dropout
283
+ )
284
+ self.ar_audio_position = SinePositionalEmbedding(
285
+ self.embedding_dim, dropout=0.1, scale=False, alpha=True
286
+ )
287
+
288
+ self.h = TransformerEncoder(
289
+ TransformerEncoderLayer(
290
+ d_model=self.model_dim,
291
+ nhead=self.num_head,
292
+ dim_feedforward=self.model_dim * 4,
293
+ dropout=0.1,
294
+ batch_first=True,
295
+ norm_first=norm_first,
296
+ ),
297
+ num_layers=self.num_layers,
298
+ norm=LayerNorm(self.model_dim) if norm_first else None,
299
+ )
300
+
301
+ self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
302
+ self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
303
+
304
+ self.ar_accuracy_metric = MulticlassAccuracy(
305
+ self.vocab_size,
306
+ top_k=top_k,
307
+ average="micro",
308
+ multidim_average="global",
309
+ ignore_index=self.EOS,
310
+ )
311
+
312
+ blocks = []
313
+
314
+ for i in range(self.num_layers):
315
+ layer = self.h.layers[i]
316
+ t2smlp = T2SMLP(
317
+ layer.linear1.weight,
318
+ layer.linear1.bias,
319
+ layer.linear2.weight,
320
+ layer.linear2.bias
321
+ )
322
+
323
+ block = T2SBlock(
324
+ self.num_head,
325
+ self.model_dim,
326
+ t2smlp,
327
+ layer.self_attn.in_proj_weight,
328
+ layer.self_attn.in_proj_bias,
329
+ layer.self_attn.out_proj.weight,
330
+ layer.self_attn.out_proj.bias,
331
+ layer.norm1.weight,
332
+ layer.norm1.bias,
333
+ layer.norm1.eps,
334
+ layer.norm2.weight,
335
+ layer.norm2.bias,
336
+ layer.norm2.eps
337
+ )
338
+
339
+ blocks.append(block)
340
+
341
+ self.t2s_transformer = T2STransformer(self.num_layers, blocks)
342
+
343
+ def make_input_data(self, x, x_lens, y, y_lens, bert_feature):
344
+ x = self.ar_text_embedding(x)
345
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
346
+ x = self.ar_text_position(x)
347
+ x_mask = make_pad_mask(x_lens)
348
+
349
+ y_mask = make_pad_mask(y_lens)
350
+ y_mask_int = y_mask.type(torch.int64)
351
+ codes = y.type(torch.int64) * (1 - y_mask_int)
352
+
353
+ # Training
354
+ # AR Decoder
355
+ y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
356
+ x_len = x_lens.max()
357
+ y_len = y_lens.max()
358
+ y_emb = self.ar_audio_embedding(y)
359
+ y_pos = self.ar_audio_position(y_emb)
360
+
361
+ xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
362
+
363
+ ar_xy_padding_mask = xy_padding_mask
364
+
365
+ x_attn_mask = F.pad(
366
+ torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
367
+ (0, y_len),
368
+ value=True,
369
+ )
370
+ # x_attn_mask[:, x_len]=False
371
+ y_attn_mask = F.pad(
372
+ torch.triu(
373
+ torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
374
+ diagonal=1,
375
+ ),
376
+ (x_len, 0),
377
+ value=False,
378
+ )
379
+
380
+ xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
381
+ bsz, src_len = x.shape[0], x_len + y_len
382
+ _xy_padding_mask = (
383
+ ar_xy_padding_mask.view(bsz, 1, 1, src_len)
384
+ .expand(-1, self.num_head, -1, -1)
385
+ .reshape(bsz * self.num_head, 1, src_len)
386
+ )
387
+ xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
388
+ new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
389
+ new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
390
+ xy_attn_mask = new_attn_mask
391
+ # x 和完整的 y 一次性输入模型
392
+ xy_pos = torch.concat([x, y_pos], dim=1)
393
+
394
+ return xy_pos, xy_attn_mask, targets
395
+
396
+ def forward(self, x, x_lens, y, y_lens, bert_feature):
397
+ """
398
+ x: phoneme_ids
399
+ y: semantic_ids
400
+ """
401
+
402
+ reject_y, reject_y_lens = make_reject_y(y, y_lens)
403
+
404
+ xy_pos, xy_attn_mask, targets = self.make_input_data(x, x_lens, y, y_lens, bert_feature)
405
+
406
+ xy_dec, _ = self.h(
407
+ (xy_pos, None),
408
+ mask=xy_attn_mask,
409
+ )
410
+ x_len = x_lens.max()
411
+ logits = self.ar_predict_layer(xy_dec[:, x_len:])
412
+
413
+ ###### DPO #############
414
+ reject_xy_pos, reject_xy_attn_mask, reject_targets = self.make_input_data(x, x_lens, reject_y, reject_y_lens, bert_feature)
415
+
416
+ reject_xy_dec, _ = self.h(
417
+ (reject_xy_pos, None),
418
+ mask=reject_xy_attn_mask,
419
+ )
420
+ x_len = x_lens.max()
421
+ reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len:])
422
+
423
+ # loss
424
+ # from feiteng: 每次 duration 越多, 梯度更新也应该更多, 所以用 sum
425
+
426
+ loss_1 = F.cross_entropy(logits.permute(0, 2, 1), targets, reduction="sum")
427
+ acc = self.ar_accuracy_metric(logits.permute(0, 2, 1).detach(), targets).item()
428
+
429
+ A_logits, R_logits = get_batch_logps(logits, reject_logits, targets, reject_targets)
430
+ loss_2, _, _ = dpo_loss(A_logits, R_logits, 0, 0, 0.2, reference_free=True)
431
+
432
+ loss = loss_1 + loss_2
433
+
434
+ return loss, acc
435
+
436
+ def forward_old(self, x, x_lens, y, y_lens, bert_feature):
437
+ """
438
+ x: phoneme_ids
439
+ y: semantic_ids
440
+ """
441
+ x = self.ar_text_embedding(x)
442
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
443
+ x = self.ar_text_position(x)
444
+ x_mask = make_pad_mask(x_lens)
445
+
446
+ y_mask = make_pad_mask(y_lens)
447
+ y_mask_int = y_mask.type(torch.int64)
448
+ codes = y.type(torch.int64) * (1 - y_mask_int)
449
+
450
+ # Training
451
+ # AR Decoder
452
+ y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
453
+ x_len = x_lens.max()
454
+ y_len = y_lens.max()
455
+ y_emb = self.ar_audio_embedding(y)
456
+ y_pos = self.ar_audio_position(y_emb)
457
+
458
+ xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
459
+ ar_xy_padding_mask = xy_padding_mask
460
+
461
+ x_attn_mask = F.pad(
462
+ torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
463
+ (0, y_len),
464
+ value=True,
465
+ )
466
+ y_attn_mask = F.pad(
467
+ torch.triu(
468
+ torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
469
+ diagonal=1,
470
+ ),
471
+ (x_len, 0),
472
+ value=False,
473
+ )
474
+ xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
475
+ bsz, src_len = x.shape[0], x_len + y_len
476
+ _xy_padding_mask = (
477
+ ar_xy_padding_mask.view(bsz, 1, 1, src_len)
478
+ .expand(-1, self.num_head, -1, -1)
479
+ .reshape(bsz * self.num_head, 1, src_len)
480
+ )
481
+ xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
482
+ new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
483
+ new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
484
+ xy_attn_mask = new_attn_mask
485
+ # x 和完整的 y 一次性输入模型
486
+ xy_pos = torch.concat([x, y_pos], dim=1)
487
+ xy_dec, _ = self.h(
488
+ (xy_pos, None),
489
+ mask=xy_attn_mask,
490
+ )
491
+ logits = self.ar_predict_layer(xy_dec[:, x_len:]).permute(0, 2, 1)
492
+ # loss
493
+ # from feiteng: 每次 duration 越多, 梯度更新也应该更多, 所以用 sum
494
+ loss = F.cross_entropy(logits, targets, reduction="sum")
495
+ acc = self.ar_accuracy_metric(logits.detach(), targets).item()
496
+ return loss, acc
497
+
498
+ # 需要看下这个函数和 forward 的区别以及没有 semantic 的时候 prompts 输入什么
499
+ def infer(
500
+ self,
501
+ x,
502
+ x_lens,
503
+ prompts,
504
+ bert_feature,
505
+ top_k: int = -100,
506
+ early_stop_num: int = -1,
507
+ temperature: float = 1.0,
508
+ ):
509
+ x = self.ar_text_embedding(x)
510
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
511
+ x = self.ar_text_position(x)
512
+
513
+ # AR Decoder
514
+ y = prompts
515
+ prefix_len = y.shape[1]
516
+ x_len = x.shape[1]
517
+ x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
518
+ stop = False
519
+ for _ in tqdm(range(1500)):
520
+ y_emb = self.ar_audio_embedding(y)
521
+ y_pos = self.ar_audio_position(y_emb)
522
+ # x 和逐渐增长的 y 一起输入给模型
523
+ xy_pos = torch.concat([x, y_pos], dim=1)
524
+ y_len = y.shape[1]
525
+ x_attn_mask_pad = F.pad(
526
+ x_attn_mask,
527
+ (0, y_len),
528
+ value=True,
529
+ )
530
+ y_attn_mask = F.pad(
531
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
532
+ (x_len, 0),
533
+ value=False,
534
+ )
535
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
536
+ y.device
537
+ )
538
+
539
+ xy_dec, _ = self.h(
540
+ (xy_pos, None),
541
+ mask=xy_attn_mask,
542
+ )
543
+ logits = self.ar_predict_layer(xy_dec[:, -1])
544
+ samples = topk_sampling(
545
+ logits, top_k=top_k, top_p=1.0, temperature=temperature
546
+ )
547
+
548
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
549
+ print("use early stop num:", early_stop_num)
550
+ stop = True
551
+
552
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
553
+ # print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
554
+ stop = True
555
+ if stop:
556
+ if prompts.shape[1] == y.shape[1]:
557
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
558
+ print("bad zero prediction")
559
+ #print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
560
+ break
561
+ # 本次生成的 semantic_ids 和之前的 y 构成新的 y
562
+ # print(samples.shape)#[1,1]#第一个1是bs
563
+ # import os
564
+ # os._exit(2333)
565
+ y = torch.concat([y, samples], dim=1)
566
+ return y
567
+
568
+ def pad_y_eos(self, y, y_mask_int, eos_id):
569
+ targets = F.pad(y, (0, 1), value=0) + eos_id * F.pad(
570
+ y_mask_int, (0, 1), value=1
571
+ )
572
+ # 错位
573
+ return targets[:, :-1], targets[:, 1:]
574
+
575
+ def infer_panel_batch_infer(
576
+ self,
577
+ x:List[torch.LongTensor], #####全部文本token
578
+ x_lens:torch.LongTensor,
579
+ prompts:torch.LongTensor, ####参考音频token
580
+ bert_feature:List[torch.LongTensor],
581
+ top_k: int = -100,
582
+ top_p: int = 100,
583
+ early_stop_num: int = -1,
584
+ temperature: float = 1.0,
585
+ repetition_penalty: float = 1.35,
586
+ **kwargs,
587
+ ):
588
+ if prompts is None:
589
+ print("Warning: Prompt free is not supported batch_infer! switch to naive_infer")
590
+ return self.infer_panel_naive_batched(x, x_lens, prompts, bert_feature, top_k=top_k, top_p=top_p, early_stop_num=early_stop_num, temperature=temperature, **kwargs)
591
+
592
+
593
+ max_len = kwargs.get("max_len",x_lens.max())
594
+ x_list = []
595
+ for x_item, bert_item in zip(x, bert_feature):
596
+ # max_len = max(max_len, x_item.shape[0], bert_item.shape[1])
597
+ x_item = self.ar_text_embedding(x_item.unsqueeze(0))
598
+ x_item = x_item + self.bert_proj(bert_item.transpose(0, 1).unsqueeze(0))
599
+ x_item = self.ar_text_position(x_item).squeeze(0)
600
+ x_item = F.pad(x_item,(0,0,0,max_len-x_item.shape[0]),value=0) if x_item.shape[0]<max_len else x_item
601
+ x_list.append(x_item)
602
+ x = torch.stack(x_list, dim=0)
603
+
604
+
605
+ # AR Decoder
606
+ y = prompts
607
+
608
+ x_len = x.shape[1]
609
+ x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
610
+ stop = False
611
+
612
+ k_cache = None
613
+ v_cache = None
614
+ ################### first step ##########################
615
+ if y is not None:
616
+ y_emb = self.ar_audio_embedding(y)
617
+ y_len = y_emb.shape[1]
618
+ prefix_len = y.shape[1]
619
+ y_lens = torch.LongTensor([y_emb.shape[1]]*y_emb.shape[0]).to(x.device)
620
+ y_pos = self.ar_audio_position(y_emb)
621
+ xy_pos = torch.concat([x, y_pos], dim=1)
622
+ ref_free = False
623
+ else:
624
+ y_emb = None
625
+ y_len = 0
626
+ prefix_len = 0
627
+ y_lens = torch.LongTensor([y_len]*x.shape[0]).to(x.device)
628
+ y_pos = None
629
+ xy_pos = x
630
+ y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
631
+ ref_free = True
632
+
633
+
634
+ ##### create mask #####
635
+ bsz = x.shape[0]
636
+ src_len = x_len + y_len
637
+ y_paddind_mask = make_pad_mask(y_lens, y_len)
638
+ x_paddind_mask = make_pad_mask(x_lens, max_len)
639
+
640
+ # (bsz, x_len + y_len)
641
+ xy_padding_mask = torch.concat([x_paddind_mask, y_paddind_mask], dim=1)
642
+
643
+ x_mask = F.pad(
644
+ x_attn_mask,
645
+ (0, y_len), ###xx的纯0扩展到xx纯0+xy纯1,(x,x+y)
646
+ value=True,
647
+ )
648
+ y_mask = F.pad( ###yy的右上1扩展到左边xy的0,(y,x+y)
649
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
650
+ (x_len, 0),
651
+ value=False,
652
+ )
653
+
654
+ xy_mask = torch.concat([x_mask, y_mask], dim=0).view(1 , src_len, src_len).repeat(bsz, 1, 1).to(x.device)
655
+ _xy_padding_mask = xy_padding_mask.view(bsz, 1, src_len).repeat(1, src_len, 1)
656
+
657
+ for i in range(bsz):
658
+ l = x_lens[i]
659
+ _xy_padding_mask[i,l:max_len,:]=True
660
+
661
+ xy_attn_mask = xy_mask.logical_or(_xy_padding_mask)
662
+ xy_attn_mask = xy_attn_mask.unsqueeze(1).expand(-1, self.num_head, -1, -1)
663
+ xy_attn_mask = xy_attn_mask.bool()
664
+ xy_padding_mask = xy_padding_mask.view(bsz, src_len, 1).expand(-1, -1, self.model_dim)
665
+
666
+ ###### decode #####
667
+ y_list = [None]*y.shape[0]
668
+ batch_idx_map = list(range(y.shape[0]))
669
+ idx_list = [None]*y.shape[0]
670
+ # for idx in tqdm(range(1500)):
671
+ for idx in range(1500):
672
+ if idx == 0:
673
+ xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, xy_padding_mask, False)
674
+ else:
675
+ xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache, xy_attn_mask, False)
676
+ logits = self.ar_predict_layer(
677
+ xy_dec[:, -1]
678
+ )
679
+
680
+ if idx == 0:
681
+ xy_attn_mask = F.pad(xy_attn_mask[:,:,-1].unsqueeze(-2),(0,1),value=False)
682
+ logits = logits[:, :-1]
683
+ else:
684
+ xy_attn_mask = F.pad(xy_attn_mask,(0,1),value=False)
685
+
686
+ samples = sample(
687
+ logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
688
+ )[0]
689
+
690
+ y = torch.concat([y, samples], dim=1)
691
+
692
+ ####### 移除batch中已经生成完毕的序列,进一步优化计算量
693
+ tokens = torch.argmax(logits, dim=-1)
694
+ reserved_idx_of_batch_for_y = None
695
+ if (self.EOS in samples[:, 0]) or \
696
+ (self.EOS in tokens): ###如果生成到EOS,则停止
697
+ l1 = samples[:, 0]==self.EOS
698
+ l2 = tokens==self.EOS
699
+ l = l1.logical_or(l2)
700
+ removed_idx_of_batch_for_y = torch.where(l==True)[0].tolist()
701
+ reserved_idx_of_batch_for_y = torch.where(l==False)[0]
702
+ # batch_indexs = torch.tensor(batch_idx_map, device=y.device)[removed_idx_of_batch_for_y]
703
+ for i in removed_idx_of_batch_for_y:
704
+ batch_index = batch_idx_map[i]
705
+ idx_list[batch_index] = idx - 1
706
+ y_list[batch_index] = y[i, :-1]
707
+
708
+ batch_idx_map = [batch_idx_map[i] for i in reserved_idx_of_batch_for_y.tolist()]
709
+
710
+ # 只保留batch中未生成完毕的序列
711
+ if reserved_idx_of_batch_for_y is not None:
712
+ # index = torch.LongTensor(batch_idx_map).to(y.device)
713
+ y = torch.index_select(y, dim=0, index=reserved_idx_of_batch_for_y)
714
+ xy_attn_mask = torch.index_select(xy_attn_mask, dim=0, index=reserved_idx_of_batch_for_y)
715
+ if k_cache is not None :
716
+ for i in range(len(k_cache)):
717
+ k_cache[i] = torch.index_select(k_cache[i], dim=0, index=reserved_idx_of_batch_for_y)
718
+ v_cache[i] = torch.index_select(v_cache[i], dim=0, index=reserved_idx_of_batch_for_y)
719
+
720
+
721
+ if (early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num) or idx==1499:
722
+ print("use early stop num:", early_stop_num)
723
+ stop = True
724
+ for i, batch_index in enumerate(batch_idx_map):
724
726
+ idx_list[batch_index] = idx
727
+ y_list[batch_index] = y[i, :-1]
728
+
729
+ if None not in idx_list:
730
+ stop = True
731
+
732
+ if stop:
733
+ if y.shape[1] == 0:
734
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
735
+ print("bad zero prediction")
736
+ #print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
737
+ break
738
+
739
+ ####################### update next step ###################################
740
+ y_emb = self.ar_audio_embedding(y[:, -1:])
741
+ xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to(dtype=y_emb.dtype, device=y_emb.device)
742
+
743
+ if None in idx_list:
744
+ for i in range(x.shape[0]):
745
+ if idx_list[i] is None:
746
+ idx_list[i] = 1500 - 1 ### if EOS was never generated, substitute the maximum length
747
+
748
+ if ref_free:
749
+ return y_list, [0]*x.shape[0]
750
+ # print(idx_list)
751
+ return y_list, idx_list
752
+
753
+ def infer_panel_naive_batched(self,
754
+ x: List[torch.LongTensor], ##### all text tokens
755
+ x_lens:torch.LongTensor,
756
+ prompts: torch.LongTensor, #### reference audio tokens
757
+ bert_feature:List[torch.LongTensor],
758
+ top_k: int = -100,
759
+ top_p: int = 100,
760
+ early_stop_num: int = -1,
761
+ temperature: float = 1.0,
762
+ repetition_penalty: float = 1.35,
763
+ **kwargs
764
+ ):
765
+ y_list = []
766
+ idx_list = []
767
+ for i in range(len(x)):
768
+ y, idx = self.infer_panel_naive(x[i].unsqueeze(0),
769
+ x_lens[i],
770
+ prompts[i].unsqueeze(0) if prompts is not None else None,
771
+ bert_feature[i].unsqueeze(0),
772
+ top_k,
773
+ top_p,
774
+ early_stop_num,
775
+ temperature,
776
+ repetition_penalty,
777
+ **kwargs)
778
+ y_list.append(y[0])
779
+ idx_list.append(idx)
780
+
781
+ return y_list, idx_list
782
+
783
+ def infer_panel_naive(
784
+ self,
785
+ x: torch.LongTensor, ##### all text tokens
786
+ x_lens:torch.LongTensor,
787
+ prompts: torch.LongTensor, #### reference audio tokens
788
+ bert_feature:torch.LongTensor,
789
+ top_k: int = -100,
790
+ top_p: int = 100,
791
+ early_stop_num: int = -1,
792
+ temperature: float = 1.0,
793
+ repetition_penalty: float = 1.35,
794
+ **kwargs
795
+ ):
796
+ x = self.ar_text_embedding(x)
797
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
798
+ x = self.ar_text_position(x)
799
+
800
+ # AR Decoder
801
+ y = prompts
802
+
803
+ x_len = x.shape[1]
804
+ x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
805
+ stop = False
806
+ # print(1111111,self.num_layers)
807
+
808
+ k_cache = None
809
+ v_cache = None
810
+ ################### first step ##########################
811
+ if y is not None:
812
+ y_emb = self.ar_audio_embedding(y)
813
+ y_len = y_emb.shape[1]
814
+ prefix_len = y.shape[1]
815
+ y_pos = self.ar_audio_position(y_emb)
816
+ xy_pos = torch.concat([x, y_pos], dim=1)
817
+ ref_free = False
818
+ else:
819
+ y_emb = None
820
+ y_len = 0
821
+ prefix_len = 0
822
+ y_pos = None
823
+ xy_pos = x
824
+ y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
825
+ ref_free = True
826
+
827
+ bsz = x.shape[0]
828
+ src_len = x_len + y_len
829
+ x_attn_mask_pad = F.pad(
830
+ x_attn_mask,
831
+ (0, y_len), ### extend xx's all-zero block to all-zero xx plus all-one xy, shape (x, x+y)
832
+ value=True,
833
+ )
834
+ y_attn_mask = F.pad( ### extend yy's upper-triangular ones leftward with zeros over the x region, shape (y, x+y)
835
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
836
+ (x_len, 0),
837
+ value=False,
838
+ )
839
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)\
840
+ .unsqueeze(0)\
841
+ .expand(bsz*self.num_head, -1, -1)\
842
+ .view(bsz, self.num_head, src_len, src_len)\
843
+ .to(device=x.device, dtype=torch.bool)
844
+
845
+ # for idx in tqdm(range(1500)):
846
+ for idx in range(1500):
847
+ if xy_attn_mask is not None:
848
+ xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, None)
849
+ else:
850
+ xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache)
851
+
852
+ logits = self.ar_predict_layer(
853
+ xy_dec[:, -1]
854
+ )
855
+
856
+ if idx == 0:
857
+ xy_attn_mask = None
858
+ if idx < 11: ### require at least 10 predicted tokens before allowing a stop (~0.4 s)
859
+ logits = logits[:, :-1]
860
+
861
+ samples = sample(
862
+ logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
863
+ )[0]
864
+
865
+ y = torch.concat([y, samples], dim=1)
866
+
867
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
868
+ print("use early stop num:", early_stop_num)
869
+ stop = True
870
+
871
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
872
+ stop = True
873
+ if stop:
874
+ if y.shape[1] == 0:
875
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
876
+ print("bad zero prediction")
877
+ #print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
878
+ break
879
+
880
+ ####################### update next step ###################################
881
+ y_emb = self.ar_audio_embedding(y[:, -1:])
882
+ xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to(dtype=y_emb.dtype, device=y_emb.device)
883
+
884
+ if ref_free:
885
+ return y[:, :-1], 0
886
+ return y[:, :-1], idx - 1
887
+
888
+
889
+ def infer_panel(
890
+ self,
891
+ x: torch.LongTensor, ##### all text tokens
892
+ x_lens:torch.LongTensor,
893
+ prompts: torch.LongTensor, #### reference audio tokens
894
+ bert_feature:torch.LongTensor,
895
+ top_k: int = -100,
896
+ top_p: int = 100,
897
+ early_stop_num: int = -1,
898
+ temperature: float = 1.0,
899
+ repetition_penalty: float = 1.35,
900
+ **kwargs
901
+ ):
902
+ return self.infer_panel_naive(x, x_lens, prompts, bert_feature, top_k, top_p, early_stop_num, temperature, repetition_penalty, **kwargs)
AR/models/t2s_model_onnx.py ADDED
@@ -0,0 +1,338 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import torch
4
+ from tqdm import tqdm
5
+
6
+ from moyoyo_tts.AR.modules.embedding_onnx import SinePositionalEmbedding
7
+ from moyoyo_tts.AR.modules.embedding_onnx import TokenEmbedding
8
+ from moyoyo_tts.AR.modules.transformer_onnx import LayerNorm
9
+ from moyoyo_tts.AR.modules.transformer_onnx import TransformerEncoder
10
+ from moyoyo_tts.AR.modules.transformer_onnx import TransformerEncoderLayer
11
+ from torch import nn
12
+ from torch.nn import functional as F
13
+ from torchmetrics.classification import MulticlassAccuracy
14
+
15
+ default_config = {
16
+ "embedding_dim": 512,
17
+ "hidden_dim": 512,
18
+ "num_head": 8,
19
+ "num_layers": 12,
20
+ "num_codebook": 8,
21
+ "p_dropout": 0.0,
22
+ "vocab_size": 1024 + 1,
23
+ "phoneme_vocab_size": 512,
24
+ "EOS": 1024,
25
+ }
26
+
27
+ inf_tensor_value = torch.FloatTensor([-float("Inf")]).float()
28
+
29
+ def logits_to_probs(
30
+ logits,
31
+ previous_tokens = None,
32
+ temperature: float = 1.0,
33
+ top_k = None,
34
+ top_p = None,
35
+ repetition_penalty: float = 1.0,
36
+ ):
37
+ previous_tokens = previous_tokens.squeeze() if previous_tokens is not None else None  # guard: may be None
38
+ if previous_tokens is not None and repetition_penalty != 1.0:
39
+ previous_tokens = previous_tokens.long()
40
+ score = torch.gather(logits, dim=0, index=previous_tokens)
41
+ score = torch.where(
42
+ score < 0, score * repetition_penalty, score / repetition_penalty
43
+ )
44
+ logits.scatter_(dim=0, index=previous_tokens, src=score)
45
+
46
+ if top_p is not None and top_p < 1.0:
47
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
48
+ cum_probs = torch.cumsum(
49
+ torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
50
+ )
51
+ sorted_indices_to_remove = cum_probs > top_p
52
+ sorted_indices_to_remove[0] = False # keep at least one option
53
+ indices_to_remove = sorted_indices_to_remove.scatter(
54
+ dim=0, index=sorted_indices, src=sorted_indices_to_remove
55
+ )
56
+ logits = logits.masked_fill(indices_to_remove, -float("Inf"))
57
+
58
+ logits = logits / max(temperature, 1e-5)
59
+
60
+ if top_k is not None:
61
+ v, _ = torch.topk(logits, top_k)
62
+ pivot = v.select(-1, -1).unsqueeze(-1)
63
+ logits = torch.where(logits < pivot, inf_tensor_value, logits)
64
+
65
+ probs = torch.nn.functional.softmax(logits, dim=-1)
66
+ return probs
67
+
68
+
69
+ def multinomial_sample_one_no_sync(
70
+ probs_sort
71
+ ): # Does multinomial sampling without a cuda synchronization
72
+ q = torch.randn_like(probs_sort)
73
+ return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
74
+
75
+
76
+ def sample(
77
+ logits,
78
+ previous_tokens,
79
+ **sampling_kwargs,
80
+ ):
81
+ probs = logits_to_probs(
82
+ logits=logits, previous_tokens=previous_tokens, **sampling_kwargs
83
+ )
84
+ idx_next = multinomial_sample_one_no_sync(probs)
85
+ return idx_next, probs
86
+
87
+
88
+ class OnnxEncoder(nn.Module):
89
+ def __init__(self, ar_text_embedding, bert_proj, ar_text_position):
90
+ super().__init__()
91
+ self.ar_text_embedding = ar_text_embedding
92
+ self.bert_proj = bert_proj
93
+ self.ar_text_position = ar_text_position
94
+
95
+ def forward(self, x, bert_feature):
96
+ x = self.ar_text_embedding(x)
97
+ x = x + self.bert_proj(bert_feature.transpose(1, 2))
98
+ return self.ar_text_position(x)
99
+
100
+
101
+ class T2SFirstStageDecoder(nn.Module):
102
+ def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric,
103
+ top_k, early_stop_num, num_layers):
104
+ super().__init__()
105
+ self.ar_audio_embedding = ar_audio_embedding
106
+ self.ar_audio_position = ar_audio_position
107
+ self.h = h
108
+ self.ar_predict_layer = ar_predict_layer
109
+ self.loss_fct = loss_fct
110
+ self.ar_accuracy_metric = ar_accuracy_metric
111
+ self.top_k = top_k
112
+ self.early_stop_num = early_stop_num
113
+ self.num_layers = num_layers
114
+
115
+ def forward(self, x, prompt):
116
+ y = prompt
117
+ x_example = x[:,:,0] * 0.0
118
+ #N, 1, 512
119
+ cache = {
120
+ "all_stage": self.num_layers,
121
+ "k": None,
122
+ "v": None,
123
+ "y_emb": None,
124
+ "first_infer": 1,
125
+ "stage": 0,
126
+ }
127
+
128
+ y_emb = self.ar_audio_embedding(y)
129
+
130
+ cache["y_emb"] = y_emb
131
+ y_pos = self.ar_audio_position(y_emb)
132
+
133
+ xy_pos = torch.concat([x, y_pos], dim=1)
134
+
135
+ y_example = y_pos[:,:,0] * 0.0
136
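+ # (all masks below are derived from zeroed "example" tensors via matmul and
+ # cumsum rather than torch.zeros/torch.triu with static sizes, so every
+ # shape in the exported ONNX graph stays dynamic)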
+ x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example).bool()
137
+ y_attn_mask = torch.ones_like(torch.matmul(y_example.transpose(0, 1), y_example), dtype=torch.int64)
138
+ y_attn_mask = torch.cumsum(y_attn_mask, dim=1) - torch.cumsum(
139
+ torch.ones_like(y_example.transpose(0, 1), dtype=torch.int64), dim=0
140
+ )
141
+ y_attn_mask = y_attn_mask > 0
142
+
143
+ x_y_pad = torch.matmul(x_example.transpose(0, 1), y_example).bool()
144
+ y_x_pad = torch.matmul(y_example.transpose(0, 1), x_example).bool()
145
+ x_attn_mask_pad = torch.cat([x_attn_mask, torch.ones_like(x_y_pad)], dim=1)
146
+ y_attn_mask = torch.cat([y_x_pad, y_attn_mask], dim=1)
147
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
148
+ cache["k"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\
149
+ .unsqueeze(1).repeat(self.num_layers, 1, 1, 1)
150
+ cache["v"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\
151
+ .unsqueeze(1).repeat(self.num_layers, 1, 1, 1)
152
+
153
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
154
+ logits = self.ar_predict_layer(xy_dec[:, -1])
155
+ samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
156
+
157
+ y = torch.concat([y, samples], dim=1)
158
+
159
+ return y, cache["k"], cache["v"], cache["y_emb"], x_example
160
+
161
+
162
+ class T2SStageDecoder(nn.Module):
163
+ def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric,
164
+ top_k, early_stop_num, num_layers):
165
+ super().__init__()
166
+ self.ar_audio_embedding = ar_audio_embedding
167
+ self.ar_audio_position = ar_audio_position
168
+ self.h = h
169
+ self.ar_predict_layer = ar_predict_layer
170
+ self.loss_fct = loss_fct
171
+ self.ar_accuracy_metric = ar_accuracy_metric
172
+ self.top_k = top_k
173
+ self.early_stop_num = early_stop_num
174
+ self.num_layers = num_layers
175
+
176
+ def forward(self, y, k, v, y_emb, x_example):
177
+ cache = {
178
+ "all_stage": self.num_layers,
179
+ "k": torch.nn.functional.pad(k, (0, 0, 0, 0, 0, 1)),
180
+ "v": torch.nn.functional.pad(v, (0, 0, 0, 0, 0, 1)),
181
+ "y_emb": y_emb,
182
+ "first_infer": 0,
183
+ "stage": 0,
184
+ }
185
+
186
+ y_emb = torch.cat(
187
+ [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
188
+ )
189
+ cache["y_emb"] = y_emb
190
+ y_pos = self.ar_audio_position(y_emb)
191
+
192
+ xy_pos = y_pos[:, -1:]
193
+
194
+ y_example = y_pos[:,:,0] * 0.0
195
+
196
+ xy_attn_mask = torch.cat([x_example, y_example], dim=1)
197
+ xy_attn_mask = torch.zeros_like(xy_attn_mask, dtype=torch.bool)
198
+
199
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
200
+ logits = self.ar_predict_layer(xy_dec[:, -1])
201
+ samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
202
+
203
+ y = torch.concat([y, samples], dim=1)
204
+
205
+ return y, cache["k"], cache["v"], cache["y_emb"], logits, samples
206
+
207
+
208
+ class Text2SemanticDecoder(nn.Module):
209
+ def __init__(self, config, norm_first=False, top_k=3):
210
+ super(Text2SemanticDecoder, self).__init__()
211
+ self.model_dim = config["model"]["hidden_dim"]
212
+ self.embedding_dim = config["model"]["embedding_dim"]
213
+ self.num_head = config["model"]["head"]
214
+ self.num_layers = config["model"]["n_layer"]
215
+ self.norm_first = norm_first
216
+ self.vocab_size = config["model"]["vocab_size"]
217
+ self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
218
+ self.p_dropout = float(config["model"]["dropout"])
219
+ self.EOS = config["model"]["EOS"]
220
+ self.norm_first = norm_first
221
+ assert self.EOS == self.vocab_size - 1
222
+ self.bert_proj = nn.Linear(1024, self.embedding_dim)
223
+ self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size, self.p_dropout)
224
+ self.ar_text_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
225
+ self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size, self.p_dropout)
226
+ self.ar_audio_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
227
+ self.h = TransformerEncoder(
228
+ TransformerEncoderLayer(
229
+ d_model=self.model_dim,
230
+ nhead=self.num_head,
231
+ dim_feedforward=self.model_dim * 4,
232
+ dropout=0.1,
233
+ batch_first=True,
234
+ norm_first=norm_first,
235
+ ),
236
+ num_layers=self.num_layers,
237
+ norm=LayerNorm(self.model_dim) if norm_first else None,
238
+ )
239
+ self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
240
+ self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
241
+ self.ar_accuracy_metric = MulticlassAccuracy(
242
+ self.vocab_size,
243
+ top_k=top_k,
244
+ average="micro",
245
+ multidim_average="global",
246
+ ignore_index=self.EOS,
247
+ )
248
+ self.top_k = torch.LongTensor([1])
249
+ self.early_stop_num = torch.LongTensor([-1])
250
+
251
+ def init_onnx(self):
252
+ self.onnx_encoder = OnnxEncoder(self.ar_text_embedding, self.bert_proj, self.ar_text_position)
253
+ self.first_stage_decoder = T2SFirstStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h,
254
+ self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num,
255
+ self.num_layers)
256
+ self.stage_decoder = T2SStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h,
257
+ self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num,
258
+ self.num_layers)
259
+
260
+ def forward(self, x, prompts, bert_feature):
261
+ early_stop_num = self.early_stop_num
262
+ prefix_len = prompts.shape[1]
263
+
264
+ x = self.onnx_encoder(x, bert_feature)
265
+ y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts)
266
+
267
+ stop = False
268
+ for idx in range(1, 1500):
269
+ enco = self.stage_decoder(y, k, v, y_emb, x_example)
270
+ y, k, v, y_emb, logits, samples = enco
271
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
272
+ stop = True
273
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
274
+ stop = True
275
+ if stop:
276
+ break
277
+ y[0, -1] = 0
278
+ return y, idx
279
+
280
+ def infer(self, x, prompts, bert_feature):
281
+ top_k = self.top_k
282
+ early_stop_num = self.early_stop_num
283
+
284
+ x = self.onnx_encoder(x, bert_feature)
285
+
286
+ y = prompts
287
+ prefix_len = y.shape[1]
288
+ x_len = x.shape[1]
289
+ x_example = x[:,:,0] * 0.0
290
+ x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example)
291
+ x_attn_mask = torch.zeros_like(x_attn_mask, dtype=torch.bool)
292
+
293
+ stop = False
294
+ cache = {
295
+ "all_stage": self.num_layers,
296
+ "k": [None] * self.num_layers,
297
+ "v": [None] * self.num_layers,
298
+ "y_emb": None,
299
+ "first_infer": 1,
300
+ "stage": 0,
301
+ }
302
+ for idx in range(1500):
303
+ if cache["first_infer"] == 1:
304
+ y_emb = self.ar_audio_embedding(y)
305
+ else:
306
+ y_emb = torch.cat(
307
+ [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
308
+ )
309
+ cache["y_emb"] = y_emb
310
+ y_pos = self.ar_audio_position(y_emb)
311
+ if cache["first_infer"] == 1:
312
+ xy_pos = torch.concat([x, y_pos], dim=1)
313
+ else:
314
+ xy_pos = y_pos[:, -1:]
315
+ y_len = y_pos.shape[1]
316
+ if cache["first_infer"] == 1:
317
+ x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)
318
+ y_attn_mask = F.pad(
319
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
320
+ (x_len, 0), value=False
321
+ )
322
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
323
+ else:
324
+ xy_attn_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool)
325
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
326
+ logits = self.ar_predict_layer(xy_dec[:, -1])
327
+ samples = sample(logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
328
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
329
+ stop = True
330
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
331
+ stop = True
332
+ if stop:
333
+ if prompts.shape[1] == y.shape[1]:
334
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
335
+ break
336
+ y = torch.concat([y, samples], dim=1)
337
+ cache["first_infer"] = 0
338
+ return y, idx
AR/models/utils.py ADDED
@@ -0,0 +1,229 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from typing import Tuple
6
+
7
+ def sequence_mask(length, max_length=None):
8
+ if max_length is None:
9
+ max_length = length.max()
10
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
11
+ return x.unsqueeze(0) < length.unsqueeze(1)
12
+
13
+
14
+ def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
15
+ """
16
+ Args:
17
+ lengths:
18
+ A 1-D tensor containing sentence lengths.
19
+ max_len:
20
+ The length of masks.
21
+ Returns:
22
+ Return a 2-D bool tensor, where masked positions
23
+ are filled with `True` and non-masked positions are
24
+ filled with `False`.
25
+
26
+ >>> lengths = torch.tensor([1, 3, 2, 5])
27
+ >>> make_pad_mask(lengths)
28
+ tensor([[False, True, True, True, True],
29
+ [False, False, False, True, True],
30
+ [False, False, True, True, True],
31
+ [False, False, False, False, False]])
32
+ """
33
+ assert lengths.ndim == 1, lengths.ndim
34
+ max_len = max(max_len, lengths.max())
35
+ n = lengths.size(0)
36
+ seq_range = torch.arange(0, max_len, device=lengths.device)
37
+ expanded_lengths = seq_range.unsqueeze(0).expand(n, max_len)
38
+
39
+ return expanded_lengths >= lengths.unsqueeze(-1)
40
+
41
+
42
+ # https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py
43
+ def top_k_top_p_filtering(
44
+ logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1
45
+ ):
46
+ """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
47
+ Args:
48
+ logits: logits distribution shape (batch size, vocabulary size)
49
+ if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
50
+ if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
51
+ Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
52
+ Make sure we keep at least min_tokens_to_keep per batch example in the output
53
+ From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
54
+ """
55
+ if top_k > 0:
56
+ top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
57
+ # Remove all tokens with a probability less than the last token of the top-k
58
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
59
+ logits[indices_to_remove] = filter_value
60
+
61
+ if top_p < 1.0:
62
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
63
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
64
+
65
+ # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
66
+ sorted_indices_to_remove = cumulative_probs > top_p
67
+ if min_tokens_to_keep > 1:
68
+ # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
69
+ sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
70
+ # Shift the indices to the right to keep also the first token above the threshold
71
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
72
+ sorted_indices_to_remove[..., 0] = 0
73
+
74
+ # scatter sorted tensors to original indexing
75
+ indices_to_remove = sorted_indices_to_remove.scatter(
76
+ 1, sorted_indices, sorted_indices_to_remove
77
+ )
78
+ logits[indices_to_remove] = filter_value
79
+ return logits
80
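A quick sanity check of the filter above (a minimal sketch, assuming a toy 4-token vocabulary; the tensors are made up for illustration and are not part of the original file):

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
filtered = top_k_top_p_filtering(logits.clone(), top_k=2, top_p=0.95)
# only the two highest logits survive; the rest become -inf
probs = F.softmax(filtered, dim=-1)  # approx. [0.73, 0.27, 0.0, 0.0]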
+
81
+
82
+ def topk_sampling(logits, top_k=10, top_p=1.0, temperature=1.0):
83
+ # temperature: (`optional`) float
84
+ # The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
85
+ # top_k: (`optional`) int
86
+ # The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
87
+ # top_p: (`optional`) float
88
+ # The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
89
+
90
+ # Temperature (higher temperature => more likely to sample low probability tokens)
91
+ if temperature != 1.0:
92
+ logits = logits / temperature
93
+ # Top-p/top-k filtering
94
+ logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
95
+ # Sample
96
+ token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
97
+ return token
98
+
99
+
100
+ from typing import Optional, Tuple
101
+
102
+
103
+ def multinomial_sample_one_no_sync(
104
+ probs_sort,
105
+ ): # Does multinomial sampling without a cuda synchronization
106
+ q = torch.empty_like(probs_sort).exponential_(1)
107
+ return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
108
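The trick here is the "exponential race": with q_i ~ Exp(1), argmax(p_i / q_i) selects index i with probability p_i, so no torch.multinomial call (and no CUDA synchronization) is needed. A minimal empirical check, assuming a toy distribution (not part of the original file):

import torch

torch.manual_seed(0)
probs = torch.tensor([0.1, 0.2, 0.7])
counts = torch.zeros(3)
for _ in range(10_000):
    q = torch.empty_like(probs).exponential_(1)   # same trick as above
    counts[torch.argmax(probs / q)] += 1
print(counts / counts.sum())  # converges toward [0.1, 0.2, 0.7]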
+
109
+
110
+ def logits_to_probs(
111
+ logits,
112
+ previous_tokens: Optional[torch.Tensor] = None,
113
+ temperature: float = 1.0,
114
+ top_k: Optional[int] = None,
115
+ top_p: Optional[int] = None,
116
+ repetition_penalty: float = 1.0,
117
+ ):
118
+ # if previous_tokens is not None:
119
+ # previous_tokens = previous_tokens.squeeze()
120
+ # print(logits.shape,previous_tokens.shape)
121
+ # pdb.set_trace()
122
+ if previous_tokens is not None and repetition_penalty != 1.0:
123
+ previous_tokens = previous_tokens.long()
124
+ score = torch.gather(logits, dim=1, index=previous_tokens)
125
+ score = torch.where(
126
+ score < 0, score * repetition_penalty, score / repetition_penalty
127
+ )
128
+ logits.scatter_(dim=1, index=previous_tokens, src=score)
129
+
130
+ if top_p is not None and top_p < 1.0:
131
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
132
+ cum_probs = torch.cumsum(
133
+ torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
134
+ )
135
+ sorted_indices_to_remove = cum_probs > top_p
136
+ sorted_indices_to_remove[:, 0] = False # keep at least one option
137
+ indices_to_remove = sorted_indices_to_remove.scatter(
138
+ dim=1, index=sorted_indices, src=sorted_indices_to_remove
139
+ )
140
+ logits = logits.masked_fill(indices_to_remove, -float("Inf"))
141
+
142
+ logits = logits / max(temperature, 1e-5)
143
+
144
+ if top_k is not None:
145
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
146
+ pivot = v[:, -1].unsqueeze(-1)
147
+ logits = torch.where(logits < pivot, -float("Inf"), logits)
148
+
149
+ probs = torch.nn.functional.softmax(logits, dim=-1)
150
+ return probs
151
+
152
+
153
+ def sample(
154
+ logits,
155
+ previous_tokens: Optional[torch.Tensor] = None,
156
+ **sampling_kwargs,
157
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
158
+ probs = logits_to_probs(
159
+ logits=logits, previous_tokens=previous_tokens, **sampling_kwargs
160
+ )
161
+ idx_next = multinomial_sample_one_no_sync(probs)
162
+ return idx_next, probs
163
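A sketch of one decoding step through sample(), with hypothetical shapes chosen to match the vocabulary size (1024 + 1) used elsewhere in this repo; every tensor here is random stand-in data:

import torch

logits = torch.randn(1, 1025)                # (bsz, vocab) from the predict layer
history = torch.randint(0, 1024, (1, 32))    # previously generated semantic tokens
token, probs = sample(logits, history, top_k=15, top_p=1.0,
                      temperature=1.0, repetition_penalty=1.35)
print(token.shape)  # torch.Size([1, 1]) -- the next token per batch item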
+
164
+ def dpo_loss(policy_chosen_logps: torch.FloatTensor,
165
+ policy_rejected_logps: torch.FloatTensor,
166
+ reference_chosen_logps: torch.FloatTensor,
167
+ reference_rejected_logps: torch.FloatTensor,
168
+ beta: float,
169
+ reference_free: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
170
+ pi_logratios = policy_chosen_logps - policy_rejected_logps
171
+ ref_logratios = reference_chosen_logps - reference_rejected_logps
172
+
173
+ if reference_free:
174
+ ref_logratios = 0
175
+
176
+ logits = pi_logratios - ref_logratios
177
+
178
+ losses = -F.logsigmoid(beta * logits)
179
+ chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
180
+ rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()
181
+
182
+ return losses.mean(), chosen_rewards, rejected_rewards
183
+
184
+ def get_batch_logps(logits_target: torch.FloatTensor, logits_reject: torch.FloatTensor, labels_target: torch.LongTensor, labels_reject: torch.LongTensor, average_log_prob: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
185
+
186
+ # dummy token; we'll ignore the losses on these tokens later
187
+
188
+ per_token_logps_target = torch.gather(logits_target.log_softmax(-1), dim=2, index=labels_target.unsqueeze(2)).squeeze(2)
189
+ per_token_logps_reject = torch.gather(logits_reject.log_softmax(-1), dim=2, index=labels_reject.unsqueeze(2)).squeeze(2)
190
+
191
+ return per_token_logps_target.sum(-1), per_token_logps_reject.sum(-1)
192
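How the two DPO helpers fit together (a minimal sketch with random stand-in tensors; the shapes are assumptions for illustration, not taken from the training code):

import torch

bsz, seq, vocab = 2, 16, 1025
logits_w = torch.randn(bsz, seq, vocab)   # policy logits, chosen sequences
logits_l = torch.randn(bsz, seq, vocab)   # policy logits, rejected sequences
labels_w = torch.randint(0, vocab, (bsz, seq))
labels_l = torch.randint(0, vocab, (bsz, seq))

pi_w, pi_l = get_batch_logps(logits_w, logits_l, labels_w, labels_l)
# in practice a frozen reference model supplies these; detached copies stand in here:
ref_w, ref_l = get_batch_logps(logits_w.detach(), logits_l.detach(), labels_w, labels_l)
loss, r_w, r_l = dpo_loss(pi_w, pi_l, ref_w, ref_l, beta=0.1)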
+
193
+ def make_reject_y(y_o, y_lens):
194
+ def repeat_P(y):
195
+ range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
196
+ pre = y[:range_idx[0]]
197
+ shf = y[range_idx[1]:]
198
+ range_text = y[range_idx[0]:range_idx[1]]
199
+ new_y = torch.cat([pre, range_text, range_text, shf])
200
+ return new_y
201
+ def lost_P(y):
202
+ range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
203
+ pre = y[:range_idx[0]]
204
+ shf = y[range_idx[1]:]
205
+ range_text = y[range_idx[0]:range_idx[1]]
206
+ new_y = torch.cat([pre, shf])
207
+ return new_y
208
+ bs = len(y_lens)
209
+ reject_y = []
210
+ reject_y_lens = []
211
+ for b in range(bs):
212
+ process_item_idx = torch.randint(0, 1, size=(1, ))[0]
213
+ if process_item_idx == 0:
214
+ new_y = repeat_P(y_o[b])
215
+ reject_y.append(new_y)
216
+ reject_y_lens.append(len(new_y))
217
+ elif process_item_idx==1:
218
+ new_y = lost_P(y_o[b])
219
+ reject_y.append(new_y)
220
+ reject_y_lens.append(len(new_y))
221
+ max_length = max(reject_y_lens)
222
+ for b in range(bs):
223
+ pad_length = max_length - reject_y_lens[b]
224
+ reject_y[b] = torch.cat([reject_y[b], torch.zeros(pad_length, dtype=y_o.dtype, device=y_o.device)], dim=0)
225
+
226
+ reject_y = torch.stack(reject_y, dim = 0)
227
+ reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)
228
+
229
+ return reject_y, reject_y_lens
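make_reject_y builds the "rejected" side of a DPO pair by corrupting the ground-truth codes; note that torch.randint(0, 1, ...) always returns 0, so only the repeat_P branch (duplicating a random span) is currently reachable. A minimal usage sketch, assuming a padded (bsz, T) code batch of stand-in data:

import torch

y = torch.randint(0, 1024, (2, 50))        # ground-truth semantic codes
y_lens = torch.tensor([50, 40])
reject_y, reject_y_lens = make_reject_y(y, y_lens)
print(reject_y.shape, reject_y_lens)       # (2, max_len) and the per-item lengths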
AR/modules/__init__.py ADDED
File without changes
AR/modules/activation.py ADDED
@@ -0,0 +1,429 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
2
+ from typing import Optional
3
+ from typing import Tuple
4
+
5
+ import torch
6
+ from torch import Tensor
7
+ from torch.nn import Linear
8
+ from torch.nn import Module
9
+ from torch.nn import functional as F
10
+ from torch.nn.init import constant_
11
+ from torch.nn.init import xavier_normal_
12
+ from torch.nn.init import xavier_uniform_
13
+ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
14
+ from torch.nn.parameter import Parameter
15
+
16
+ from moyoyo_tts.AR.modules.patched_mha_with_cache import multi_head_attention_forward_patched
17
+
18
+ F.multi_head_attention_forward = multi_head_attention_forward_patched
19
+
20
+
21
+ class MultiheadAttention(Module):
22
+ r"""Allows the model to jointly attend to information
23
+ from different representation subspaces as described in the paper:
24
+ `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
25
+
26
+ Multi-Head Attention is defined as:
27
+
28
+ .. math::
29
+ \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
30
+
31
+ where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
32
+
33
+ ``forward()`` will use a special optimized implementation if all of the following
34
+ conditions are met:
35
+
36
+ - self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
37
+ restriction will be loosened in the future.)
38
+ - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
39
+ - training is disabled (using ``.eval()``)
40
+ - dropout is 0
41
+ - ``add_bias_kv`` is ``False``
42
+ - ``add_zero_attn`` is ``False``
43
+ - ``batch_first`` is ``True`` and the input is batched
44
+ - ``kdim`` and ``vdim`` are equal to ``embed_dim``
45
+ - at most one of ``key_padding_mask`` or ``attn_mask`` is passed
46
+ - if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
47
+ nor ``attn_mask`` is passed
48
+
49
+ If the optimized implementation is in use, a
50
+ `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
51
+ ``query``/``key``/``value`` to represent padding more efficiently than using a
52
+ padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
53
+ will be returned, and an additional speedup proportional to the fraction of the input
54
+ that is padding can be expected.
55
+
56
+ Args:
57
+ embed_dim: Total dimension of the model.
58
+ num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
59
+ across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
60
+ dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
61
+ bias: If specified, adds bias to input / output projection layers. Default: ``True``.
62
+ add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
63
+ add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
64
+ Default: ``False``.
65
+ kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
66
+ vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
67
+ batch_first: If ``True``, then the input and output tensors are provided
68
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
69
+
70
+ Examples::
71
+
72
+ >>> # xdoctest: +SKIP
73
+ >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
74
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
75
+
76
+ """
77
+ __constants__ = ["batch_first"]
78
+ bias_k: Optional[torch.Tensor]
79
+ bias_v: Optional[torch.Tensor]
80
+
81
+ def __init__(
82
+ self,
83
+ embed_dim,
84
+ num_heads,
85
+ dropout=0.0,
86
+ bias=True,
87
+ add_bias_kv=False,
88
+ add_zero_attn=False,
89
+ kdim=None,
90
+ vdim=None,
91
+ batch_first=False,
92
+ linear1_cls=Linear,
93
+ linear2_cls=Linear,
94
+ device=None,
95
+ dtype=None,
96
+ ) -> None:
97
+ factory_kwargs = {"device": device, "dtype": dtype}
98
+ super(MultiheadAttention, self).__init__()
99
+ self.embed_dim = embed_dim
100
+ self.kdim = kdim if kdim is not None else embed_dim
101
+ self.vdim = vdim if vdim is not None else embed_dim
102
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
103
+
104
+ self.num_heads = num_heads
105
+ self.dropout = dropout
106
+ self.batch_first = batch_first
107
+ self.head_dim = embed_dim // num_heads
108
+ assert (
109
+ self.head_dim * num_heads == self.embed_dim
110
+ ), "embed_dim must be divisible by num_heads"
111
+
112
+ if add_bias_kv:
113
+ self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
114
+ self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
115
+ else:
116
+ self.bias_k = self.bias_v = None
117
+
118
+ if linear1_cls == Linear:
119
+ if not self._qkv_same_embed_dim:
120
+ self.q_proj_weight = Parameter(
121
+ torch.empty((embed_dim, embed_dim), **factory_kwargs)
122
+ )
123
+ self.k_proj_weight = Parameter(
124
+ torch.empty((embed_dim, self.kdim), **factory_kwargs)
125
+ )
126
+ self.v_proj_weight = Parameter(
127
+ torch.empty((embed_dim, self.vdim), **factory_kwargs)
128
+ )
129
+ self.register_parameter("in_proj_weight", None)
130
+ else:
131
+ self.in_proj_weight = Parameter(
132
+ torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
133
+ )
134
+ self.register_parameter("q_proj_weight", None)
135
+ self.register_parameter("k_proj_weight", None)
136
+ self.register_parameter("v_proj_weight", None)
137
+
138
+ if bias:
139
+ self.in_proj_bias = Parameter(
140
+ torch.empty(3 * embed_dim, **factory_kwargs)
141
+ )
142
+ else:
143
+ self.register_parameter("in_proj_bias", None)
144
+ self.out_proj = NonDynamicallyQuantizableLinear(
145
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
146
+ )
147
+
148
+ self._reset_parameters()
149
+ else:
150
+ if not self._qkv_same_embed_dim:
151
+ raise NotImplementedError
152
+ else:
153
+ self.in_proj_linear = linear1_cls(
154
+ embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs
155
+ )
156
+ self.in_proj_weight = self.in_proj_linear.weight
157
+
158
+ self.register_parameter("q_proj_weight", None)
159
+ self.register_parameter("k_proj_weight", None)
160
+ self.register_parameter("v_proj_weight", None)
161
+
162
+ if bias:
163
+ self.in_proj_bias = self.in_proj_linear.bias
164
+ else:
165
+ self.register_parameter("in_proj_bias", None)
166
+
167
+ self.out_proj = linear2_cls(
168
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
169
+ )
170
+
171
+ if self.bias_k is not None:
172
+ xavier_normal_(self.bias_k)
173
+ if self.bias_v is not None:
174
+ xavier_normal_(self.bias_v)
175
+
176
+ self.add_zero_attn = add_zero_attn
177
+
178
+ def _reset_parameters(self):
179
+ if self._qkv_same_embed_dim:
180
+ xavier_uniform_(self.in_proj_weight)
181
+ else:
182
+ xavier_uniform_(self.q_proj_weight)
183
+ xavier_uniform_(self.k_proj_weight)
184
+ xavier_uniform_(self.v_proj_weight)
185
+
186
+ if self.in_proj_bias is not None:
187
+ constant_(self.in_proj_bias, 0.0)
188
+ constant_(self.out_proj.bias, 0.0)
189
+
190
+ if self.bias_k is not None:
191
+ xavier_normal_(self.bias_k)
192
+ if self.bias_v is not None:
193
+ xavier_normal_(self.bias_v)
194
+
195
+ def __setstate__(self, state):
196
+ # Support loading old MultiheadAttention checkpoints generated by v1.1.0
197
+ if "_qkv_same_embed_dim" not in state:
198
+ state["_qkv_same_embed_dim"] = True
199
+
200
+ super(MultiheadAttention, self).__setstate__(state)
201
+
202
+ def forward(
203
+ self,
204
+ query: Tensor,
205
+ key: Tensor,
206
+ value: Tensor,
207
+ key_padding_mask: Optional[Tensor] = None,
208
+ need_weights: bool = True,
209
+ attn_mask: Optional[Tensor] = None,
210
+ average_attn_weights: bool = True,
211
+ cache=None,
212
+ ) -> Tuple[Tensor, Optional[Tensor]]:
213
+ r"""
214
+ Args:
215
+ query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
216
+ or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
217
+ :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
218
+ Queries are compared against key-value pairs to produce the output.
219
+ See "Attention Is All You Need" for more details.
220
+ key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
221
+ or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
222
+ :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
223
+ See "Attention Is All You Need" for more details.
224
+ value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
225
+ ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
226
+ sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
227
+ See "Attention Is All You Need" for more details.
228
+ key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
229
+ to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
230
+ Binary and byte masks are supported.
231
+ For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
232
+ the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
233
+ need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
234
+ Default: ``True``.
235
+ attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
236
+ :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
237
+ :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
238
+ broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
239
+ Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
240
+ corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
241
+ corresponding position is not allowed to attend. For a float mask, the mask values will be added to
242
+ the attention weight.
243
+ average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
244
+ heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
245
+ effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
246
+
247
+ Outputs:
248
+ - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
249
+ :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
250
+ where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
251
+ embedding dimension ``embed_dim``.
252
+ - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
253
+ returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
254
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
255
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
256
+ head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
257
+
258
+ .. note::
259
+ `batch_first` argument is ignored for unbatched inputs.
260
+ """
261
+ is_batched = query.dim() == 3
262
+ if key_padding_mask is not None:
263
+ _kpm_dtype = key_padding_mask.dtype
264
+ if _kpm_dtype != torch.bool and not torch.is_floating_point(
265
+ key_padding_mask
266
+ ):
267
+ raise AssertionError(
268
+ "only bool and floating types of key_padding_mask are supported"
269
+ )
270
+ why_not_fast_path = ""
271
+ if not is_batched:
272
+ why_not_fast_path = (
273
+ f"input not batched; expected query.dim() of 3 but got {query.dim()}"
274
+ )
275
+ elif query is not key or key is not value:
276
+ # When lifting this restriction, don't forget to either
277
+ # enforce that the dtypes all match or test cases where
278
+ # they don't!
279
+ why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
280
+ elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
281
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
282
+ elif (
283
+ self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype
284
+ ):
285
+ # this case will fail anyway, but at least they'll get a useful error message.
286
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
287
+ elif self.training:
288
+ why_not_fast_path = "training is enabled"
289
+ elif not self.batch_first:
290
+ why_not_fast_path = "batch_first was not True"
291
+ elif self.bias_k is not None:
292
+ why_not_fast_path = "self.bias_k was not None"
293
+ elif self.bias_v is not None:
294
+ why_not_fast_path = "self.bias_v was not None"
295
+ elif self.dropout:
296
+ why_not_fast_path = f"dropout was {self.dropout}, required zero"
297
+ elif self.add_zero_attn:
298
+ why_not_fast_path = "add_zero_attn was enabled"
299
+ elif not self._qkv_same_embed_dim:
300
+ why_not_fast_path = "_qkv_same_embed_dim was not True"
301
+ elif attn_mask is not None:
302
+ why_not_fast_path = "attn_mask was not None"
303
+ elif query.is_nested and key_padding_mask is not None:
304
+ why_not_fast_path = (
305
+ "key_padding_mask is not supported with NestedTensor input"
306
+ )
307
+ elif self.num_heads % 2 == 1:
308
+ why_not_fast_path = "num_heads is odd"
309
+ elif torch.is_autocast_enabled():
310
+ why_not_fast_path = "autocast is enabled"
311
+
312
+ if not why_not_fast_path:
313
+ tensor_args = (
314
+ query,
315
+ key,
316
+ value,
317
+ self.in_proj_weight,
318
+ self.in_proj_bias,
319
+ self.out_proj.weight,
320
+ self.out_proj.bias,
321
+ )
322
+ # We have to use list comprehensions below because TorchScript does not support
323
+ # generator expressions.
324
+ if torch.overrides.has_torch_function(tensor_args):
325
+ why_not_fast_path = "some Tensor argument has_torch_function"
326
+ elif not all(
327
+ [
328
+ (x is None or x.is_cuda or "cpu" in str(x.device))
329
+ for x in tensor_args
330
+ ]
331
+ ):
332
+ why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
333
+ elif torch.is_grad_enabled() and any(
334
+ [x is not None and x.requires_grad for x in tensor_args]
335
+ ):
336
+ why_not_fast_path = (
337
+ "grad is enabled and at least one of query or the "
338
+ "input/output projection weights or biases requires_grad"
339
+ )
340
+ if not why_not_fast_path:
341
+ return torch._native_multi_head_attention(
342
+ query,
343
+ key,
344
+ value,
345
+ self.embed_dim,
346
+ self.num_heads,
347
+ self.in_proj_weight,
348
+ self.in_proj_bias,
349
+ self.out_proj.weight,
350
+ self.out_proj.bias,
351
+ key_padding_mask if key_padding_mask is not None else attn_mask,
352
+ need_weights,
353
+ average_attn_weights,
354
+ 1
355
+ if key_padding_mask is not None
356
+ else 0
357
+ if attn_mask is not None
358
+ else None,
359
+ )
360
+
361
+ any_nested = query.is_nested or key.is_nested or value.is_nested
362
+ assert not any_nested, (
363
+ "MultiheadAttention does not support NestedTensor outside of its fast path. "
364
+ + f"The fast path was not hit because {why_not_fast_path}"
365
+ )
366
+
367
+ if self.batch_first and is_batched:
368
+ # make sure that the transpose op does not affect the "is" property
369
+ if key is value:
370
+ if query is key:
371
+ query = key = value = query.transpose(1, 0)
372
+ else:
373
+ query, key = [x.transpose(1, 0) for x in (query, key)]
374
+ value = key
375
+ else:
376
+ query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
377
+
378
+ if not self._qkv_same_embed_dim:
379
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
380
+ query,
381
+ key,
382
+ value,
383
+ self.embed_dim,
384
+ self.num_heads,
385
+ self.in_proj_weight,
386
+ self.in_proj_bias,
387
+ self.bias_k,
388
+ self.bias_v,
389
+ self.add_zero_attn,
390
+ self.dropout,
391
+ self.out_proj.weight,
392
+ self.out_proj.bias,
393
+ training=self.training,
394
+ key_padding_mask=key_padding_mask,
395
+ need_weights=need_weights,
396
+ attn_mask=attn_mask,
397
+ use_separate_proj_weight=True,
398
+ q_proj_weight=self.q_proj_weight,
399
+ k_proj_weight=self.k_proj_weight,
400
+ v_proj_weight=self.v_proj_weight,
401
+ average_attn_weights=average_attn_weights,
402
+ cache=cache,
403
+ )
404
+ else:
405
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
406
+ query,
407
+ key,
408
+ value,
409
+ self.embed_dim,
410
+ self.num_heads,
411
+ self.in_proj_weight,
412
+ self.in_proj_bias,
413
+ self.bias_k,
414
+ self.bias_v,
415
+ self.add_zero_attn,
416
+ self.dropout,
417
+ self.out_proj.weight,
418
+ self.out_proj.bias,
419
+ training=self.training,
420
+ key_padding_mask=key_padding_mask,
421
+ need_weights=need_weights,
422
+ attn_mask=attn_mask,
423
+ average_attn_weights=average_attn_weights,
424
+ cache=cache,
425
+ )
426
+ if self.batch_first and is_batched:
427
+ return attn_output.transpose(1, 0), attn_output_weights
428
+ else:
429
+ return attn_output, attn_output_weights
AR/modules/activation_onnx.py ADDED
@@ -0,0 +1,178 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
2
+ from typing import Optional
3
+ from typing import Tuple
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn import Linear
7
+ from torch.nn import Module
8
+ from torch.nn.init import constant_
9
+ from torch.nn.init import xavier_normal_
10
+ from torch.nn.init import xavier_uniform_
11
+ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
12
+ from torch.nn.parameter import Parameter
13
+
14
+ from torch.nn import functional as F
15
+ from moyoyo_tts.AR.modules.patched_mha_with_cache_onnx import multi_head_attention_forward_patched
16
+
17
+
18
+ class MultiheadAttention(Module):
19
+ __constants__ = ["batch_first"]
20
+ bias_k: Optional[torch.Tensor]
21
+ bias_v: Optional[torch.Tensor]
22
+
23
+ def __init__(
24
+ self,
25
+ embed_dim,
26
+ num_heads,
27
+ dropout=0.0,
28
+ bias=True,
29
+ add_bias_kv=False,
30
+ add_zero_attn=False,
31
+ kdim=None,
32
+ vdim=None,
33
+ batch_first=False,
34
+ linear1_cls=Linear,
35
+ linear2_cls=Linear,
36
+ device=None,
37
+ dtype=None,
38
+ ) -> None:
39
+ factory_kwargs = {"device": device, "dtype": dtype}
40
+ super(MultiheadAttention, self).__init__()
41
+ self.embed_dim = embed_dim
42
+ self.kdim = kdim if kdim is not None else embed_dim
43
+ self.vdim = vdim if vdim is not None else embed_dim
44
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
45
+
46
+ self.num_heads = num_heads
47
+ self.dropout = dropout
48
+ self.batch_first = batch_first
49
+ self.head_dim = embed_dim // num_heads
50
+ assert (
51
+ self.head_dim * num_heads == self.embed_dim
52
+ ), "embed_dim must be divisible by num_heads"
53
+
54
+ if add_bias_kv:
55
+ self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
56
+ self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
57
+ else:
58
+ self.bias_k = self.bias_v = None
59
+
60
+ if linear1_cls == Linear:
61
+ if not self._qkv_same_embed_dim:
62
+ self.q_proj_weight = Parameter(
63
+ torch.empty((embed_dim, embed_dim), **factory_kwargs)
64
+ )
65
+ self.k_proj_weight = Parameter(
66
+ torch.empty((embed_dim, self.kdim), **factory_kwargs)
67
+ )
68
+ self.v_proj_weight = Parameter(
69
+ torch.empty((embed_dim, self.vdim), **factory_kwargs)
70
+ )
71
+ self.register_parameter("in_proj_weight", None)
72
+ else:
73
+ self.in_proj_weight = Parameter(
74
+ torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
75
+ )
76
+ self.register_parameter("q_proj_weight", None)
77
+ self.register_parameter("k_proj_weight", None)
78
+ self.register_parameter("v_proj_weight", None)
79
+
80
+ if bias:
81
+ self.in_proj_bias = Parameter(
82
+ torch.empty(3 * embed_dim, **factory_kwargs)
83
+ )
84
+ else:
85
+ self.register_parameter("in_proj_bias", None)
86
+ self.out_proj = NonDynamicallyQuantizableLinear(
87
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
88
+ )
89
+
90
+ self._reset_parameters()
91
+ else:
92
+ if not self._qkv_same_embed_dim:
93
+ raise NotImplementedError
94
+ else:
95
+ self.in_proj_linear = linear1_cls(
96
+ embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs
97
+ )
98
+ self.in_proj_weight = self.in_proj_linear.weight
99
+
100
+ self.register_parameter("q_proj_weight", None)
101
+ self.register_parameter("k_proj_weight", None)
102
+ self.register_parameter("v_proj_weight", None)
103
+
104
+ if bias:
105
+ self.in_proj_bias = self.in_proj_linear.bias
106
+ else:
107
+ self.register_parameter("in_proj_bias", None)
108
+
109
+ self.out_proj = linear2_cls(
110
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
111
+ )
112
+
113
+ if self.bias_k is not None:
114
+ xavier_normal_(self.bias_k)
115
+ if self.bias_v is not None:
116
+ xavier_normal_(self.bias_v)
117
+
118
+ self.add_zero_attn = add_zero_attn
119
+
120
+ def _reset_parameters(self):
121
+ if self._qkv_same_embed_dim:
122
+ xavier_uniform_(self.in_proj_weight)
123
+ else:
124
+ xavier_uniform_(self.q_proj_weight)
125
+ xavier_uniform_(self.k_proj_weight)
126
+ xavier_uniform_(self.v_proj_weight)
127
+
128
+ if self.in_proj_bias is not None:
129
+ constant_(self.in_proj_bias, 0.0)
130
+ constant_(self.out_proj.bias, 0.0)
131
+
132
+ if self.bias_k is not None:
133
+ xavier_normal_(self.bias_k)
134
+ if self.bias_v is not None:
135
+ xavier_normal_(self.bias_v)
136
+
137
+ def __setstate__(self, state):
138
+ # Support loading old MultiheadAttention checkpoints generated by v1.1.0
139
+ if "_qkv_same_embed_dim" not in state:
140
+ state["_qkv_same_embed_dim"] = True
141
+
142
+ super(MultiheadAttention, self).__setstate__(state)
143
+
144
+ def forward(
145
+ self,
146
+ query: Tensor,
147
+ key: Tensor,
148
+ value: Tensor,
149
+ key_padding_mask: Optional[Tensor] = None,
150
+ need_weights: bool = True,
151
+ attn_mask: Optional[Tensor] = None,
152
+ average_attn_weights: bool = True,
153
+ cache=None,
154
+ ) -> Tuple[Tensor, Optional[Tensor]]:
155
+ any_nested = query.is_nested or key.is_nested or value.is_nested
156
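+ # (the ONNX path handles self-attention only: key and value are aliased to
+ # query, and inputs are moved to (seq, batch, dim) for the patched forward)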
+ query = key = value = query.transpose(1, 0)
157
+ attn_output = multi_head_attention_forward_patched(
158
+ query,
159
+ key,
160
+ value,
161
+ self.embed_dim,
162
+ self.num_heads,
163
+ self.in_proj_weight,
164
+ self.in_proj_bias,
165
+ self.bias_k,
166
+ self.bias_v,
167
+ self.add_zero_attn,
168
+ self.dropout,
169
+ self.out_proj.weight,
170
+ self.out_proj.bias,
171
+ training=self.training,
172
+ key_padding_mask=key_padding_mask,
173
+ need_weights=need_weights,
174
+ attn_mask=attn_mask,
175
+ average_attn_weights=average_attn_weights,
176
+ cache=cache,
177
+ )
178
+ return attn_output.transpose(1, 0)
AR/modules/embedding.py ADDED
@@ -0,0 +1,81 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
import math

import torch
from torch import nn


class TokenEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        vocab_size: int,
        dropout: float = 0.0,
    ):
        super().__init__()

        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim

        self.dropout = torch.nn.Dropout(p=dropout)
        self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)

    @property
    def weight(self) -> torch.Tensor:
        return self.word_embeddings.weight

    def embedding(self, index: int) -> torch.Tensor:
        return self.word_embeddings.weight[index : index + 1]

    def forward(self, x: torch.Tensor):
        x = self.word_embeddings(x)
        x = self.dropout(x)
        return x


class SinePositionalEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        dropout: float = 0.0,
        scale: bool = False,
        alpha: bool = False,
    ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
        self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
        self.dropout = torch.nn.Dropout(p=dropout)

        self.reverse = False
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, 4000))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            if self.pe.size(1) >= x.size(1):
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.embedding_dim)
        if self.reverse:
            position = torch.arange(
                x.size(1) - 1, -1, -1.0, dtype=torch.float32
            ).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.embedding_dim, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.embedding_dim)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype).detach()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.extend_pe(x)
        output = x.unsqueeze(-1) if x.ndim == 2 else x
        output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)]
        return self.dropout(output)
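A minimal usage sketch of how these two modules compose (the sizes here are illustrative, not taken from any config in this repo):

    emb = TokenEmbedding(embedding_dim=512, vocab_size=1025)
    pos = SinePositionalEmbedding(embedding_dim=512, alpha=True)
    tokens = torch.randint(0, 1025, (2, 50))  # (batch, time) token ids
    h = pos(emb(tokens))                      # (2, 50, 512): embeddings plus alpha-weighted sinusoids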
AR/modules/embedding_onnx.py ADDED
@@ -0,0 +1,63 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
import math

import torch
from torch import nn


class TokenEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        vocab_size: int,
        dropout: float = 0.0,
    ):
        super().__init__()

        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim

        self.dropout = torch.nn.Dropout(p=dropout)
        self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)

    @property
    def weight(self) -> torch.Tensor:
        return self.word_embeddings.weight

    def embedding(self, index: int) -> torch.Tensor:
        return self.word_embeddings.weight[index : index + 1]

    def forward(self, x: torch.Tensor):
        x = self.word_embeddings(x)
        x = self.dropout(x)
        return x


class SinePositionalEmbedding(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        dropout: float = 0.0,
        scale: bool = False,
        alpha: bool = False,
    ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
        self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
        self.dropout = torch.nn.Dropout(p=dropout)
        self.reverse = False
        self.div_term = torch.exp(torch.arange(0, self.embedding_dim, 2) * -(math.log(10000.0) / self.embedding_dim))

    def extend_pe(self, x):
        position = torch.cumsum(torch.ones_like(x[:, :, 0]), dim=1).transpose(0, 1)
        scpe = (position * self.div_term).unsqueeze(0)
        pe = torch.cat([torch.sin(scpe), torch.cos(scpe)]).permute(1, 2, 0)
        pe = pe.contiguous().view(1, -1, self.embedding_dim)
        return pe

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        pe = self.extend_pe(x)
        output = x.unsqueeze(-1) if x.ndim == 2 else x
        output = output * self.x_scale + self.alpha * pe
        return self.dropout(output)
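Compared with `embedding.py` above, this variant derives positions from a `cumsum` over a ones tensor rather than indexing a pre-built 4000-step buffer, so the exported ONNX graph stays shape-generic along the time axis. Note that the broadcast in `extend_pe` appears to assume batch size 1 (the usual case when tracing for export), and positions start at 1 rather than 0. A small illustration:

    x = torch.randn(1, 7, 8)                                  # (B=1, T, C)
    position = torch.cumsum(torch.ones_like(x[:, :, 0]), 1)   # tensor([[1., 2., ..., 7.]])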
AR/modules/lr_schedulers.py ADDED
@@ -0,0 +1,83 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/modules/lr_schedulers.py
# reference: https://github.com/lifeiteng/vall-e
import math

import torch
from torch import nn
from torch.optim import Adam


class WarmupCosineLRSchedule(torch.optim.lr_scheduler._LRScheduler):
    """
    Implements a warmup learning rate schedule: the LR rises from 'init_lr' to 'peak_lr'
    over 'warmup_steps', then decays with a cosine to 'end_lr'.
    """

    def __init__(
        self,
        optimizer,
        init_lr,
        peak_lr,
        end_lr,
        warmup_steps=10000,
        total_steps=400000,
        current_step=0,
    ):
        self.init_lr = init_lr
        self.peak_lr = peak_lr
        self.end_lr = end_lr
        self.optimizer = optimizer
        self._warmup_rate = (peak_lr - init_lr) / warmup_steps
        self._decay_rate = (end_lr - peak_lr) / (total_steps - warmup_steps)
        self._current_step = current_step
        self.lr = init_lr
        self.warmup_steps = warmup_steps
        self.total_steps = total_steps
        self._last_lr = [self.lr]

    def set_lr(self, lr):
        self._last_lr = [g["lr"] for g in self.optimizer.param_groups]
        for g in self.optimizer.param_groups:
            # g['lr'] = lr
            g["lr"] = self.end_lr  # locked: always use end_lr (schedule disabled)

    def step(self):
        if self._current_step < self.warmup_steps:
            lr = self.init_lr + self._warmup_rate * self._current_step

        elif self._current_step > self.total_steps:
            lr = self.end_lr

        else:
            decay_ratio = (self._current_step - self.warmup_steps) / (
                self.total_steps - self.warmup_steps
            )
            if decay_ratio < 0.0 or decay_ratio > 1.0:
                raise RuntimeError(
                    "Decay ratio must be in [0.0, 1.0]. Fix LR scheduler settings."
                )
            coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
            lr = self.end_lr + coeff * (self.peak_lr - self.end_lr)

        self.lr = lr = self.end_lr = 0.002  # hard-locked: the schedule misbehaved, so pin the LR to a constant
        self.set_lr(lr)
        self.lr = lr
        self._current_step += 1
        return self.lr


if __name__ == "__main__":
    from matplotlib import pyplot as plt

    m = nn.Linear(10, 10)
    opt = Adam(m.parameters(), lr=1e-4)
    s = WarmupCosineLRSchedule(
        opt, 1e-6, 2e-4, 1e-6, warmup_steps=2000, total_steps=20000, current_step=0
    )
    lrs = []
    for i in range(25000):
        s.step()
        lrs.append(s.lr)
        print(s.lr)

    plt.plot(range(0, 25000), lrs)
    plt.show()
AR/modules/optim.py ADDED
@@ -0,0 +1,622 @@
# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
from collections import defaultdict
from typing import List
from typing import Tuple

import torch
from torch import Tensor
from torch.optim import Optimizer


class BatchedOptimizer(Optimizer):
    """
    This class adds to class Optimizer the capability to optimize parameters in batches:
    it will stack the parameters and their grads for you so the optimizer can work
    on tensors with an extra leading dimension. This is intended for speed with GPUs,
    as it reduces the number of kernels launched in the optimizer.

    Args:
        params:
    """

    def __init__(self, params, defaults):
        super(BatchedOptimizer, self).__init__(params, defaults)

    @contextlib.contextmanager
    def batched_params(self, param_group, group_params_names):
        """
        This function returns (technically, yields) a list of
        tuples (p, state), where
        p is a `fake` parameter that is stacked (over axis 0) from real parameters
        that share the same shape, and its gradient is also stacked;
        `state` is the state corresponding to this batch of parameters
        (it will be physically located in the "state" for one of the real
        parameters, the last one that has any particular shape and dtype).

        This function is decorated as a context manager so that it can
        write parameters back to their "real" locations.

        The idea is, instead of doing:
        <code>
          for p in group["params"]:
              state = self.state[p]
              ...
        </code>
        you can do:
        <code>
          with self.batched_params(group["params"]) as batches:
              for p, state, p_names in batches:
                  ...
        </code>

        Args:
            group: a parameter group, which is a list of parameters; should be
                one of self.param_groups.
            group_params_names: name for each parameter in group,
                which is List[str].
        """
        batches = defaultdict(
            list
        )  # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
        batches_names = defaultdict(
            list
        )  # `batches_names` maps from tuple (dtype_as_str,*shape) to list of str

        assert len(param_group) == len(group_params_names)
        for p, named_p in zip(param_group, group_params_names):
            key = (str(p.dtype), *p.shape)
            batches[key].append(p)
            batches_names[key].append(named_p)

        batches_names_keys = list(batches_names.keys())
        sorted_idx = sorted(
            range(len(batches_names)), key=lambda i: batches_names_keys[i])
        batches_names = [
            batches_names[batches_names_keys[idx]] for idx in sorted_idx
        ]
        batches = [batches[batches_names_keys[idx]] for idx in sorted_idx]

        stacked_params_dict = dict()

        # turn batches into a list, in deterministic order.
        # tuples will contain tuples of (stacked_param, state, stacked_params_names),
        # one for each batch in `batches`.
        tuples = []

        for batch, batch_names in zip(batches, batches_names):
            p = batch[0]
            # we arbitrarily store the state in the
            # state corresponding to the 1st parameter in the
            # group. class Optimizer will take care of saving/loading state.
            state = self.state[p]
            p_stacked = torch.stack(batch)
            grad = torch.stack([
                torch.zeros_like(p) if p.grad is None else p.grad for p in batch
            ])
            p_stacked.grad = grad
            stacked_params_dict[key] = p_stacked
            tuples.append((p_stacked, state, batch_names))

        yield tuples  # <-- calling code will do the actual optimization here!

        for ((stacked_params, _state, _names), batch) in zip(tuples, batches):
            for i, p in enumerate(batch):  # batch is list of Parameter
                p.copy_(stacked_params[i])


class ScaledAdam(BatchedOptimizer):
    """
    Implements 'Scaled Adam', a variant of Adam where we scale each parameter's update
    proportional to the norm of that parameter; and also learn the scale of the parameter,
    in log space, subject to upper and lower limits (as if we had factored each parameter as
    param = underlying_param * log_scale.exp())


    Args:
        params: The parameters or param_groups to optimize (like other Optimizer subclasses)
        lr: The learning rate. We will typically use a learning rate schedule that starts
            at 0.03 and decreases over time, i.e. much higher than other common
            optimizers.
        clipping_scale: (e.g. 2.0)
            A scale for gradient-clipping: if specified, the normalized gradients
            over the whole model will be clipped to have 2-norm equal to
            `clipping_scale` times the median 2-norm over the most recent period
            of `clipping_update_period` minibatches. By "normalized gradients",
            we mean after multiplying by the rms parameter value for this tensor
            [for non-scalars]; this is appropriate because our update is scaled
            by this quantity.
        betas: beta1, beta2 are momentum constants for regular momentum, and moving sum-sq grad.
            Must satisfy 0 < beta1 <= beta2 < 1.
        scalar_lr_scale: A scaling factor on the learning rate, that we use to update the
            scale of each parameter tensor and scalar parameters of the model.
            If each parameter were decomposed
            as p * p_scale.exp(), where (p**2).mean().sqrt() == 1.0, scalar_lr_scale
            would be the scaling factor on the learning rate of p_scale.
        eps: A general-purpose epsilon to prevent division by zero
        param_min_rms: Minimum root-mean-square value of parameter tensor, for purposes of
            learning the scale on the parameters (we'll constrain the rms of each non-scalar
            parameter tensor to be >= this value)
        param_max_rms: Maximum root-mean-square value of parameter tensor, for purposes of
            learning the scale on the parameters (we'll constrain the rms of each non-scalar
            parameter tensor to be <= this value)
        scalar_max: Maximum absolute value for scalar parameters (applicable if your
            model has any parameters with numel() == 1).
        size_update_period: The periodicity, in steps, with which we update the size (scale)
            of the parameter tensor. This is provided to save a little time
            in the update.
        clipping_update_period: if clipping_scale is specified, this is the period
    """

    def __init__(
            self,
            params,
            lr=3e-02,
            clipping_scale=None,
            betas=(0.9, 0.98),
            scalar_lr_scale=0.1,
            eps=1.0e-08,
            param_min_rms=1.0e-05,
            param_max_rms=3.0,
            scalar_max=10.0,
            size_update_period=4,
            clipping_update_period=100,
            parameters_names=None,
            show_dominant_parameters=True, ):

        assert parameters_names is not None, (
            "Please prepare parameters_names, "
            "which is a List[List[str]]. Each List[str] is for a group "
            "and each str is for a parameter")
        defaults = dict(
            lr=lr,
            clipping_scale=clipping_scale,
            betas=betas,
            scalar_lr_scale=scalar_lr_scale,
            eps=eps,
            param_min_rms=param_min_rms,
            param_max_rms=param_max_rms,
            scalar_max=scalar_max,
            size_update_period=size_update_period,
            clipping_update_period=clipping_update_period, )

        super(ScaledAdam, self).__init__(params, defaults)
        assert len(self.param_groups) == len(parameters_names)
        self.parameters_names = parameters_names
        self.show_dominant_parameters = show_dominant_parameters

    def __setstate__(self, state):
        super(ScaledAdam, self).__setstate__(state)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        batch = True

        for group, group_params_names in zip(self.param_groups,
                                             self.parameters_names):

            with self.batched_params(group["params"],
                                     group_params_names) as batches:

                # batches is list of pairs (stacked_param, state). stacked_param is like
                # a regular parameter, and will have a .grad, but the 1st dim corresponds to
                # a stacking dim, it is not a real dim.

                if (len(batches[0][1]) ==
                        0):  # if len(first state) == 0: not yet initialized
                    clipping_scale = 1
                else:
                    clipping_scale = self._get_clipping_scale(group, batches)

                for p, state, _ in batches:
                    # Perform optimization step.
                    # grad is not going to be None, we handled that when creating the batches.
                    grad = p.grad
                    if grad.is_sparse:
                        raise RuntimeError(
                            "ScaledAdam optimizer does not support sparse gradients"
                        )
                    # State initialization
                    if len(state) == 0:
                        self._init_state(group, p, state)

                    self._step_one_batch(group, p, state, clipping_scale)

        return loss

    def _init_state(self, group: dict, p: Tensor, state: dict):
        """
        Initializes state dict for parameter 'p'. Assumes that dim 0 of tensor p
        is actually the batch dimension, corresponding to batched-together
        parameters of a given shape.


        Args:
            group: Dict to look up configuration values.
            p: The parameter that we are initializing the state for
            state: Dict from string to whatever state we are initializing
        """
        size_update_period = group["size_update_period"]

        state["step"] = 0

        kwargs = {"device": p.device, "dtype": p.dtype}

        # 'delta' implements conventional momentum. There are
        # several different kinds of update going on, so rather than
        # compute "exp_avg" like in Adam, we store and decay a
        # parameter-change "delta", which combines all forms of
        # update. this is equivalent to how it's done in Adam,
        # except for the first few steps.
        state["delta"] = torch.zeros_like(
            p, memory_format=torch.preserve_format)

        batch_size = p.shape[0]
        numel = p.numel() // batch_size
        numel = p.numel()

        if numel > 1:
            # "param_rms" just periodically records the scalar root-mean-square value of
            # the parameter tensor.
            # it has a shape like (batch_size, 1, 1, 1, 1)
            param_rms = (
                (p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt())
            state["param_rms"] = param_rms

            state["scale_exp_avg_sq"] = torch.zeros_like(param_rms)
            state["scale_grads"] = torch.zeros(size_update_period,
                                               *param_rms.shape, **kwargs)

        # exp_avg_sq is the weighted sum of scaled gradients, as in Adam.
        state["exp_avg_sq"] = torch.zeros_like(
            p, memory_format=torch.preserve_format)

    def _get_clipping_scale(self,
                            group: dict,
                            tuples: List[Tuple[Tensor, dict, List[str]]]
                            ) -> float:
        """
        Returns a scalar factor <= 1.0 that dictates gradient clipping, i.e. we will scale the gradients
        by this amount before applying the rest of the update.

        Args:
            group: the parameter group, an item in self.param_groups
            tuples: a list of tuples of (param, state, param_names)
                where param is a batched set of parameters,
                with a .grad (1st dim is batch dim)
                and state is the state-dict where optimization parameters are kept.
                param_names is a List[str] while each str is name for a parameter
                in batched set of parameters "param".
        """
        assert len(tuples) >= 1
        clipping_scale = group["clipping_scale"]
        (first_p, first_state, _) = tuples[0]
        step = first_state["step"]
        if clipping_scale is None or step == 0:
            # no clipping. return early on step == 0 because the other
            # parameters' state won't have been initialized yet.
            return 1.0
        clipping_update_period = group["clipping_update_period"]

        tot_sumsq = torch.tensor(0.0, device=first_p.device)
        for (p, state, param_names) in tuples:
            grad = p.grad
            if grad.is_sparse:
                raise RuntimeError(
                    "ScaledAdam optimizer does not support sparse gradients")
            if p.numel() == p.shape[0]:  # a batch of scalars
                tot_sumsq += (grad**2).sum()  # sum() to change shape [1] to []
            else:
                tot_sumsq += ((grad * state["param_rms"])**2).sum()

        tot_norm = tot_sumsq.sqrt()
        if "model_norms" not in first_state:
            first_state["model_norms"] = torch.zeros(
                clipping_update_period, device=p.device)
        first_state["model_norms"][step % clipping_update_period] = tot_norm

        if step % clipping_update_period == 0:
            # Print some stats.
            # We don't reach here if step == 0 because we would have returned
            # above.
            sorted_norms = first_state["model_norms"].sort()[0].to("cpu")
            quartiles = []
            for n in range(0, 5):
                index = min(
                    clipping_update_period - 1,
                    (clipping_update_period // 4) * n, )
                quartiles.append(sorted_norms[index].item())

            median = quartiles[2]
            threshold = clipping_scale * median
            first_state["model_norm_threshold"] = threshold
            percent_clipped = (first_state["num_clipped"] * 100.0 /
                               clipping_update_period
                               if "num_clipped" in first_state else 0.0)
            first_state["num_clipped"] = 0
            quartiles = " ".join(["%.3e" % x for x in quartiles])
            logging.info(
                f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, "
                f"threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}"
            )

        if step < clipping_update_period:
            return 1.0  # We have not yet estimated a norm to clip to.
        else:
            try:
                model_norm_threshold = first_state["model_norm_threshold"]
            except KeyError:
                logging.info(
                    "Warning: model_norm_threshold not in state: possibly "
                    "you changed config when restarting, adding clipping_scale option?"
                )
                return 1.0
            ans = min(1.0, (model_norm_threshold / (tot_norm + 1.0e-20)).item())
            if ans < 1.0:
                first_state["num_clipped"] += 1
            if ans < 0.1:
                logging.warn(
                    f"Scaling gradients by {ans}, model_norm_threshold={model_norm_threshold}"
                )
                if self.show_dominant_parameters:
                    assert p.shape[0] == len(param_names)
                    self._show_gradient_dominating_parameter(tuples, tot_sumsq)
            return ans

    def _show_gradient_dominating_parameter(
            self, tuples: List[Tuple[Tensor, dict, List[str]]],
            tot_sumsq: Tensor):
        """
        Show information about the parameter which dominates tot_sumsq.

        Args:
            tuples: a list of tuples of (param, state, param_names)
                where param is a batched set of parameters,
                with a .grad (1st dim is batch dim)
                and state is the state-dict where optimization parameters are kept.
                param_names is a List[str] while each str is name for a parameter
                in batched set of parameters "param".
            tot_sumsq: sumsq of all parameters. Though it could be calculated
                from tuples, we still pass it to save some time.
        """
        all_sumsq_orig = {}
        for (p, state, batch_param_names) in tuples:
            # p is a stacked batch of parameters.
            batch_grad = p.grad
            if p.numel() == p.shape[0]:  # a batch of scalars
                batch_sumsq_orig = batch_grad**2
                # Dummy values used by following `zip` statement.
                batch_rms_orig = torch.ones(p.shape[0])
            else:
                batch_rms_orig = state["param_rms"]
                batch_sumsq_orig = ((batch_grad * batch_rms_orig)**2).sum(
                    dim=list(range(1, batch_grad.ndim)))

            for name, sumsq_orig, rms, grad in zip(batch_param_names,
                                                   batch_sumsq_orig,
                                                   batch_rms_orig, batch_grad):

                proportion_orig = sumsq_orig / tot_sumsq
                all_sumsq_orig[name] = (proportion_orig, sumsq_orig, rms, grad)

        assert torch.isclose(
            sum([value[0] for value in all_sumsq_orig.values()]).cpu(),
            torch.tensor(1.0), )
        sorted_by_proportion = {
            k: v
            for k, v in sorted(
                all_sumsq_orig.items(),
                key=lambda item: item[1][0],
                reverse=True, )
        }
        dominant_param_name = next(iter(sorted_by_proportion))
        (dominant_proportion, dominant_sumsq, dominant_rms,
         dominant_grad, ) = sorted_by_proportion[dominant_param_name]
        logging.info(f"Parameter dominating tot_sumsq {dominant_param_name}"
                     f" with proportion {dominant_proportion:.2f},"
                     f" where dominant_sumsq=(grad_sumsq*orig_rms_sq)"
                     f"={dominant_sumsq:.3e},"
                     f" grad_sumsq = {(dominant_grad**2).sum():.3e},"
                     f" orig_rms_sq={(dominant_rms**2).item():.3e}")

    def _step_one_batch(self,
                        group: dict,
                        p: Tensor,
                        state: dict,
                        clipping_scale: float):
        """
        Do the step for one parameter, which is actually going to be a batch of
        `real` parameters, with dim 0 as the batch dim.
        Args:
            group: dict to look up configuration values
            p: parameter to update (actually multiple parameters stacked together
                as a batch)
            state: state-dict for p, to look up the optimizer state
        """
        lr = group["lr"]
        size_update_period = group["size_update_period"]
        beta1 = group["betas"][0]

        grad = p.grad
        if clipping_scale != 1.0:
            grad = grad * clipping_scale
        step = state["step"]
        delta = state["delta"]

        delta.mul_(beta1)
        batch_size = p.shape[0]
        numel = p.numel() // batch_size
        if numel > 1:
            # Update the size/scale of p, and set param_rms
            scale_grads = state["scale_grads"]
            scale_grads[step % size_update_period] = (p * grad).sum(
                dim=list(range(1, p.ndim)), keepdim=True)
            if step % size_update_period == size_update_period - 1:
                param_rms = state["param_rms"]  # shape: (batch_size, 1, 1, ..)
                param_rms.copy_((p**2)
                                .mean(dim=list(range(1, p.ndim)), keepdim=True)
                                .sqrt())
                if step > 0:
                    # self._size_update() learns the overall scale on the
                    # parameter, by shrinking or expanding it.
                    self._size_update(group, scale_grads, p, state)

        if numel == 1:
            # For parameters with 1 element we just use regular Adam.
            # Updates delta.
            self._step_scalar(group, p, state)
        else:
            self._step(group, p, state)

        state["step"] = step + 1

    def _size_update(self,
                     group: dict,
                     scale_grads: Tensor,
                     p: Tensor,
                     state: dict) -> None:
        """
        Called only where p.numel() > 1, this updates the scale of the parameter.
        If we imagine: p = underlying_param * scale.exp(), and we are doing
        gradient descent on underlying param and on scale, this function does the update
        on `scale`.

        Args:
            group: dict to look up configuration values
            scale_grads: a tensor of shape (size_update_period, batch_size, 1, 1,...) containing
                grads w.r.t. the scales.
            p: The parameter to update
            state: The state-dict of p
        """

        param_rms = state["param_rms"]
        beta1, beta2 = group["betas"]
        size_lr = group["lr"] * group["scalar_lr_scale"]
        param_min_rms = group["param_min_rms"]
        param_max_rms = group["param_max_rms"]
        eps = group["eps"]
        step = state["step"]
        batch_size = p.shape[0]

        size_update_period = scale_grads.shape[0]
        # correct beta2 for the size update period: we will have
        # faster decay at this level.
        beta2_corr = beta2**size_update_period

        scale_exp_avg_sq = state[
            "scale_exp_avg_sq"]  # shape: (batch_size, 1, 1, ..)
        scale_exp_avg_sq.mul_(beta2_corr).add_(
            (scale_grads**2).mean(dim=0),  # mean over dim `size_update_period`
            alpha=1 - beta2_corr, )  # shape is (batch_size, 1, 1, ...)

        # The 1st time we reach here is when size_step == 1.
        size_step = (step + 1) // size_update_period
        bias_correction2 = 1 - beta2_corr**size_step
        # we don't bother with bias_correction1; this will help prevent divergence
        # at the start of training.

        denom = scale_exp_avg_sq.sqrt() + eps

        scale_step = (-size_lr * (bias_correction2**0.5) *
                      scale_grads.sum(dim=0) / denom)

        is_too_small = param_rms < param_min_rms
        is_too_large = param_rms > param_max_rms

        # when the param gets too small, just don't shrink it any further.
        scale_step.masked_fill_(is_too_small, 0.0)
        # when it gets too large, stop it from getting any larger.
        scale_step.masked_fill_(is_too_large, -size_lr * size_update_period)
        delta = state["delta"]
        # the factor of (1-beta1) relates to momentum.
        delta.add_(p * scale_step, alpha=(1 - beta1))

    def _step(self, group: dict, p: Tensor, state: dict):
        """
        This function does the core update of self.step(), in the case where the members of
        the batch have more than 1 element.

        Args:
            group: A dict which will be used to look up configuration values
            p: The parameter to be updated
            grad: The grad of p
            state: The state-dict corresponding to parameter p

        This function modifies p.
        """
        grad = p.grad
        lr = group["lr"]
        beta1, beta2 = group["betas"]
        eps = group["eps"]
        param_min_rms = group["param_min_rms"]
        step = state["step"]

        exp_avg_sq = state["exp_avg_sq"]
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))

        this_step = state["step"] - (state["zero_step"]
                                     if "zero_step" in state else 0)
        bias_correction2 = 1 - beta2**(this_step + 1)
        if bias_correction2 < 0.99:
            # note: not in-place.
            exp_avg_sq = exp_avg_sq * (1.0 / bias_correction2)

        denom = exp_avg_sq.sqrt()
        denom += eps
        grad = grad / denom

        alpha = -lr * (1 - beta1) * state["param_rms"].clamp(min=param_min_rms)

        delta = state["delta"]
        delta.add_(grad * alpha)
        p.add_(delta)

    def _step_scalar(self, group: dict, p: Tensor, state: dict):
        """
        A simplified form of the core update for scalar tensors, where we cannot get a good
        estimate of the parameter rms.
        """
        beta1, beta2 = group["betas"]
        scalar_max = group["scalar_max"]
        eps = group["eps"]
        lr = group["lr"] * group["scalar_lr_scale"]
        grad = p.grad

        exp_avg_sq = state["exp_avg_sq"]  # shape: (batch_size,)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        # bias_correction2 is like in Adam. Don't bother with bias_correction1;
        # slower update at the start will help stability anyway.
        bias_correction2 = 1 - beta2**(state["step"] + 1)
        denom = (exp_avg_sq / bias_correction2).sqrt() + eps

        delta = state["delta"]
        delta.add_(grad / denom, alpha=-lr * (1 - beta1))
        p.clamp_(min=-scalar_max, max=scalar_max)
        p.add_(delta)
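A minimal sketch of wiring up ScaledAdam. Unlike stock torch optimizers, `parameters_names` is mandatory here (one List[str] per param group, per the assertion in `__init__`); the sizes and LR below are illustrative:

    model = torch.nn.Linear(16, 16)
    names = [[name for name, _ in model.named_parameters()]]
    optimizer = ScaledAdam(
        model.parameters(),
        lr=0.03,                 # the high starting LR the docstring suggests
        clipping_scale=2.0,
        parameters_names=names,
    )
    model(torch.randn(4, 16)).pow(2).mean().backward()
    optimizer.step()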
AR/modules/patched_mha_with_cache.py ADDED
@@ -0,0 +1,465 @@
from torch.nn.functional import *
from torch.nn.functional import (
    _mha_shape_check,
    _canonical_mask,
    _none_or_dtype,
    _in_projection_packed,
    _in_projection,  # needed by the use_separate_proj_weight branch below
)
from torch.nn import functional as F
import torch

# Tensor = torch.Tensor
# from typing import Callable, List, Optional, Tuple, Union


def multi_head_attention_forward_patched(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Optional[Tensor],
    in_proj_bias: Optional[Tensor],
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Optional[Tensor],
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    average_attn_weights: bool = True,
    is_causal: bool = False,
    cache=None,
) -> Tuple[Tensor, Optional[Tensor]]:
    r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
                       value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
            Default: `True`
            Note: `need_weights` defaults to `True`, but should be set to `False`
            for best performance when attention weights are not needed.
            *Setting need_weights to `True`
            leads to a significant performance degradation.*
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
            the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        is_causal: If specified, applies a causal mask as attention mask, and ignores
            attn_mask for computing scaled dot product attention.
            Default: ``False``.
            .. warning::
                is_causal provides a hint that the attn_mask is the
                causal mask. Providing incorrect hints can result in
                incorrect execution, including forward and backward
                compatibility.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.
        average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
            Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
            when ``need_weights=True``. Default: True


    Shape:
        Inputs:
        - query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a FloatTensor is provided, it will be directly added to the value.
          If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
          positions. If a BoolTensor is provided, positions with ``True``
          are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
          is provided, it will be added to the attention weight.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.

        Outputs:
        - attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
          attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
          :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
          :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
          head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
    """
    tens_ops = (
        query,
        key,
        value,
        in_proj_weight,
        in_proj_bias,
        bias_k,
        bias_v,
        out_proj_weight,
        out_proj_bias,
    )
    if has_torch_function(tens_ops):
        return handle_torch_function(
            multi_head_attention_forward,
            tens_ops,
            query,
            key,
            value,
            embed_dim_to_check,
            num_heads,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            add_zero_attn,
            dropout_p,
            out_proj_weight,
            out_proj_bias,
            training=training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            is_causal=is_causal,
            use_separate_proj_weight=use_separate_proj_weight,
            q_proj_weight=q_proj_weight,
            k_proj_weight=k_proj_weight,
            v_proj_weight=v_proj_weight,
            static_k=static_k,
            static_v=static_v,
            average_attn_weights=average_attn_weights,
            cache=cache,
        )

    is_batched = _mha_shape_check(
        query, key, value, key_padding_mask, attn_mask, num_heads
    )

    # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
    # is batched, run the computation and before returning squeeze the
    # batch dimension so that the output doesn't carry this temporary batch dimension.
    if not is_batched:
        # unsqueeze if the input is unbatched
        query = query.unsqueeze(1)
        key = key.unsqueeze(1)
        value = value.unsqueeze(1)
        if key_padding_mask is not None:
            key_padding_mask = key_padding_mask.unsqueeze(0)

    # set up shape vars
    tgt_len, bsz, embed_dim = query.shape
    src_len, _, _ = key.shape

    key_padding_mask = _canonical_mask(
        mask=key_padding_mask,
        mask_name="key_padding_mask",
        other_type=_none_or_dtype(attn_mask),
        other_name="attn_mask",
        target_type=query.dtype,
    )

    if is_causal and attn_mask is None:
        raise RuntimeError(
            "Need attn_mask if specifying the is_causal hint. "
            "You may use the Transformer module method "
            "`generate_square_subsequent_mask` to create this mask."
        )

    if is_causal and key_padding_mask is None and not need_weights:
        # when we have a kpm or need weights, we need attn_mask
        # Otherwise, we use the is_causal hint go as is_causal
        # indicator to SDPA.
        attn_mask = None
    else:
        attn_mask = _canonical_mask(
            mask=attn_mask,
            mask_name="attn_mask",
            other_type=None,
            other_name="",
            target_type=query.dtype,
            check_other=False,
        )

        if key_padding_mask is not None:
            # We have the attn_mask, and use that to merge kpm into it.
            # Turn off use of is_causal hint, as the merged mask is no
            # longer causal.
            is_causal = False

    assert (
        embed_dim == embed_dim_to_check
    ), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
    if isinstance(embed_dim, torch.Tensor):
        # embed_dim can be a tensor when JIT tracing
        head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
    else:
        head_dim = embed_dim // num_heads
    assert (
        head_dim * num_heads == embed_dim
    ), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
    if use_separate_proj_weight:
        # allow MHA to have different embedding dimensions when separate projection weights are used
        assert (
            key.shape[:2] == value.shape[:2]
        ), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
    else:
        assert (
            key.shape == value.shape
        ), f"key shape {key.shape} does not match value shape {value.shape}"

    #
    # compute in-projection
    #
    if not use_separate_proj_weight:
        assert (
            in_proj_weight is not None
        ), "use_separate_proj_weight is False but in_proj_weight is None"
        q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
    else:
        assert (
            q_proj_weight is not None
        ), "use_separate_proj_weight is True but q_proj_weight is None"
        assert (
            k_proj_weight is not None
        ), "use_separate_proj_weight is True but k_proj_weight is None"
        assert (
            v_proj_weight is not None
        ), "use_separate_proj_weight is True but v_proj_weight is None"
        if in_proj_bias is None:
            b_q = b_k = b_v = None
        else:
            b_q, b_k, b_v = in_proj_bias.chunk(3)
        q, k, v = _in_projection(
            query,
            key,
            value,
            q_proj_weight,
            k_proj_weight,
            v_proj_weight,
            b_q,
            b_k,
            b_v,
        )
    if cache is not None:
        if cache["first_infer"] == 1:
            cache["k"][cache["stage"]] = k
            # print(0, cache["k"].shape)
            cache["v"][cache["stage"]] = v
        else:  # each of the 12 layers keeps its own cached k/v
            # print(1, cache["k"].shape)
            cache["k"][cache["stage"]] = torch.cat(
                [cache["k"][cache["stage"]], k], 0
            )  # time was originally dim 1, but the projection may have transposed it, so time sits on dim 0 here
            cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]], v], 0)
            # print(2, cache["k"].shape)
        src_len = cache["k"][cache["stage"]].shape[0]
        k = cache["k"][cache["stage"]]
        v = cache["v"][cache["stage"]]
        # if attn_mask is not None:
        #     attn_mask = attn_mask[-1:,]
        # print(attn_mask.shape, attn_mask)
        cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]
    # print(2333, cache)
    # prep attention mask

    attn_mask = _canonical_mask(
        mask=attn_mask,
        mask_name="attn_mask",
        other_type=None,
        other_name="",
        target_type=q.dtype,
        check_other=False,
    )

    if attn_mask is not None:
        # ensure attn_mask's dim is 3
        if attn_mask.dim() == 2:
            correct_2d_size = (tgt_len, src_len)
            if attn_mask.shape != correct_2d_size:
                raise RuntimeError(
                    f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
                )
            attn_mask = attn_mask.unsqueeze(0)
        elif attn_mask.dim() == 3:
            correct_3d_size = (bsz * num_heads, tgt_len, src_len)
            if attn_mask.shape != correct_3d_size:
                raise RuntimeError(
                    f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
                )
        else:
            raise RuntimeError(
                f"attn_mask's dimension {attn_mask.dim()} is not supported"
            )

    # add bias along batch dimension (currently second)
    if bias_k is not None and bias_v is not None:
        assert static_k is None, "bias cannot be added to static key."
        assert static_v is None, "bias cannot be added to static value."
        k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
        v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))
    else:
        assert bias_k is None
        assert bias_v is None

    #
    # reshape q, k, v for multihead attention and make em batch first
    #
    q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if static_k is None:
        k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
    else:
        # TODO finish disentangling control flow so we don't do in-projections when statics are passed
        assert (
            static_k.size(0) == bsz * num_heads
        ), f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
        assert (
            static_k.size(2) == head_dim
        ), f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
        k = static_k
    if static_v is None:
        v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
    else:
        # TODO finish disentangling control flow so we don't do in-projections when statics are passed
        assert (
            static_v.size(0) == bsz * num_heads
        ), f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
        assert (
            static_v.size(2) == head_dim
        ), f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
        v = static_v

    # add zero attention along batch dimension (now first)
    if add_zero_attn:
        zero_attn_shape = (bsz * num_heads, 1, head_dim)
        k = torch.cat(
            [k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1
        )
        v = torch.cat(
            [v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1
        )
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))

    # update source sequence length after adjustments
    src_len = k.size(1)

    # merge key padding and attention masks
    if key_padding_mask is not None:
        assert key_padding_mask.shape == (
            bsz,
            src_len,
        ), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
        key_padding_mask = (
            key_padding_mask.view(bsz, 1, 1, src_len)
            .expand(-1, num_heads, -1, -1)
            .reshape(bsz * num_heads, 1, src_len)
        )
        if attn_mask is None:
            attn_mask = key_padding_mask
        else:
            attn_mask = attn_mask + key_padding_mask

    # adjust dropout probability
    if not training:
        dropout_p = 0.0

    #
    # (deep breath) calculate attention and out projection
    #

    if need_weights:
        B, Nt, E = q.shape
        q_scaled = q / math.sqrt(E)

        assert not (
            is_causal and attn_mask is None
        ), "FIXME: is_causal not implemented for need_weights"

        if attn_mask is not None:
            attn_output_weights = torch.baddbmm(
                attn_mask, q_scaled, k.transpose(-2, -1)
            )
        else:
            attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
        attn_output_weights = softmax(attn_output_weights, dim=-1)
        if dropout_p > 0.0:
            attn_output_weights = dropout(attn_output_weights, p=dropout_p)

        attn_output = torch.bmm(attn_output_weights, v)

        attn_output = (
            attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
        )
        attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
        attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))

        # optionally average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        if average_attn_weights:
            attn_output_weights = attn_output_weights.mean(dim=1)

        if not is_batched:
            # squeeze the output if input was unbatched
            attn_output = attn_output.squeeze(1)
            attn_output_weights = attn_output_weights.squeeze(0)
        return attn_output, attn_output_weights
    else:
        # attn_mask can be either (L,S) or (N*num_heads, L, S)
        # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
        # in order to match the input for SDPA of (N, num_heads, L, S)
        if attn_mask is not None:
            if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
                attn_mask = attn_mask.unsqueeze(0)
            else:
                attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)

        q = q.view(bsz, num_heads, tgt_len, head_dim)
        k = k.view(bsz, num_heads, src_len, head_dim)
        v = v.view(bsz, num_heads, src_len, head_dim)

        # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
        attn_output = scaled_dot_product_attention(
            q, k, v, attn_mask, dropout_p, is_causal
        )

        attn_output = (
            attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
        )

        attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
        attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
        if not is_batched:
            # squeeze the output if input was unbatched
            attn_output = attn_output.squeeze(1)
        return attn_output, None
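For reference, a hypothetical sketch of the cache dict this function expects (key names are taken from the reads above; the layer count mirrors the "12 layers" mentioned in the inline comment and is illustrative):

    num_layers = 12
    cache = {
        "all_stage": num_layers,   # how many k/v slots to cycle through (one per attention layer)
        "stage": 0,                # index of the slot being read/written; wraps modulo all_stage
        "first_infer": 1,          # 1 on the prefill pass (slots overwritten), 0 afterwards (steps appended)
        "k": [None] * num_layers,  # per-layer cached keys, time on dim 0: (S, N, E)
        "v": [None] * num_layers,  # per-layer cached values, same layout
    }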
AR/modules/patched_mha_with_cache_onnx.py ADDED
@@ -0,0 +1,92 @@
from torch.nn.functional import *
from torch.nn.functional import (
    _mha_shape_check,
    _canonical_mask,
    _none_or_dtype,
    _in_projection_packed,
)
import torch


def multi_head_attention_forward_patched(
    query,
    key,
    value,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight,
    in_proj_bias: Optional[Tensor],
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Optional[Tensor],
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    average_attn_weights: bool = True,
    is_causal: bool = False,
    cache=None,
) -> Tuple[Tensor, Optional[Tensor]]:

    # set up shape vars
    _, _, embed_dim = query.shape
    attn_mask = _canonical_mask(
        mask=attn_mask,
        mask_name="attn_mask",
        other_type=None,
        other_name="",
        target_type=query.dtype,
        check_other=False,
    )
    head_dim = embed_dim // num_heads

    proj_qkv = linear(query, in_proj_weight, in_proj_bias)
    proj_qkv = proj_qkv.unflatten(-1, (3, query.size(-1))).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
    q, k, v = proj_qkv[0], proj_qkv[1], proj_qkv[2]

    if cache["first_infer"] == 1:
        cache["k"][cache["stage"]] = k
        cache["v"][cache["stage"]] = v
    else:
        cache["k"][cache["stage"]] = torch.cat([cache["k"][cache["stage"]][:-1], k], 0)
        cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]][:-1], v], 0)
    k = cache["k"][cache["stage"]]
    v = cache["v"][cache["stage"]]
    cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]

    attn_mask = _canonical_mask(
        mask=attn_mask,
        mask_name="attn_mask",
        other_type=None,
        other_name="",
        target_type=q.dtype,
        check_other=False,
    )
    attn_mask = attn_mask.unsqueeze(0)

    q = q.view(-1, num_heads, head_dim).transpose(0, 1)
    k = k.view(-1, num_heads, head_dim).transpose(0, 1)
    v = v.view(-1, num_heads, head_dim).transpose(0, 1)

    dropout_p = 0.0
    attn_mask = attn_mask.unsqueeze(0)
    q = q.view(num_heads, -1, head_dim).unsqueeze(0)
    k = k.view(num_heads, -1, head_dim).unsqueeze(0)
    v = v.view(num_heads, -1, head_dim).unsqueeze(0)
    attn_output = scaled_dot_product_attention(
        q, k, v, attn_mask, dropout_p, is_causal
    )
    attn_output = (
        attn_output.permute(2, 0, 1, 3).contiguous().view(-1, embed_dim)
    )
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
    attn_output = attn_output.view(-1, 1, attn_output.size(1))

    return attn_output
AR/modules/scaling.py ADDED
@@ -0,0 +1,335 @@
# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import random
from typing import Optional
from typing import Tuple
from typing import Union

import torch
import torch.nn as nn
from torch import Tensor


class DoubleSwishFunction(torch.autograd.Function):
    """
      double_swish(x) = x * torch.sigmoid(x-1)
    This is a definition, originally motivated by its close numerical
    similarity to swish(swish(x)), where swish(x) = x * sigmoid(x).

    Memory-efficient derivative computation:
     double_swish(x) = x * s, where s(x) = torch.sigmoid(x-1)
     double_swish'(x) = d/dx double_swish(x) = x * s'(x) + x' * s(x) = x * s'(x) + s(x).
     Now, s'(x) = s(x) * (1-s(x)).
     double_swish'(x) = x * s'(x) + s(x).
                      = x * s(x) * (1-s(x)) + s(x).
                      = double_swish(x) * (1-s(x)) + s(x)
     ... so we just need to remember s(x) but not x itself.
    """

    @staticmethod
    def forward(ctx, x: Tensor) -> Tensor:
        requires_grad = x.requires_grad
        x_dtype = x.dtype
        if x.dtype == torch.float16:
            x = x.to(torch.float32)

        s = torch.sigmoid(x - 1.0)
        y = x * s

        if requires_grad:
            deriv = y * (1 - s) + s
            # notes on derivative of x * sigmoid(x - 1):
            # https://www.wolframalpha.com/input?i=d%2Fdx+%28x+*+sigmoid%28x-1%29%29
            # min \simeq -0.043638. Take floor as -0.043637 so it's a lower bound
            # max \simeq 1.1990. Take ceil to be 1.2 so it's an upper bound.
            # the combination of "+ torch.rand_like(deriv)" and casting to torch.uint8 (which
            # floors), should be expectation-preserving.
            floor = -0.043637
            ceil = 1.2
            d_scaled = (deriv - floor) * (255.0 / (ceil - floor)) + torch.rand_like(
                deriv
            )
            if __name__ == "__main__":
                # for self-testing only.
                assert d_scaled.min() >= 0.0
                assert d_scaled.max() < 256.0
            d_int = d_scaled.to(torch.uint8)
            ctx.save_for_backward(d_int)
        if x.dtype == torch.float16 or torch.is_autocast_enabled():
            y = y.to(torch.float16)
        return y

    @staticmethod
    def backward(ctx, y_grad: Tensor) -> Tensor:
        (d,) = ctx.saved_tensors
        # the same constants as used in forward pass.
        floor = -0.043637
        ceil = 1.2
        d = d * ((ceil - floor) / 255.0) + floor
        return y_grad * d


class DoubleSwish(torch.nn.Module):
    def forward(self, x: Tensor) -> Tensor:
        """Return double-swish activation function which is an approximation to Swish(Swish(x)),
        that we approximate closely with x * sigmoid(x-1).
        """
        if torch.jit.is_scripting() or torch.jit.is_tracing():
            return x * torch.sigmoid(x - 1.0)
        return DoubleSwishFunction.apply(x)
95
+
96
+
97
+ class ActivationBalancerFunction(torch.autograd.Function):
98
+ @staticmethod
99
+ def forward(
100
+ ctx,
101
+ x: Tensor,
102
+ scale_factor: Tensor,
103
+ sign_factor: Optional[Tensor],
104
+ channel_dim: int,
105
+ ) -> Tensor:
106
+ if channel_dim < 0:
107
+ channel_dim += x.ndim
108
+ ctx.channel_dim = channel_dim
109
+ xgt0 = x > 0
110
+ if sign_factor is None:
111
+ ctx.save_for_backward(xgt0, scale_factor)
112
+ else:
113
+ ctx.save_for_backward(xgt0, scale_factor, sign_factor)
114
+ return x
115
+
116
+ @staticmethod
117
+ def backward(ctx, x_grad: Tensor) -> Tuple[Tensor, None, None, None]:
118
+ if len(ctx.saved_tensors) == 3:
119
+ xgt0, scale_factor, sign_factor = ctx.saved_tensors
120
+ for _ in range(ctx.channel_dim, x_grad.ndim - 1):
121
+ scale_factor = scale_factor.unsqueeze(-1)
122
+ sign_factor = sign_factor.unsqueeze(-1)
123
+ factor = sign_factor + scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
124
+ else:
125
+ xgt0, scale_factor = ctx.saved_tensors
126
+ for _ in range(ctx.channel_dim, x_grad.ndim - 1):
127
+ scale_factor = scale_factor.unsqueeze(-1)
128
+ factor = scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
129
+ neg_delta_grad = x_grad.abs() * factor
130
+ return (
131
+ x_grad - neg_delta_grad,
132
+ None,
133
+ None,
134
+ None,
135
+ )
136
+
137
+
138
+ def _compute_scale_factor(
139
+ x: Tensor,
140
+ channel_dim: int,
141
+ min_abs: float,
142
+ max_abs: float,
143
+ gain_factor: float,
144
+ max_factor: float,
145
+ ) -> Tensor:
146
+ if channel_dim < 0:
147
+ channel_dim += x.ndim
148
+ sum_dims = [d for d in range(x.ndim) if d != channel_dim]
149
+ x_abs_mean = torch.mean(x.abs(), dim=sum_dims).to(torch.float32)
150
+
151
+ if min_abs == 0.0:
152
+ below_threshold = 0.0
153
+ else:
154
+ # below_threshold is 0 if x_abs_mean > min_abs, can be at most max_factor if
155
+ # x_abs)_mean , min_abs.
156
+ below_threshold = ((min_abs - x_abs_mean) * (gain_factor / min_abs)).clamp(
157
+ min=0, max=max_factor
158
+ )
159
+
160
+ above_threshold = ((x_abs_mean - max_abs) * (gain_factor / max_abs)).clamp(
161
+ min=0, max=max_factor
162
+ )
163
+
164
+ return below_threshold - above_threshold
165
+
166
+
167
+ def _compute_sign_factor(
168
+ x: Tensor,
169
+ channel_dim: int,
170
+ min_positive: float,
171
+ max_positive: float,
172
+ gain_factor: float,
173
+ max_factor: float,
174
+ ) -> Tensor:
175
+ if channel_dim < 0:
176
+ channel_dim += x.ndim
177
+ sum_dims = [d for d in range(x.ndim) if d != channel_dim]
178
+ proportion_positive = torch.mean((x > 0).to(torch.float32), dim=sum_dims)
179
+ if min_positive == 0.0:
180
+ factor1 = 0.0
181
+ else:
182
+ # 0 if proportion_positive >= min_positive, else can be
183
+ # as large as max_factor.
184
+ factor1 = (
185
+ (min_positive - proportion_positive) * (gain_factor / min_positive)
186
+ ).clamp_(min=0, max=max_factor)
187
+
188
+ if max_positive == 1.0:
189
+ factor2 = 0.0
190
+ else:
191
+ # 0 if self.proportion_positive <= max_positive, else can be
192
+ # as large as -max_factor.
193
+ factor2 = (
194
+ (proportion_positive - max_positive) * (gain_factor / (1.0 - max_positive))
195
+ ).clamp_(min=0, max=max_factor)
196
+ sign_factor = factor1 - factor2
197
+ # require min_positive != 0 or max_positive != 1:
198
+ assert not isinstance(sign_factor, float)
199
+ return sign_factor
200
+
201
+
202
+ class ActivationBalancer(torch.nn.Module):
203
+ """
204
+ Modifies the backpropped derivatives of a function to try to encourage, for
205
+ each channel, that it is positive at least a proportion `threshold` of the
206
+ time. It does this by multiplying negative derivative values by up to
207
+ (1+max_factor), and positive derivative values by up to (1-max_factor),
208
+ interpolated from 1 at the threshold to those extremal values when none
209
+ of the inputs are positive.
210
+
211
+ Args:
212
+ num_channels: the number of channels
213
+ channel_dim: the dimension/axis corresponding to the channel, e.g.
214
+ -1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
215
+ min_positive: the minimum, per channel, of the proportion of the time
216
+ that (x > 0), below which we start to modify the derivatives.
217
+ max_positive: the maximum, per channel, of the proportion of the time
218
+ that (x > 0), above which we start to modify the derivatives.
219
+ max_factor: the maximum factor by which we modify the derivatives for
220
+ either the sign constraint or the magnitude constraint;
221
+ e.g. with max_factor=0.02, the the derivatives would be multiplied by
222
+ values in the range [0.98..1.02].
223
+ sign_gain_factor: determines the 'gain' with which we increase the
224
+ change in gradient once the constraints on min_positive and max_positive
225
+ are violated.
226
+ scale_gain_factor: determines the 'gain' with which we increase the
227
+ change in gradient once the constraints on min_abs and max_abs
228
+ are violated.
229
+ min_abs: the minimum average-absolute-value difference from the mean
230
+ value per channel, which we allow, before we start to modify
231
+ the derivatives to prevent this.
232
+ max_abs: the maximum average-absolute-value difference from the mean
233
+ value per channel, which we allow, before we start to modify
234
+ the derivatives to prevent this.
235
+ min_prob: determines the minimum probability with which we modify the
236
+ gradients for the {min,max}_positive and {min,max}_abs constraints,
237
+ on each forward(). This is done randomly to prevent all layers
238
+ from doing it at the same time. Early in training we may use
239
+ higher probabilities than this; it will decay to this value.
240
+ """
241
+
242
+ def __init__(
243
+ self,
244
+ num_channels: int,
245
+ channel_dim: int,
246
+ min_positive: float = 0.05,
247
+ max_positive: float = 0.95,
248
+ max_factor: float = 0.04,
249
+ sign_gain_factor: float = 0.01,
250
+ scale_gain_factor: float = 0.02,
251
+ min_abs: float = 0.2,
252
+ max_abs: float = 100.0,
253
+ min_prob: float = 0.1,
254
+ ):
255
+ super(ActivationBalancer, self).__init__()
256
+ self.num_channels = num_channels
257
+ self.channel_dim = channel_dim
258
+ self.min_positive = min_positive
259
+ self.max_positive = max_positive
260
+ self.max_factor = max_factor
261
+ self.min_abs = min_abs
262
+ self.max_abs = max_abs
263
+ self.min_prob = min_prob
264
+ self.sign_gain_factor = sign_gain_factor
265
+ self.scale_gain_factor = scale_gain_factor
266
+
267
+ # count measures how many times the forward() function has been called.
268
+ # We occasionally sync this to a tensor called `count`, that exists to
269
+ # make sure it is synced to disk when we load and save the model.
270
+ self.cpu_count = 0
271
+ self.register_buffer("count", torch.tensor(0, dtype=torch.int64))
272
+
273
+ def forward(self, x: Tensor) -> Tensor:
274
+ if torch.jit.is_scripting() or not x.requires_grad or torch.jit.is_tracing():
275
+ return _no_op(x)
276
+
277
+ count = self.cpu_count
278
+ self.cpu_count += 1
279
+
280
+ if random.random() < 0.01:
281
+ # Occasionally sync self.cpu_count with self.count.
282
+ # count affects the decay of 'prob'. don't do this on every iter,
283
+ # because syncing with the GPU is slow.
284
+ self.cpu_count = max(self.cpu_count, self.count.item())
285
+ self.count.fill_(self.cpu_count)
286
+
287
+ # the prob of doing some work exponentially decreases from 0.5 till it hits
288
+ # a floor at min_prob (==0.1, by default)
289
+ prob = max(self.min_prob, 0.5 ** (1 + (count / 4000.0)))
290
+
291
+ if random.random() < prob:
292
+ sign_gain_factor = 0.5
293
+ if self.min_positive != 0.0 or self.max_positive != 1.0:
294
+ sign_factor = _compute_sign_factor(
295
+ x,
296
+ self.channel_dim,
297
+ self.min_positive,
298
+ self.max_positive,
299
+ gain_factor=self.sign_gain_factor / prob,
300
+ max_factor=self.max_factor,
301
+ )
302
+ else:
303
+ sign_factor = None
304
+
305
+ scale_factor = _compute_scale_factor(
306
+ x.detach(),
307
+ self.channel_dim,
308
+ min_abs=self.min_abs,
309
+ max_abs=self.max_abs,
310
+ gain_factor=self.scale_gain_factor / prob,
311
+ max_factor=self.max_factor,
312
+ )
313
+ return ActivationBalancerFunction.apply(
314
+ x,
315
+ scale_factor,
316
+ sign_factor,
317
+ self.channel_dim,
318
+ )
319
+ else:
320
+ return _no_op(x)
321
+
322
+
323
+ def BalancedDoubleSwish(
324
+ d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25
325
+ ) -> nn.Sequential:
326
+ """
327
+ ActivationBalancer -> DoubleSwish
328
+ """
329
+ balancer = ActivationBalancer(
330
+ d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob
331
+ )
332
+ return nn.Sequential(
333
+ balancer,
334
+ DoubleSwish(),
335
+ )
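One thing worth flagging: `ActivationBalancer.forward` calls a `_no_op` helper that is not defined anywhere in this 335-line file, so the module assumes that identity helper (present in the upstream icefall/vall-e `scaling.py`) is available. The quantized backward itself is easy to sanity-check numerically; below is a minimal sketch of my own (not part of this commit, assuming the `moyoyo_tts` package layout used by the other files) that verifies the uint8 gradient tracks the analytic derivative `double_swish(x)*(1-s)+s` to within one quantization step:

```python
import torch

from moyoyo_tts.AR.modules.scaling import DoubleSwishFunction  # assumed import path

x = torch.randn(4, 100, requires_grad=True)
y = DoubleSwishFunction.apply(x)
y.sum().backward()  # with unit upstream grads, x.grad is the dequantized derivative

s = torch.sigmoid(x.detach() - 1.0)
exact = x.detach() * s * (1 - s) + s  # double_swish'(x) from the docstring derivation
# one uint8 step is (1.2 + 0.043637) / 255 ~ 0.005, so 0.01 comfortably bounds the
# dithered quantization error
assert torch.allclose(x.grad, exact, atol=0.01)
```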
AR/modules/transformer.py ADDED
@@ -0,0 +1,379 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
import copy
import numbers
from functools import partial
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union

import torch
from torch import Tensor
from torch import nn
from torch.nn import functional as F

from moyoyo_tts.AR.modules.activation import MultiheadAttention
from moyoyo_tts.AR.modules.scaling import BalancedDoubleSwish

_shape_t = Union[int, List[int], torch.Size]


class LayerNorm(nn.Module):
    __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
    normalized_shape: Tuple[int, ...]
    eps: float
    elementwise_affine: bool

    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = 1e-5,
        elementwise_affine: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            # mypy error: incompatible types in assignment
            normalized_shape = (normalized_shape,)  # type: ignore[assignment]
        self.normalized_shape = tuple(normalized_shape)  # type: ignore[arg-type]
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(
                torch.empty(self.normalized_shape, **factory_kwargs)
            )
            self.bias = nn.Parameter(
                torch.empty(self.normalized_shape, **factory_kwargs)
            )
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        if self.elementwise_affine:
            nn.init.ones_(self.weight)
            nn.init.zeros_(self.bias)

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            return (
                F.layer_norm(
                    input,
                    self.normalized_shape,
                    self.weight,
                    self.bias,
                    self.eps,
                ),
                embedding,
            )

        assert embedding is None
        return F.layer_norm(
            input, self.normalized_shape, self.weight, self.bias, self.eps
        )

    def extra_repr(self) -> str:
        return (
            "{normalized_shape}, eps={eps}, "
            "elementwise_affine={elementwise_affine}".format(**self.__dict__)
        )


class IdentityNorm(nn.Module):
    def __init__(
        self,
        d_model: int,
        eps: float = 1e-5,
        device=None,
        dtype=None,
    ) -> None:
        super(IdentityNorm, self).__init__()

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            return input

        assert embedding is None
        return input


class TransformerEncoder(nn.Module):
    r"""TransformerEncoder is a stack of N encoder layers. Users can build the
    BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).
        enable_nested_tensor: if True, input will automatically convert to nested tensor
            (and convert back on output). This will improve the overall performance of
            TransformerEncoder when padding rate is high. Default: ``True`` (enabled).

    Examples::
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """

    __constants__ = ["norm"]

    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        src: Tensor,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        return_layer_states: bool = False,
        cache=None,
    ) -> Tensor:
        r"""Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
            return_layer_states: return layers' state (optional).

        Shape:
            see the docs in Transformer class.
        """
        if return_layer_states:
            layer_states = []  # layers' output
            output = src
            for mod in self.layers:
                output = mod(
                    output,
                    src_mask=mask,
                    src_key_padding_mask=src_key_padding_mask,
                    cache=cache,
                )
                layer_states.append(output[0])

            if self.norm is not None:
                output = self.norm(output)

            return layer_states, output

        output = src
        for mod in self.layers:
            output = mod(
                output,
                src_mask=mask,
                src_key_padding_mask=src_key_padding_mask,
                cache=cache,
            )

        if self.norm is not None:
            output = self.norm(output)

        return output


class TransformerEncoderLayer(nn.Module):
    __constants__ = ["batch_first", "norm_first"]

    def __init__(
        self,
        d_model: int,
        nhead: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
        batch_first: bool = False,
        norm_first: bool = False,
        device=None,
        dtype=None,
        linear1_self_attention_cls: nn.Module = nn.Linear,
        linear2_self_attention_cls: nn.Module = nn.Linear,
        linear1_feedforward_cls: nn.Module = nn.Linear,
        linear2_feedforward_cls: nn.Module = nn.Linear,
        layer_norm_cls: nn.Module = LayerNorm,
        layer_norm_eps: float = 1e-5,
        adaptive_layer_norm=False,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(
            d_model,
            nhead,
            dropout=dropout,
            batch_first=batch_first,
            linear1_cls=linear1_self_attention_cls,
            linear2_cls=linear2_self_attention_cls,
            **factory_kwargs,
        )

        # Implementation of Feedforward model
        self.linear1 = linear1_feedforward_cls(
            d_model, dim_feedforward, **factory_kwargs
        )
        self.dropout = nn.Dropout(dropout)
        self.linear2 = linear2_feedforward_cls(
            dim_feedforward, d_model, **factory_kwargs
        )

        self.norm_first = norm_first
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        # Legacy string support for activation function.
        if isinstance(activation, str):
            activation = _get_activation_fn(activation)
        elif isinstance(activation, partial):
            activation = activation(d_model)
        elif activation == BalancedDoubleSwish:
            activation = BalancedDoubleSwish(d_model)

        # # We can't test self.activation in forward() in TorchScript,
        # # so stash some information about it instead.
        # if activation is F.relu or isinstance(activation, torch.nn.ReLU):
        #     self.activation_relu_or_gelu = 1
        # elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
        #     self.activation_relu_or_gelu = 2
        # else:
        #     self.activation_relu_or_gelu = 0
        self.activation = activation

        norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
        if layer_norm_cls == IdentityNorm:
            norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        else:
            norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)

        if adaptive_layer_norm:
            self.norm1 = AdaptiveLayerNorm(d_model, norm1)
            self.norm2 = AdaptiveLayerNorm(d_model, norm2)
        else:
            self.norm1 = norm1
            self.norm2 = norm2

    def __setstate__(self, state):
        super(TransformerEncoderLayer, self).__setstate__(state)
        if not hasattr(self, "activation"):
            self.activation = F.relu

    def forward(
        self,
        src: Tensor,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        cache=None,
    ) -> Tensor:
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        x, stage_embedding = src, None
        is_src_tuple = False
        if isinstance(src, tuple):
            x, stage_embedding = src
            is_src_tuple = True

        if src_key_padding_mask is not None:
            _skpm_dtype = src_key_padding_mask.dtype
            if _skpm_dtype != torch.bool and not torch.is_floating_point(
                src_key_padding_mask
            ):
                raise AssertionError(
                    "only bool and floating types of key_padding_mask are supported"
                )

        if self.norm_first:
            x = x + self._sa_block(
                self.norm1(x, stage_embedding),
                src_mask,
                src_key_padding_mask,
                cache=cache,
            )
            x = x + self._ff_block(self.norm2(x, stage_embedding))
        else:
            x = self.norm1(
                x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
                stage_embedding,
            )
            x = self.norm2(x + self._ff_block(x), stage_embedding)

        if is_src_tuple:
            return (x, stage_embedding)
        return x

    # self-attention block
    def _sa_block(
        self,
        x: Tensor,
        attn_mask: Optional[Tensor],
        key_padding_mask: Optional[Tensor],
        cache=None,
    ) -> Tensor:
        x = self.self_attn(
            x,
            x,
            x,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False,
            cache=cache,
        )[0]
        return self.dropout1(x)

    # feed forward block
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.dropout2(x)


class AdaptiveLayerNorm(nn.Module):
    r"""Adaptive Layer Normalization"""

    def __init__(self, d_model, norm) -> None:
        super(AdaptiveLayerNorm, self).__init__()
        self.project_layer = nn.Linear(d_model, 2 * d_model)
        self.norm = norm
        self.d_model = d_model
        self.eps = self.norm.eps

    def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            weight, bias = torch.split(
                self.project_layer(embedding),
                split_size_or_sections=self.d_model,
                dim=-1,
            )
            return (weight * self.norm(input) + bias, embedding)

        weight, bias = torch.split(
            self.project_layer(embedding),
            split_size_or_sections=self.d_model,
            dim=-1,
        )
        return weight * self.norm(input) + bias


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
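Note that `BalancedBasicNorm` and `_get_activation_fn` are referenced above but neither defined nor imported in this file, so those branches also lean on the upstream `scaling.py`. A short usage sketch of my own (not from the commit, assuming the `moyoyo_tts` import paths and that the custom `MultiheadAttention` behaves like `torch.nn.MultiheadAttention` for plain self-attention); `activation=partial(BalancedDoubleSwish)` would exercise the `isinstance(activation, partial)` branch, but given the `_no_op` caveat flagged under scaling.py, the default `F.relu` is used here:

```python
import torch

from moyoyo_tts.AR.modules.transformer import (
    TransformerEncoder,
    TransformerEncoderLayer,
)

layer = TransformerEncoderLayer(
    d_model=512,
    nhead=8,
    dim_feedforward=2048,
    batch_first=True,
    norm_first=True,  # pre-norm, as used for the AR decoder stack
)
encoder = TransformerEncoder(layer, num_layers=6)

src = torch.rand(2, 100, 512)  # (batch, seq, d_model) since batch_first=True
out = encoder(src)             # same shape as src; `cache=` enables incremental decoding
```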
AR/modules/transformer_onnx.py ADDED
@@ -0,0 +1,292 @@
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
import copy
import numbers
from functools import partial
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union

import torch
from moyoyo_tts.AR.modules.activation_onnx import MultiheadAttention
from moyoyo_tts.AR.modules.scaling import BalancedDoubleSwish
from torch import nn
from torch import Tensor
from torch.nn import functional as F

_shape_t = Union[int, List[int], torch.Size]


class LayerNorm(nn.Module):
    __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
    normalized_shape: Tuple[int, ...]
    eps: float
    elementwise_affine: bool

    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = 1e-5,
        elementwise_affine: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            # mypy error: incompatible types in assignment
            normalized_shape = (normalized_shape,)  # type: ignore[assignment]
        self.normalized_shape = tuple(normalized_shape)  # type: ignore[arg-type]
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(
                torch.empty(self.normalized_shape, **factory_kwargs)
            )
            self.bias = nn.Parameter(
                torch.empty(self.normalized_shape, **factory_kwargs)
            )
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        if self.elementwise_affine:
            nn.init.ones_(self.weight)
            nn.init.zeros_(self.bias)

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            return (
                F.layer_norm(
                    input,
                    self.normalized_shape,
                    self.weight,
                    self.bias,
                    self.eps,
                ),
                embedding,
            )

        assert embedding is None
        return F.layer_norm(
            input, self.normalized_shape, self.weight, self.bias, self.eps
        )

    def extra_repr(self) -> str:
        return (
            "{normalized_shape}, eps={eps}, "
            "elementwise_affine={elementwise_affine}".format(**self.__dict__)
        )


class IdentityNorm(nn.Module):
    def __init__(
        self,
        d_model: int,
        eps: float = 1e-5,
        device=None,
        dtype=None,
    ) -> None:
        super(IdentityNorm, self).__init__()

    def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
        if isinstance(input, tuple):
            return input

        assert embedding is None
        return input


class TransformerEncoder(nn.Module):
    r"""TransformerEncoder is a stack of N encoder layers. Users can build the
    BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).
        enable_nested_tensor: if True, input will automatically convert to nested tensor
            (and convert back on output). This will improve the overall performance of
            TransformerEncoder when padding rate is high. Default: ``True`` (enabled).

    Examples::
        >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """

    __constants__ = ["norm"]

    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        src: Tensor,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        return_layer_states: bool = False,
        cache=None,
    ) -> Tensor:
        output = src
        for mod in self.layers:
            output = mod(
                output,
                src_mask=mask,
                src_key_padding_mask=src_key_padding_mask,
                cache=cache,
            )

        if self.norm is not None:
            output = self.norm(output)

        return output


class TransformerEncoderLayer(nn.Module):
    __constants__ = ["batch_first", "norm_first"]

    def __init__(
        self,
        d_model: int,
        nhead: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
        batch_first: bool = False,
        norm_first: bool = False,
        device=None,
        dtype=None,
        linear1_self_attention_cls: nn.Module = nn.Linear,
        linear2_self_attention_cls: nn.Module = nn.Linear,
        linear1_feedforward_cls: nn.Module = nn.Linear,
        linear2_feedforward_cls: nn.Module = nn.Linear,
        layer_norm_cls: nn.Module = LayerNorm,
        layer_norm_eps: float = 1e-5,
        adaptive_layer_norm=False,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(
            d_model,
            nhead,
            dropout=dropout,
            batch_first=batch_first,
            linear1_cls=linear1_self_attention_cls,
            linear2_cls=linear2_self_attention_cls,
            **factory_kwargs,
        )
        self.linear1 = linear1_feedforward_cls(
            d_model, dim_feedforward, **factory_kwargs
        )
        self.dropout = nn.Dropout(dropout)
        self.linear2 = linear2_feedforward_cls(
            dim_feedforward, d_model, **factory_kwargs
        )
        self.norm_first = norm_first
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        if isinstance(activation, str):
            activation = _get_activation_fn(activation)
        elif isinstance(activation, partial):
            activation = activation(d_model)
        elif activation == BalancedDoubleSwish:
            activation = BalancedDoubleSwish(d_model)
        self.activation = activation

        norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
        if layer_norm_cls == IdentityNorm:
            norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
        else:
            norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)

        if adaptive_layer_norm:
            self.norm1 = AdaptiveLayerNorm(d_model, norm1)
            self.norm2 = AdaptiveLayerNorm(d_model, norm2)
        else:
            self.norm1 = norm1
            self.norm2 = norm2

    def __setstate__(self, state):
        super(TransformerEncoderLayer, self).__setstate__(state)
        if not hasattr(self, "activation"):
            self.activation = F.relu

    def forward(
        self,
        src: Tensor,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        cache=None,
    ) -> Tensor:
        x = src
        stage_embedding = None
        x = self.norm1(
            x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
            stage_embedding,
        )
        x = self.norm2(x + self._ff_block(x), stage_embedding)

        return x

    def _sa_block(
        self,
        x: Tensor,
        attn_mask: Optional[Tensor],
        key_padding_mask: Optional[Tensor],
        cache=None,
    ) -> Tensor:
        x = self.self_attn(
            x,
            x,
            x,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False,
            cache=cache,
        )
        return self.dropout1(x)

    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.dropout2(x)


class AdaptiveLayerNorm(nn.Module):
    r"""Adaptive Layer Normalization"""

    def __init__(self, d_model, norm) -> None:
        super(AdaptiveLayerNorm, self).__init__()
        self.project_layer = nn.Linear(d_model, 2 * d_model)
        self.norm = norm
        self.d_model = d_model
        self.eps = self.norm.eps

    def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
        if isinstance(input, tuple):
            input, embedding = input
            weight, bias = torch.split(
                self.project_layer(embedding),
                split_size_or_sections=self.d_model,
                dim=-1,
            )
            return (weight * self.norm(input) + bias, embedding)

        weight, bias = torch.split(
            self.project_layer(embedding),
            split_size_or_sections=self.d_model,
            dim=-1,
        )
        return weight * self.norm(input) + bias


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
AR/text_processing/__init__.py ADDED
File without changes
AR/text_processing/phonemizer.py ADDED
@@ -0,0 +1,79 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import re
from typing import Dict
from typing import List

import regex
from gruut import sentences
from gruut.const import Sentence
from gruut.const import Word
from moyoyo_tts.AR.text_processing.symbols import SYMBOL_TO_ID


class GruutPhonemizer:
    def __init__(self, language: str):
        self._phonemizer = sentences
        self.lang = language
        self.symbol_to_id = SYMBOL_TO_ID
        self._special_cases_dict: Dict[str, str] = {
            r"\.\.\.": "... ",
            ";": "; ",
            ":": ": ",
            ",": ", ",
            r"\.": ". ",
            "!": "! ",
            r"\?": "? ",
            "—": "—",
            "…": "… ",
            "«": "«",
            "»": "»",
        }
        self._punctuation_regexp: str = (
            rf"([{''.join(self._special_cases_dict.keys())}])"
        )

    def _normalize_punctuation(self, text: str) -> str:
        text = regex.sub(rf"\pZ+{self._punctuation_regexp}", r"\1", text)
        text = regex.sub(rf"{self._punctuation_regexp}(\pL)", r"\1 \2", text)
        text = regex.sub(r"\pZ+", r" ", text)
        return text.strip()

    def _convert_punctuation(self, word: Word) -> str:
        if not word.phonemes:
            return ""
        if word.phonemes[0] in ["‖", "|"]:
            return word.text.strip()

        phonemes = "".join(word.phonemes)
        # remove modifier characters ˈˌː with regex
        phonemes = re.sub(r"[ˈˌː͡]", "", phonemes)
        return phonemes.strip()

    def phonemize(self, text: str, espeak: bool = False) -> str:
        text_to_phonemize: str = self._normalize_punctuation(text)
        sents: List[Sentence] = [
            sent
            for sent in self._phonemizer(text_to_phonemize, lang="en-us", espeak=espeak)
        ]
        words: List[str] = [
            self._convert_punctuation(word) for word in itertools.chain(*sents)
        ]
        return " ".join(words)

    def transform(self, phonemes):
        # convert phonemes to ids
        # dictionary is in symbols.py
        return [self.symbol_to_id[p] for p in phonemes if p in self.symbol_to_id.keys()]


if __name__ == "__main__":
    phonemizer = GruutPhonemizer("en-us")
    # text -> IPA
    phonemes = phonemizer.phonemize("Hello, wor-ld ?")
    print("phonemes:", phonemes)
    print("len(phonemes):", len(phonemes))
    phoneme_ids = phonemizer.transform(phonemes)
    print("phoneme_ids:", phoneme_ids)
    print("len(phoneme_ids):", len(phoneme_ids))
AR/text_processing/symbols.py ADDED
@@ -0,0 +1,10 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py
# reference: https://github.com/lifeiteng/vall-e
PAD = "_"
PUNCTUATION = ';:,.!?¡¿—…"«»“” '
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
IPA_LETTERS = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
SYMBOLS = [PAD] + list(PUNCTUATION) + list(LETTERS) + list(IPA_LETTERS)
SPACE_ID = SYMBOLS.index(" ")
SYMBOL_TO_ID = {s: i for i, s in enumerate(SYMBOLS)}
ID_TO_SYMBOL = {i: s for i, s in enumerate(SYMBOLS)}
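A tiny round-trip sketch of my own (the IPA string is illustrative) showing how these tables are meant to be used; unknown symbols are skipped, mirroring `GruutPhonemizer.transform`:

```python
from moyoyo_tts.AR.text_processing.symbols import ID_TO_SYMBOL, SYMBOL_TO_ID  # assumed path

phonemes = "həloʊ wɝld"
ids = [SYMBOL_TO_ID[p] for p in phonemes if p in SYMBOL_TO_ID]
assert "".join(ID_TO_SYMBOL[i] for i in ids) == phonemes  # every symbol here is known
```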
AR/utils/__init__.py ADDED
@@ -0,0 +1,37 @@
import re


def str2bool(str):
    return str.lower() == 'true'


def get_newest_ckpt(string_list):
    # Regex pattern matching the numbers embedded in the checkpoint filename.
    pattern = r'epoch=(\d+)-step=(\d+)\.ckpt'

    # Extract the numbers from each filename and collect them as tuples.
    extracted_info = []
    for string in string_list:
        match = re.match(pattern, string)
        if match:
            epoch = int(match.group(1))
            step = int(match.group(2))
            extracted_info.append((epoch, step, string))
    # Sort by the epoch number, then by the step number, newest first.
    sorted_info = sorted(
        extracted_info, key=lambda x: (x[0], x[1]), reverse=True)
    # Take the newest ckpt filename.
    newest_ckpt = sorted_info[0][2]
    return newest_ckpt


# Returns the first line of the file when it exists and is non-empty,
# otherwise returns False.
def check_txt_file(file_path):
    try:
        with open(file_path, 'r') as file:
            text = file.readline().strip()
        assert text.strip() != ''
        return text
    except Exception:
        return False
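A usage sketch for `get_newest_ckpt` (filenames are illustrative): ordering is by `(epoch, step)` descending, so the step number breaks ties within an epoch:

```python
ckpts = [
    "epoch=3-step=9000.ckpt",
    "epoch=12-step=1000.ckpt",
    "epoch=12-step=369668.ckpt",
]
assert get_newest_ckpt(ckpts) == "epoch=12-step=369668.ckpt"
```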
AR/utils/initialize.py ADDED
@@ -0,0 +1,38 @@
#!/usr/bin/env python3
"""Initialize modules for espnet2 neural networks."""
import torch
from typeguard import check_argument_types


def initialize(model: torch.nn.Module, init: str):
    """Initialize weights of a neural network module.

    Parameters are initialized using the given method or distribution.

    Custom initialization routines can be implemented into submodules
    as function `espnet_initialization_fn` within the custom module.

    Args:
        model: Target.
        init: Method of initialization.
    """
    assert check_argument_types()
    print("init with", init)

    # weight init
    for p in model.parameters():
        if p.dim() > 1:
            if init == "xavier_uniform":
                torch.nn.init.xavier_uniform_(p.data)
            elif init == "xavier_normal":
                torch.nn.init.xavier_normal_(p.data)
            elif init == "kaiming_uniform":
                torch.nn.init.kaiming_uniform_(p.data, nonlinearity="relu")
            elif init == "kaiming_normal":
                torch.nn.init.kaiming_normal_(p.data, nonlinearity="relu")
            else:
                raise ValueError("Unknown initialization: " + init)
    # bias init
    for name, p in model.named_parameters():
        if ".bias" in name and p.dim() == 1:
            p.data.zero_()
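A minimal smoke test of my own for `initialize` (note it relies on `typeguard`'s legacy `check_argument_types` API, which newer typeguard releases have removed): every parameter with more than one dimension is re-initialized, and every `.bias` vector is zeroed:

```python
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(16, 32),
    torch.nn.ReLU(),
    torch.nn.Linear(32, 4),
)
initialize(model, "xavier_uniform")
# all bias vectors should now be exactly zero
assert all(
    p.abs().sum() == 0 for name, p in model.named_parameters() if ".bias" in name
)
```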
AR/utils/io.py ADDED
@@ -0,0 +1,34 @@
import sys

import torch
import yaml


def load_yaml_config(path):
    with open(path) as f:
        config = yaml.full_load(f)
    return config


def save_config_to_yaml(config, path):
    assert path.endswith(".yaml")
    with open(path, "w") as f:
        f.write(yaml.dump(config))


def write_args(args, path):
    args_dict = dict(
        (name, getattr(args, name)) for name in dir(args) if not name.startswith("_")
    )
    with open(path, "a") as args_file:
        args_file.write("==> torch version: {}\n".format(torch.__version__))
        args_file.write(
            "==> cudnn version: {}\n".format(torch.backends.cudnn.version())
        )
        args_file.write("==> Cmd:\n")
        args_file.write(str(sys.argv))
        args_file.write("\n==> args:\n")
        for k, v in sorted(args_dict.items()):
            args_file.write("  %s: %s\n" % (str(k), str(v)))
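A round-trip sketch for the YAML helpers above (mine; the filename is illustrative): `save_config_to_yaml` insists on a `.yaml` suffix, and `yaml.full_load` restores plain Python types, so a dict survives the trip unchanged:

```python
cfg = {"device": "cpu", "is_half": False, "version": "v2"}
save_config_to_yaml(cfg, "tts_infer_demo.yaml")
assert load_yaml_config("tts_infer_demo.yaml") == cfg
```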
README.md ADDED
@@ -0,0 +1,348 @@
<div align="center">

<h1>GPT-SoVITS-WebUI</h1>
A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.<br><br>

[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange)](https://github.com/RVC-Boss/GPT-SoVITS)

<a href="https://trendshift.io/repositories/7033" target="_blank"><img src="https://trendshift.io/api/badge/repositories/7033" alt="RVC-Boss%2FGPT-SoVITS | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>

<!-- img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br> -->

[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-online%20demo-yellow.svg?style=for-the-badge)](https://huggingface.co/spaces/lj1995/GPT-SoVITS-v2)
[![Discord](https://img.shields.io/discord/1198701940511617164?color=%23738ADB&label=Discord&style=for-the-badge)](https://discord.gg/dnrgs5GHfG)

**English** | [**中文简体**](./docs/cn/README.md) | [**日本語**](./docs/ja/README.md) | [**한국어**](./docs/ko/README.md) | [**Türkçe**](./docs/tr/README.md)

</div>

---

## Features:

1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion.

2. **Few-shot TTS:** Fine-tune the model with just 1 minute of training data for improved voice similarity and realism.

3. **Cross-lingual Support:** Inference in languages different from the training dataset, currently supporting English, Japanese, Korean, Cantonese and Chinese.

4. **WebUI Tools:** Integrated tools include voice accompaniment separation, automatic training set segmentation, Chinese ASR, and text labeling, assisting beginners in creating training datasets and GPT/SoVITS models.

**Check out our [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) here!**

Unseen speakers few-shot fine-tuning demo:

https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb

**User guide: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**

## Installation

For users in China, you can [click here](https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official) to use AutoDL Cloud Docker to experience the full functionality online.

### Tested Environments

- Python 3.9, PyTorch 2.0.1, CUDA 11
- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3
- Python 3.9, PyTorch 2.2.2, macOS 14.4.1 (Apple silicon)
- Python 3.9, PyTorch 2.2.2, CPU devices

_Note: numba==0.56.4 requires py<3.11_

### Windows

If you are a Windows user (tested with win>=10), you can [download the integrated package](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true) and double-click _go-webui.bat_ to start GPT-SoVITS-WebUI.

**Users in China can [download the package here](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e/dkxgpiy9zb96hob4#KTvnO).**

### Linux

```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
bash install.sh
```

### macOS

**Note: The models trained with GPUs on Macs result in significantly lower quality compared to those trained on other devices, so we are temporarily using CPUs instead.**

1. Install Xcode command-line tools by running `xcode-select --install`.
2. Install FFmpeg by running `brew install ffmpeg`.
3. Install the program by running the following commands:

```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```

### Install Manually

#### Install FFmpeg

##### Conda Users

```bash
conda install ffmpeg
```

##### Ubuntu/Debian Users

```bash
sudo apt install ffmpeg
sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```

##### Windows Users

Download and place [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) and [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) in the GPT-SoVITS root.

Install [Visual Studio 2017](https://aka.ms/vs/17/release/vc_redist.x86.exe) (Korean TTS only).

##### macOS Users

```bash
brew install ffmpeg
```

#### Install Dependencies

```bash
pip install -r requirements.txt
```

### Using Docker

#### docker-compose.yaml configuration

0. Regarding image tags: due to rapid updates in the codebase and the slow process of packaging and testing images, please check [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) for the currently packaged latest images and select as per your situation, or alternatively, build locally using a Dockerfile according to your own needs.
1. Environment Variables:

   - `is_half`: Controls half-precision/double-precision. This is typically the cause if the content under the directories 4-cnhubert/5-wav32k is not generated correctly during the "SSL extracting" step. Adjust to True or False based on your actual situation.

2. Volumes Configuration: the application's root directory inside the container is set to /workspace. The default docker-compose.yaml lists some practical examples for uploading/downloading content.
3. `shm_size`: the default available memory for Docker Desktop on Windows is too small, which can cause abnormal operations. Adjust according to your own situation.
4. Under the `deploy` section, GPU-related settings should be adjusted cautiously according to your system and actual circumstances.

#### Running with docker compose

```
docker compose -f "docker-compose.yaml" up -d
```

#### Running with docker command

As above, modify the corresponding parameters based on your actual situation, then run the following command:

```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```

## Pretrained Models

**Users in China can [download all these models here](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e/dkxgpiy9zb96hob4#nVNhX).**

1. Download pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS/pretrained_models`.

2. Download G2PW models from [G2PWModel_1.1.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/g2p/G2PWModel_1.1.zip), unzip and rename to `G2PWModel`, and then place them in `GPT_SoVITS/text`. (Chinese TTS only)

3. For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`.

4. For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/asr/models`.

5. For English or Japanese ASR (additionally), download models from [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) and place them in `tools/asr/models`. Also, [other models](https://huggingface.co/Systran) may have a similar effect with a smaller disk footprint.

## Dataset Format

The TTS annotation .list file format:

```
vocal_path|speaker_name|language|text
```

Language dictionary:

- 'zh': Chinese
- 'ja': Japanese
- 'en': English
- 'ko': Korean
- 'yue': Cantonese

Example:

```
D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
```

## Finetune and inference

### Open WebUI

#### Integrated Package Users

Double-click `go-webui.bat` or use `go-webui.ps1`.
If you want to switch to V1, double-click `go-webui-v1.bat` or use `go-webui-v1.ps1`.

#### Others

```bash
python webui.py <language(optional)>
```

If you want to switch to V1, then

```bash
python webui.py v1 <language(optional)>
```

Or manually switch version in the WebUI.

### Finetune

#### Path Auto-filling is now supported

1. Fill in the audio path
2. Slice the audio into small chunks
3. Denoise (optional)
4. ASR
5. Proofread the ASR transcriptions
6. Go to the next tab, then finetune the model

### Open Inference WebUI

#### Integrated Package Users

Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1`, then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.

#### Others

```bash
python moyoyo_tts/inference_webui.py <language(optional)>
```

OR

```bash
python webui.py
```

then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`.

## V2 Release Notes

New Features:

1. Support Korean and Cantonese

2. An optimized text frontend

3. Pre-trained model extended from 2k hours to 5k hours

4. Improved synthesis quality for low-quality reference audio

[more details](https://github.com/RVC-Boss/GPT-SoVITS/wiki/GPT%E2%80%90SoVITS%E2%80%90v2%E2%80%90features-(%E6%96%B0%E7%89%B9%E6%80%A7))

Use v2 from the v1 environment:

1. `pip install -r requirements.txt` to update some packages

2. Clone the latest codes from github.

3. Download v2 pretrained models from [huggingface](https://huggingface.co/lj1995/GPT-SoVITS/tree/main/gsv-v2final-pretrained) and put them into `GPT_SoVITS\pretrained_models\gsv-v2final-pretrained`.

   Chinese v2 additional: [G2PWModel_1.1.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/g2p/G2PWModel_1.1.zip) (download the G2PW models, unzip and rename to `G2PWModel`, then place it in `GPT_SoVITS/text`).

## Todo List

- [x] **High Priority:**

  - [x] Localization in Japanese and English.
  - [x] User guide.
  - [x] Japanese and English dataset fine-tune training.

- [ ] **Features:**
  - [x] Zero-shot voice conversion (5s) / few-shot voice conversion (1min).
  - [x] TTS speaking speed control.
  - [ ] ~~Enhanced TTS emotion control.~~
  - [ ] Experiment with changing SoVITS token inputs to probability distribution of GPT vocabs (transformer latent).
  - [x] Improve English and Japanese text frontend.
  - [ ] Develop tiny and larger-sized TTS models.
  - [x] Colab scripts.
  - [ ] Try expanding the training dataset (2k hours -> 10k hours).
  - [x] Better SoVITS base model (enhanced audio quality).
  - [ ] Model mix.

## (Additional) Method for running from the command line

Use the command line to open the WebUI for UVR5:

```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
```

<!-- If you can't open a browser, follow the format below for UVR processing; this uses mdxnet for audio processing
```
python mdxnet.py --model --input_root --output_vocal --output_ins --agg_level --format --device --is_half_precision
``` -->

This is how the audio segmentation of the dataset is done using the command line:

```
python audio_slicer.py \
    --input_path "<path_to_original_audio_file_or_directory>" \
    --output_root "<directory_where_subdivided_audio_clips_will_be_saved>" \
    --threshold <volume_threshold> \
    --min_length <minimum_duration_of_each_subclip> \
    --min_interval <shortest_time_gap_between_adjacent_subclips> \
    --hop_size <step_size_for_computing_volume_curve>
```

This is how dataset ASR processing is done using the command line (Chinese only):

```
python tools/asr/funasr_asr.py -i <input> -o <output>
```

ASR processing is performed through Faster_Whisper (ASR marking except Chinese).

(No progress bars; GPU performance may cause time delays.)

```
python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language> -p <precision>
```

A custom list save path is enabled.

## Credits

Special thanks to the following projects and contributors:

### Theoretical Research
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)

### Pretrained Models
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)

### Text Frontend for Inference
- [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
- [LangSegment](https://github.com/juntaosun/LangSegment)
- [g2pW](https://github.com/GitYCC/g2pW)
- [pypinyin-g2pW](https://github.com/mozillazg/pypinyin-g2pW)
- [paddlespeech g2pw](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/g2pw)

### WebUI Tools
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
- [faster-whisper](https://github.com/SYSTRAN/faster-whisper)
- [FunASR](https://github.com/alibaba-damo-academy/FunASR)

Thankful to @Naozumi520 for providing the Cantonese training set and for the guidance on Cantonese-related knowledge.

## Thanks to all contributors for their efforts

<a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
  <img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
</a>
TTS_infer_pack/TTS.py ADDED
@@ -0,0 +1,1047 @@
1
+ import gc
2
+ import math
3
+ import os
4
+ import random
5
+ import sys
6
+ import traceback
7
+ from copy import deepcopy
8
+ from time import time as ttime
9
+ from typing import List, Tuple, Union
10
+
11
+ import ffmpeg
12
+ import librosa
13
+ import soundfile
14
+ import numpy as np
15
+ import torch
16
+ import yaml
17
+ from tqdm import tqdm
18
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
19
+
20
+ now_dir = os.getcwd()
21
+ sys.path.append(now_dir)
22
+
23
+ from moyoyo_tts.AR.models.t2s_lightning_module import Text2SemanticLightningModule
24
+ from moyoyo_tts.TTS_infer_pack.TextPreprocessor import TextPreprocessor
25
+ from moyoyo_tts.TTS_infer_pack.text_segmentation_method import splits
26
+ from moyoyo_tts.feature_extractor.cnhubert import CNHubert
27
+ from moyoyo_tts.module.mel_processing import spectrogram_torch
28
+ from moyoyo_tts.module.models import SynthesizerTrn
29
+ from moyoyo_tts.tools.i18n.i18n import I18nAuto, scan_language_list
30
+ from moyoyo_tts.tools.my_utils import load_audio
31
+
32
+ language = os.environ.get("language", "Auto")
33
+ language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
34
+ i18n = I18nAuto(language=language)
35
+
36
+ # configs/tts_infer.yaml
37
+ """
38
+ custom:
39
+ bert_base_path: moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large
40
+ cnhuhbert_base_path: moyoyo_tts/pretrained_models/chinese-hubert-base
41
+ device: cpu
42
+ is_half: false
43
+ t2s_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
44
+ vits_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth
45
+ version: v2
46
+ default:
47
+ bert_base_path: moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large
48
+ cnhuhbert_base_path: moyoyo_tts/pretrained_models/chinese-hubert-base
49
+ device: cpu
50
+ is_half: false
51
+ t2s_weights_path: moyoyo_tts/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
52
+ vits_weights_path: moyoyo_tts/pretrained_models/s2G488k.pth
53
+ version: v1
54
+ default_v2:
55
+ bert_base_path: moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large
56
+ cnhuhbert_base_path: moyoyo_tts/pretrained_models/chinese-hubert-base
57
+ device: cpu
58
+ is_half: false
59
+ t2s_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
60
+ vits_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth
61
+ version: v2
62
+ """
63
+
64
+
65
+ def set_seed(seed: int):
66
+ seed = int(seed)
67
+ seed = seed if seed != -1 else random.randrange(1 << 32)
68
+ #print(f"Set seed to {seed}")
69
+ os.environ['PYTHONHASHSEED'] = str(seed)
70
+ random.seed(seed)
71
+ np.random.seed(seed)
72
+ torch.manual_seed(seed)
73
+ try:
74
+ if torch.cuda.is_available():
75
+ torch.cuda.manual_seed(seed)
76
+ torch.cuda.manual_seed_all(seed)
77
+ # torch.backends.cudnn.deterministic = True
78
+ # torch.backends.cudnn.benchmark = False
79
+ # torch.backends.cudnn.enabled = True
80
+ # enabling TF32 would affect precision, so keep it disabled
81
+ torch.backends.cuda.matmul.allow_tf32 = False
82
+ torch.backends.cudnn.allow_tf32 = False
83
+ except:
84
+ pass
85
+ return seed
86
+
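A minimal usage sketch of `set_seed` (the values are illustrative): passing -1 draws a random 32-bit seed and returns the value actually used, which can be stored to replay a run.

```python
# Sketch: draw a random seed once, then reuse it to reproduce a run.
actual_seed = set_seed(-1)          # -1 -> pick a random seed and return it
print(f"run used seed {actual_seed}")
set_seed(actual_seed)               # later: replay with the same seed
```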
87
+
88
+ class TTS_Config:
89
+ default_configs = {
90
+ "default": {
91
+ "device": "cpu",
92
+ "is_half": False,
93
+ "version": "v1",
94
+ "t2s_weights_path": "moyoyo_tts/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
95
+ "vits_weights_path": "moyoyo_tts/pretrained_models/s2G488k.pth",
96
+ "cnhuhbert_base_path": "moyoyo_tts/pretrained_models/chinese-hubert-base",
97
+ "bert_base_path": "moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large",
98
+ },
99
+ "default_v2": {
100
+ "device": "cpu",
101
+ "is_half": False,
102
+ "version": "v2",
103
+ "t2s_weights_path": "moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
104
+ "vits_weights_path": "moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
105
+ "cnhuhbert_base_path": "moyoyo_tts/pretrained_models/chinese-hubert-base",
106
+ "bert_base_path": "moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large",
107
+ },
108
+ }
109
+ configs: dict = None
110
+ v1_languages: list = ["auto", "en", "zh", "ja", "all_zh", "all_ja"]
111
+ v2_languages: list = ["auto", "auto_yue", "en", "zh", "ja", "yue", "ko", "all_zh", "all_ja", "all_yue", "all_ko"]
112
+ languages: list = v2_languages
113
+
114
+ # "all_zh",#全部按中文识别
115
+ # "en",#全部按英文识别#######不变
116
+ # "all_ja",#全部按日文识别
117
+ # "all_yue",#全部按中文识别
118
+ # "all_ko",#全部按韩文识别
119
+ # "zh",#按中英混合识别####不变
120
+ # "ja",#按日英混合识别####不变
121
+ # "yue",#按粤英混合识别####不变
122
+ # "ko",#按韩英混合识别####不变
123
+ # "auto",#多语种启动切分识别语种
124
+ # "auto_yue",#多语种启动切分识别语种
125
+
126
+ def __init__(self, configs: Union[dict, str] = None):
127
+
128
+ if configs in ["", None]:
129
+ # set the default config file path
130
+ configs_base_path: str = "moyoyo_tts/configs/"
131
+ os.makedirs(configs_base_path, exist_ok=True)
132
+ self.configs_path: str = os.path.join(configs_base_path, "tts_infer.yaml")
133
+ if not os.path.exists(self.configs_path):
134
+ self.save_configs()
135
+ print(f"Create default config file at {self.configs_path}")
136
+ configs: dict = deepcopy(self.default_configs)
137
+
138
+ if isinstance(configs, str):
139
+ self.configs_path = configs
140
+ configs: dict = self._load_configs(self.configs_path)
141
+
142
+ assert isinstance(configs, dict)
143
+ version = configs.get("version", "v2").lower()
144
+ assert version in ["v1", "v2"]
145
+ self.default_configs["default"] = configs.get("default", self.default_configs["default"])
146
+ self.default_configs["default_v2"] = configs.get("default_v2", self.default_configs["default_v2"])
147
+
148
+ default_config_key = "default" if version == "v1" else "default_v2"
149
+ self.configs: dict = configs.get("custom", deepcopy(self.default_configs[default_config_key]))
150
+
151
+ self.device = self.configs.get("device", torch.device("cpu"))
152
+ self.is_half = self.configs.get("is_half", False)
153
+ self.version = version
154
+ self.t2s_weights_path = self.configs.get("t2s_weights_path", None)
155
+ self.vits_weights_path = self.configs.get("vits_weights_path", None)
156
+ # self.prompt_semantic_path = self.configs.get('prompt_semantic_path')
157
+ # self.refer_spec_path = self.configs.get('refer_spec_path')
158
+ self.bert_base_path = self.configs.get("bert_base_path", None)
159
+ os.environ['bert_path'] = f'{self.bert_base_path}'
160
+ self.cnhuhbert_base_path = self.configs.get("cnhuhbert_base_path", None)
161
+ self.languages = self.v2_languages if self.version == "v2" else self.v1_languages
162
+
163
+ if (self.t2s_weights_path in [None, ""]) or (not os.path.exists(self.t2s_weights_path)):
164
+ self.t2s_weights_path = self.default_configs[default_config_key]['t2s_weights_path']
165
+ print(f"fall back to default t2s_weights_path: {self.t2s_weights_path}")
166
+
167
+ if (self.vits_weights_path in [None, ""]) or (not os.path.exists(self.vits_weights_path)):
168
+ self.vits_weights_path = self.default_configs[default_config_key]['vits_weights_path']
169
+ print(f"fall back to default vits_weights_path: {self.vits_weights_path}")
170
+
171
+ if (self.bert_base_path in [None, ""]) or (not os.path.exists(self.bert_base_path)):
172
+ self.bert_base_path = self.default_configs[default_config_key]['bert_base_path']
173
+ print(f"fall back to default bert_base_path: {self.bert_base_path}")
174
+
175
+ if (self.cnhuhbert_base_path in [None, ""]) or (not os.path.exists(self.cnhuhbert_base_path)):
176
+ self.cnhuhbert_base_path = self.default_configs[default_config_key]['cnhuhbert_base_path']
177
+ print(f"fall back to default cnhuhbert_base_path: {self.cnhuhbert_base_path}")
178
+
179
+ # if (self.prompt_semantic_path in [None, ""]) or (not os.path.exists(self.prompt_semantic_path)):
180
+ # self.prompt_semantic_path = self.default_configs['prompt_semantic_path']
181
+ # print(f"fall back to default prompt_semantic_path: {self.prompt_semantic_path}")
182
+ #
183
+ # if (self.refer_spec_path in [None, ""]) or (not os.path.exists(self.refer_spec_path)):
184
+ # self.refer_spec_path = self.default_configs['refer_spec_path']
185
+ # print(f"fall back to default refer_spec_path: {self.refer_spec_path}")
186
+
187
+ self.update_configs()
188
+
189
+ self.max_sec = None
190
+ self.hz: int = 50
191
+ self.semantic_frame_rate: str = "25hz"
192
+ self.segment_size: int = 20480
193
+ self.filter_length: int = 2048
194
+ self.sampling_rate: int = 32000
195
+ self.hop_length: int = 640
196
+ self.win_length: int = 2048
197
+ self.n_speakers: int = 300
198
+
199
+ def _load_configs(self, configs_path: str) -> dict:
200
+ if os.path.exists(configs_path):
201
+ ...
202
+ else:
203
+ print(i18n("路径不存在,使用默认配置"))
204
+ self.save_configs(configs_path)
205
+ with open(configs_path, 'r') as f:
206
+ configs = yaml.load(f, Loader=yaml.FullLoader)
207
+
208
+ return configs
209
+
210
+ def save_configs(self, configs_path: str = None) -> None:
211
+ configs = deepcopy(self.default_configs)
212
+ if self.configs is not None:
213
+ configs["custom"] = self.update_configs()
214
+
215
+ if configs_path is None and hasattr(self, 'configs_path'):
216
+ configs_path = self.configs_path
217
+
218
+ if configs_path is None:
219
+ return
220
+
221
+ with open(configs_path, 'w') as f:
222
+ yaml.dump(configs, f)
223
+
224
+ def update_configs(self):
225
+ self.config = {
226
+ "device": str(self.device),
227
+ "is_half": self.is_half,
228
+ "version": self.version,
229
+ "t2s_weights_path": self.t2s_weights_path,
230
+ "vits_weights_path": self.vits_weights_path,
231
+ "bert_base_path": self.bert_base_path,
232
+ "cnhuhbert_base_path": self.cnhuhbert_base_path,
233
+ }
234
+ return self.config
235
+
236
+ def update_version(self, version: str) -> None:
237
+ self.version = version
238
+ self.languages = self.v2_languages if self.version == "v2" else self.v1_languages
239
+
240
+ def __str__(self):
241
+ self.configs = self.update_configs()
242
+ string = "TTS Config".center(100, '-') + '\n'
243
+ for k, v in self.configs.items():
244
+ string += f"{str(k).ljust(20)}: {str(v)}\n"
245
+ string += "-" * 100 + '\n'
246
+ return string
247
+
248
+ def __repr__(self):
249
+ return self.__str__()
250
+
251
+ def __hash__(self):
252
+ return hash(self.configs_path)
253
+
254
+ def __eq__(self, other):
255
+ return isinstance(other, TTS_Config) and self.configs_path == other.configs_path
256
+
257
+
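A brief sketch of how `TTS_Config` is typically constructed; the YAML path is the default location created above and is illustrative:

```python
# Load from a YAML file; missing or invalid entries fall back to the defaults.
cfg = TTS_Config("moyoyo_tts/configs/tts_infer.yaml")
print(cfg)                  # __str__ prints the resolved "custom" config
cfg.update_version("v2")    # switches cfg.languages to the v2 language list
```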
258
+ class TTS:
259
+ def __init__(self, configs: Union[dict, str, TTS_Config]):
260
+ if isinstance(configs, TTS_Config):
261
+ self.configs = configs
262
+ else:
263
+ self.configs: TTS_Config = TTS_Config(configs)
264
+
265
+ self.t2s_model: Text2SemanticLightningModule = None
266
+ self.vits_model: SynthesizerTrn = None
267
+ self.bert_tokenizer: AutoTokenizer = None
268
+ self.bert_model: AutoModelForMaskedLM = None
269
+ self.cnhuhbert_model: CNHubert = None
270
+
271
+ self._init_models()
272
+
273
+ self.text_preprocessor: TextPreprocessor = \
274
+ TextPreprocessor(self.bert_model,
275
+ self.bert_tokenizer,
276
+ self.configs.device)
277
+
278
+ self.prompt_cache: dict = {
279
+ "ref_audio_path": None,
280
+ "prompt_semantic": None,
281
+ "refer_spec": [],
282
+ "prompt_text": None,
283
+ "prompt_lang": None,
284
+ "phones": None,
285
+ "bert_features": None,
286
+ "norm_text": None,
287
+ "aux_ref_audio_paths": [],
288
+ }
289
+
290
+ self.stop_flag: bool = False
291
+ self.precision: torch.dtype = torch.float16 if self.configs.is_half else torch.float32
292
+
293
+ def _init_models(self, ):
294
+ self.init_t2s_weights(self.configs.t2s_weights_path)
295
+ self.init_vits_weights(self.configs.vits_weights_path)
296
+ self.init_bert_weights(self.configs.bert_base_path)
297
+ self.init_cnhuhbert_weights(self.configs.cnhuhbert_base_path)
298
+ # self.enable_half_precision(self.configs.is_half)
299
+
300
+ def init_cnhuhbert_weights(self, base_path: str):
301
+ print(f"Loading CNHuBERT weights from {base_path}")
302
+ self.cnhuhbert_model = CNHubert(base_path)
303
+ self.cnhuhbert_model = self.cnhuhbert_model.eval()
304
+ self.cnhuhbert_model = self.cnhuhbert_model.to(self.configs.device)
305
+ if self.configs.is_half and str(self.configs.device) != "cpu":
306
+ self.cnhuhbert_model = self.cnhuhbert_model.half()
307
+
308
+ def init_bert_weights(self, base_path: str):
309
+ print(f"Loading BERT weights from {base_path}")
310
+ self.bert_tokenizer = AutoTokenizer.from_pretrained(base_path)
311
+ self.bert_model = AutoModelForMaskedLM.from_pretrained(base_path)
312
+ self.bert_model = self.bert_model.eval()
313
+ self.bert_model = self.bert_model.to(self.configs.device)
314
+ if self.configs.is_half and str(self.configs.device) != "cpu":
315
+ self.bert_model = self.bert_model.half()
316
+
317
+ def init_vits_weights(self, weights_path: str):
318
+ print(f"Loading VITS weights from {weights_path}")
319
+ self.configs.vits_weights_path = weights_path
320
+ dict_s2 = torch.load(weights_path, map_location=self.configs.device)
321
+ hps = dict_s2["config"]
322
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
323
+ self.configs.update_version("v1")
324
+ else:
325
+ self.configs.update_version("v2")
326
+ self.configs.save_configs()
327
+
328
+ hps["model"]["version"] = self.configs.version
329
+ self.configs.filter_length = hps["data"]["filter_length"]
330
+ self.configs.segment_size = hps["train"]["segment_size"]
331
+ self.configs.sampling_rate = hps["data"]["sampling_rate"]
332
+ self.configs.hop_length = hps["data"]["hop_length"]
333
+ self.configs.win_length = hps["data"]["win_length"]
334
+ self.configs.n_speakers = hps["data"]["n_speakers"]
335
+ self.configs.semantic_frame_rate = "25hz"
336
+ kwargs = hps["model"]
337
+ vits_model = SynthesizerTrn(
338
+ self.configs.filter_length // 2 + 1,
339
+ self.configs.segment_size // self.configs.hop_length,
340
+ n_speakers=self.configs.n_speakers,
341
+ **kwargs
342
+ )
343
+
344
+ if hasattr(vits_model, "enc_q"):
345
+ del vits_model.enc_q
346
+
347
+ vits_model = vits_model.to(self.configs.device)
348
+ vits_model = vits_model.eval()
349
+ vits_model.load_state_dict(dict_s2["weight"], strict=False)
350
+ self.vits_model = vits_model
351
+ if self.configs.is_half and str(self.configs.device) != "cpu":
352
+ self.vits_model = self.vits_model.half()
353
+
354
+ def init_t2s_weights(self, weights_path: str):
355
+ print(f"Loading Text2Semantic weights from {weights_path}")
356
+ self.configs.t2s_weights_path = weights_path
357
+ self.configs.save_configs()
358
+ self.configs.hz = 50
359
+ dict_s1 = torch.load(weights_path, map_location=self.configs.device)
360
+ config = dict_s1["config"]
361
+ self.configs.max_sec = config["data"]["max_sec"]
362
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
363
+ t2s_model.load_state_dict(dict_s1["weight"])
364
+ t2s_model = t2s_model.to(self.configs.device)
365
+ t2s_model = t2s_model.eval()
366
+ self.t2s_model = t2s_model
367
+ if self.configs.is_half and str(self.configs.device) != "cpu":
368
+ self.t2s_model = self.t2s_model.half()
369
+
370
+ def enable_half_precision(self, enable: bool = True, save: bool = True):
371
+ '''
372
+ Enable or disable half precision for the loaded models.
373
+ Args:
374
+ enable: bool, whether to enable half precision.
375
+
376
+ '''
377
+ if str(self.configs.device) == "cpu" and enable:
378
+ print("Half precision is not supported on CPU.")
379
+ return
380
+
381
+ self.configs.is_half = enable
382
+ self.precision = torch.float16 if enable else torch.float32
383
+ if save:
384
+ self.configs.save_configs()
385
+ if enable:
386
+ if self.t2s_model is not None:
387
+ self.t2s_model = self.t2s_model.half()
388
+ if self.vits_model is not None:
389
+ self.vits_model = self.vits_model.half()
390
+ if self.bert_model is not None:
391
+ self.bert_model = self.bert_model.half()
392
+ if self.cnhuhbert_model is not None:
393
+ self.cnhuhbert_model = self.cnhuhbert_model.half()
394
+ else:
395
+ if self.t2s_model is not None:
396
+ self.t2s_model = self.t2s_model.float()
397
+ if self.vits_model is not None:
398
+ self.vits_model = self.vits_model.float()
399
+ if self.bert_model is not None:
400
+ self.bert_model = self.bert_model.float()
401
+ if self.cnhuhbert_model is not None:
402
+ self.cnhuhbert_model = self.cnhuhbert_model.float()
403
+
404
+ def set_device(self, device: torch.device, save: bool = True):
405
+ '''
406
+ Set the device for all loaded models.
407
+ Args:
408
+ device: torch.device, the device to use for all models.
409
+ '''
410
+ self.configs.device = device
411
+ if save:
412
+ self.configs.save_configs()
413
+ if self.t2s_model is not None:
414
+ self.t2s_model = self.t2s_model.to(device)
415
+ if self.vits_model is not None:
416
+ self.vits_model = self.vits_model.to(device)
417
+ if self.bert_model is not None:
418
+ self.bert_model = self.bert_model.to(device)
419
+ if self.cnhuhbert_model is not None:
420
+ self.cnhuhbert_model = self.cnhuhbert_model.to(device)
421
+
422
+ def set_ref_audio(self, ref_audio_path: str):
423
+ '''
424
+ Set the reference audio for the TTS model,
425
+ including the prompt_semantic and refer_spec.
426
+ Args:
427
+ ref_audio_path: str, the path of the reference audio.
428
+ '''
429
+ self._set_prompt_semantic(ref_audio_path)
430
+ self._set_ref_spec(ref_audio_path)
431
+ self._set_ref_audio_path(ref_audio_path)
432
+ # self._set_ref_audio_path(ref_audio_path)
433
+ # self.prompt_cache['prompt_semantic'] = torch.load(self.configs.prompt_semantic_path, map_location=self.configs.device)
434
+ # self.prompt_cache["refer_spec"] = [torch.load(self.configs.refer_spec_path, map_location=self.configs.device)]
435
+
436
+ def _set_ref_audio_path(self, ref_audio_path):
437
+ self.prompt_cache["ref_audio_path"] = ref_audio_path
438
+
439
+ def _set_ref_spec(self, ref_audio_path):
440
+ spec = self._get_ref_spec(ref_audio_path)
441
+ if self.prompt_cache["refer_spec"] in [[], None]:
442
+ self.prompt_cache["refer_spec"] = [spec]
443
+ else:
444
+ self.prompt_cache["refer_spec"][0] = spec
445
+
446
+ def _get_ref_spec(self, ref_audio_path):
447
+ audio = load_audio(ref_audio_path, int(self.configs.sampling_rate))
448
+ audio = torch.FloatTensor(audio)
449
+ maxx = audio.abs().max()
450
+ if (maxx > 1): audio /= min(2, maxx)
451
+ audio_norm = audio
452
+ audio_norm = audio_norm.unsqueeze(0)
453
+ spec = spectrogram_torch(
454
+ audio_norm,
455
+ self.configs.filter_length,
456
+ self.configs.sampling_rate,
457
+ self.configs.hop_length,
458
+ self.configs.win_length,
459
+ center=False,
460
+ )
461
+ spec = spec.to(self.configs.device)
462
+ if self.configs.is_half:
463
+ spec = spec.half()
464
+ return spec
465
+
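For orientation, with the defaults above (32 kHz sampling rate, win_length 2048, hop_length 640, center=False), a 5-second reference clip yields about 247 spectrogram frames; a quick sanity check of the count:

```python
# frame count for a center=False STFT: floor((T - win_length) / hop_length) + 1
T, win, hop = 5 * 32000, 2048, 640
n_frames = (T - win) // hop + 1    # -> 247
```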
466
+ def _set_prompt_semantic(self, ref_wav_path: str):
467
+ zero_wav = np.zeros(
468
+ int(self.configs.sampling_rate * 0.3),
469
+ dtype=np.float16 if self.configs.is_half else np.float32,
470
+ )
471
+ with torch.no_grad():
472
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
473
+ y, sr = soundfile.read(ref_wav_path)
474
+ duration = librosa.get_duration(y=y, sr=sr)
475
+ if (duration < 3 or duration > 10):
476
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
477
+ wav16k = torch.from_numpy(wav16k)
478
+ zero_wav_torch = torch.from_numpy(zero_wav)
479
+ wav16k = wav16k.to(self.configs.device)
480
+ zero_wav_torch = zero_wav_torch.to(self.configs.device)
481
+ if self.configs.is_half:
482
+ wav16k = wav16k.half()
483
+ zero_wav_torch = zero_wav_torch.half()
484
+
485
+ wav16k = torch.cat([wav16k, zero_wav_torch])
486
+ hubert_feature = self.cnhuhbert_model.model(wav16k.unsqueeze(0))[
487
+ "last_hidden_state"
488
+ ].transpose(
489
+ 1, 2
490
+ ) # .float()
491
+ codes = self.vits_model.extract_latent(hubert_feature)
492
+
493
+ prompt_semantic = codes[0, 0].to(self.configs.device)
494
+ self.prompt_cache["prompt_semantic"] = prompt_semantic
495
+
496
+ def batch_sequences(self, sequences: List[torch.Tensor], axis: int = 0, pad_value: int = 0, max_length: int = None):
497
+ seq = sequences[0]
498
+ ndim = seq.dim()
499
+ if axis < 0:
500
+ axis += ndim
501
+ dtype: torch.dtype = seq.dtype
502
+ pad_value = torch.tensor(pad_value, dtype=dtype)
503
+ seq_lengths = [seq.shape[axis] for seq in sequences]
504
+ if max_length is None:
505
+ max_length = max(seq_lengths)
506
+ else:
507
+ max_length = max(seq_lengths) if max_length < max(seq_lengths) else max_length
508
+
509
+ padded_sequences = []
510
+ for seq, length in zip(sequences, seq_lengths):
511
+ padding = [0] * axis + [0, max_length - length] + [0] * (ndim - axis - 1)
512
+ padded_seq = torch.nn.functional.pad(seq, padding, value=pad_value)
513
+ padded_sequences.append(padded_seq)
514
+ batch = torch.stack(padded_sequences)
515
+ return batch
516
+
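A small sketch of what `batch_sequences` does, assuming `tts` is an initialized `TTS` instance; the tensors are illustrative:

```python
import torch

a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5])
tts.batch_sequences([a, b], axis=0, pad_value=0)
# -> tensor([[1, 2, 3],
#            [4, 5, 0]])    # shorter sequences are right-padded to the max length
```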
517
+ def to_batch(self, data: list,
518
+ prompt_data: dict = None,
519
+ batch_size: int = 5,
520
+ threshold: float = 0.75,
521
+ split_bucket: bool = True,
522
+ device: torch.device = torch.device("cpu"),
523
+ precision: torch.dtype = torch.float32,
524
+ ):
525
+ _data: list = []
526
+ index_and_len_list = []
527
+ for idx, item in enumerate(data):
528
+ norm_text_len = len(item["norm_text"])
529
+ index_and_len_list.append([idx, norm_text_len])
530
+
531
+ batch_index_list = []
532
+ if split_bucket:
533
+ index_and_len_list.sort(key=lambda x: x[1])
534
+ index_and_len_list = np.array(index_and_len_list, dtype=np.int64)
535
+
536
+ batch_index_list_len = 0
537
+ pos = 0
538
+ while pos < index_and_len_list.shape[0]:
539
+ # batch_index_list.append(index_and_len_list[pos:min(pos+batch_size,len(index_and_len_list))])
540
+ pos_end = min(pos + batch_size, index_and_len_list.shape[0])
541
+ while pos < pos_end:
542
+ batch = index_and_len_list[pos:pos_end, 1].astype(np.float32)
543
+ score = batch[(pos_end - pos) // 2] / (batch.mean() + 1e-8)
544
+ if (score >= threshold) or (pos_end - pos == 1):
545
+ batch_index = index_and_len_list[pos:pos_end, 0].tolist()
546
+ batch_index_list_len += len(batch_index)
547
+ batch_index_list.append(batch_index)
548
+ pos = pos_end
549
+ break
550
+ pos_end = pos_end - 1
551
+
552
+ assert batch_index_list_len == len(data)
553
+
554
+ else:
555
+ for i in range(len(data)):
556
+ if i % batch_size == 0:
557
+ batch_index_list.append([])
558
+ batch_index_list[-1].append(i)
559
+
560
+ for batch_idx, index_list in enumerate(batch_index_list):
561
+ item_list = [data[idx] for idx in index_list]
562
+ phones_list = []
563
+ phones_len_list = []
564
+ # bert_features_list = []
565
+ all_phones_list = []
566
+ all_phones_len_list = []
567
+ all_bert_features_list = []
568
+ norm_text_batch = []
569
+ all_bert_max_len = 0
570
+ all_phones_max_len = 0
571
+ for item in item_list:
572
+ if prompt_data is not None:
573
+ all_bert_features = torch.cat([prompt_data["bert_features"], item["bert_features"]], 1) \
574
+ .to(dtype=precision, device=device)
575
+ all_phones = torch.LongTensor(prompt_data["phones"] + item["phones"]).to(device)
576
+ phones = torch.LongTensor(item["phones"]).to(device)
577
+ # norm_text = prompt_data["norm_text"]+item["norm_text"]
578
+ else:
579
+ all_bert_features = item["bert_features"] \
580
+ .to(dtype=precision, device=device)
581
+ phones = torch.LongTensor(item["phones"]).to(device)
582
+ all_phones = phones
583
+ # norm_text = item["norm_text"]
584
+
585
+ all_bert_max_len = max(all_bert_max_len, all_bert_features.shape[-1])
586
+ all_phones_max_len = max(all_phones_max_len, all_phones.shape[-1])
587
+
588
+ phones_list.append(phones)
589
+ phones_len_list.append(phones.shape[-1])
590
+ all_phones_list.append(all_phones)
591
+ all_phones_len_list.append(all_phones.shape[-1])
592
+ all_bert_features_list.append(all_bert_features)
593
+ norm_text_batch.append(item["norm_text"])
594
+
595
+ phones_batch = phones_list
596
+ all_phones_batch = all_phones_list
597
+ all_bert_features_batch = all_bert_features_list
598
+
599
+ max_len = max(all_bert_max_len, all_phones_max_len)
600
+ # phones_batch = self.batch_sequences(phones_list, axis=0, pad_value=0, max_length=max_len)
601
+ #### Pad phones and bert_features directly. (The padding strategy affects the T2S model's output but does not directly affect the repetition probability; the main factor behind repetition is the masking strategy.)
602
+ # all_phones_batch = self.batch_sequences(all_phones_list, axis=0, pad_value=0, max_length=max_len)
603
+ # all_bert_features_batch = all_bert_features_list
604
+ # all_bert_features_batch = torch.zeros((len(all_bert_features_list), 1024, max_len), dtype=precision, device=device)
605
+ # for idx, item in enumerate(all_bert_features_list):
606
+ # all_bert_features_batch[idx, :, : item.shape[-1]] = item
607
+
608
+ # #### Alternatively, embed the phones and project the bert_features first, then pad to the same length. (The padding strategy affects the T2S model's output but does not directly affect the repetition probability; the main factor behind repetition is the masking strategy.)
609
+ # all_phones_list = [self.t2s_model.model.ar_text_embedding(item.to(self.t2s_model.device)) for item in all_phones_list]
610
+ # all_phones_list = [F.pad(item,(0,0,0,max_len-item.shape[0]),value=0) for item in all_phones_list]
611
+ # all_phones_batch = torch.stack(all_phones_list, dim=0)
612
+
613
+ # all_bert_features_list = [self.t2s_model.model.bert_proj(item.to(self.t2s_model.device).transpose(0, 1)) for item in all_bert_features_list]
614
+ # all_bert_features_list = [F.pad(item,(0,0,0,max_len-item.shape[0]), value=0) for item in all_bert_features_list]
615
+ # all_bert_features_batch = torch.stack(all_bert_features_list, dim=0)
616
+
617
+ batch = {
618
+ "phones": phones_batch,
619
+ "phones_len": torch.LongTensor(phones_len_list).to(device),
620
+ "all_phones": all_phones_batch,
621
+ "all_phones_len": torch.LongTensor(all_phones_len_list).to(device),
622
+ "all_bert_features": all_bert_features_batch,
623
+ "norm_text": norm_text_batch,
624
+ "max_len": max_len,
625
+ }
626
+ _data.append(batch)
627
+
628
+ return _data, batch_index_list
629
+
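A worked example of the bucket-acceptance test in `to_batch`, with illustrative lengths and the default threshold of 0.75: the middle element of the sorted candidate batch is divided by the batch mean, and the batch shrinks from the right until the ratio passes.

```python
import numpy as np

lens = np.array([3.0, 4.0, 5.0, 20.0])    # sorted norm-text lengths
lens[len(lens) // 2] / lens.mean()        # 5 / 8 = 0.625 < 0.75 -> shrink
short = lens[:3]
short[len(short) // 2] / short.mean()     # 4 / 4 = 1.0 >= 0.75 -> accept [3, 4, 5]
```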
630
+ def recovery_order(self, data: list, batch_index_list: list) -> list:
631
+ '''
632
+ Recover the order of the audio fragments according to the batch_index_list.
633
+
634
+ Args:
635
+ data (List[List[np.ndarray]]): the out-of-order audio fragments.
636
+ batch_index_list (List[list[int]]): the batch index list.
637
+
638
+ Returns:
639
+ list (List[np.ndarray]): the data in the original order.
640
+ '''
641
+ length = len(sum(batch_index_list, []))
642
+ _data = [None] * length
643
+ for i, index_list in enumerate(batch_index_list):
644
+ for j, index in enumerate(index_list):
645
+ _data[index] = data[i][j]
646
+ return _data
647
+
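An illustrative sketch of `recovery_order` (again assuming `tts` is a `TTS` instance; the fragment strings stand in for audio arrays):

```python
batch_index_list = [[2, 0], [1]]           # bucketed processing order
audio = [["frag2", "frag0"], ["frag1"]]    # per-batch outputs in that order
tts.recovery_order(audio, batch_index_list)
# -> ["frag0", "frag1", "frag2"]           # original input order restored
```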
648
+ def stop(self, ):
649
+ '''
650
+ Stop the inference process.
651
+ '''
652
+ self.stop_flag = True
653
+
654
+ @torch.no_grad()
655
+ def run(self, inputs: dict):
656
+ """
657
+ Text to speech inference.
658
+
659
+ Args:
660
+ inputs (dict):
661
+ {
662
+ "text": "", # str.(required) text to be synthesized
663
+ "text_lang: "", # str.(required) language of the text to be synthesized
664
+ "ref_audio_path": "", # str.(required) reference audio path
665
+ "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
666
+ "prompt_text": "", # str.(optional) prompt text for the reference audio
667
+ "prompt_lang": "", # str.(required) language of the prompt text for the reference audio
668
+ "top_k": 5, # int. top k sampling
669
+ "top_p": 1, # float. top p sampling
670
+ "temperature": 1, # float. temperature for sampling
671
+ "text_split_method": "cut0", # str. text split method, see text_segmentation_method.py for details.
672
+ "batch_size": 1, # int. batch size for inference
673
+ "batch_threshold": 0.75, # float. threshold for batch splitting.
674
+ "split_bucket: True, # bool. whether to split the batch into multiple buckets.
675
+ "return_fragment": False, # bool. step by step return the audio fragment.
676
+ "speed_factor":1.0, # float. control the speed of the synthesized audio.
677
+ "fragment_interval":0.3, # float. to control the interval of the audio fragment.
678
+ "seed": -1, # int. random seed for reproducibility.
679
+ "parallel_infer": True, # bool. whether to use parallel inference.
680
+ "repetition_penalty": 1.35 # float. repetition penalty for T2S model.
681
+ }
682
+ Returns:
683
+ Tuple[int, np.ndarray]: sampling rate and audio data.
684
+ """
685
+ ########## variables initialization ###########
686
+ self.stop_flag: bool = False
687
+ text: str = inputs.get("text", "")
688
+ text_lang: str = inputs.get("text_lang", "")
689
+ ref_audio_path: str = inputs.get("ref_audio_path", "")
690
+ aux_ref_audio_paths: list = inputs.get("aux_ref_audio_paths", [])
691
+ prompt_text: str = inputs.get("prompt_text", "")
692
+ prompt_lang: str = inputs.get("prompt_lang", "")
693
+ top_k: int = inputs.get("top_k", 5)
694
+ top_p: float = inputs.get("top_p", 1)
695
+ temperature: float = inputs.get("temperature", 1)
696
+ text_split_method: str = inputs.get("text_split_method", "cut0")
697
+ batch_size = inputs.get("batch_size", 1)
698
+ batch_threshold = inputs.get("batch_threshold", 0.75)
699
+ speed_factor = inputs.get("speed_factor", 1.0)
700
+ split_bucket = inputs.get("split_bucket", True)
701
+ return_fragment = inputs.get("return_fragment", False)
702
+ fragment_interval = inputs.get("fragment_interval", 0.3)
703
+ seed = inputs.get("seed", -1)
704
+ seed = -1 if seed in ["", None] else seed
705
+ actual_seed = set_seed(seed)
706
+ parallel_infer = inputs.get("parallel_infer", True)
707
+ repetition_penalty = inputs.get("repetition_penalty", 1.35)
708
+
709
+ if parallel_infer:
710
+ #print(i18n("并行推理模式已开启"))
711
+ self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_batch_infer
712
+ else:
713
+ #print(i18n("并行推理模式已关闭"))
714
+ self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_naive_batched
715
+
716
+ if return_fragment:
717
+ #print(i18n("分段返回模式已开启"))
718
+ if split_bucket:
719
+ split_bucket = False
720
+ #print(i18n("分段返回模式不支持分桶处理,已自动关闭分桶处理"))
721
+
722
+ if split_bucket and speed_factor == 1.0:
723
+ print(i18n("分桶处理模式已开启"))
724
+ elif speed_factor != 1.0:
725
+ #print(i18n("语速调节不支持分桶处理,已自动关闭分桶处理"))
726
+ split_bucket = False
727
+ else:
728
+ print(i18n("分桶处理模式已关闭"))
729
+
730
+ if fragment_interval < 0.01:
731
+ fragment_interval = 0.01
732
+ #print(i18n("分段间隔过小,已自动设置为0.01"))
733
+
734
+ no_prompt_text = False
735
+ if prompt_text in [None, ""]:
736
+ no_prompt_text = True
737
+
738
+ assert text_lang in self.configs.languages
739
+ if not no_prompt_text:
740
+ assert prompt_lang in self.configs.languages
741
+
742
+ if ref_audio_path in [None, ""] and \
743
+ ((self.prompt_cache["prompt_semantic"] is None) or (self.prompt_cache["refer_spec"] in [None, []])):
744
+ raise ValueError(
745
+ "ref_audio_path cannot be empty, when the reference audio is not set using set_ref_audio()")
746
+
747
+ ###### setting reference audio and prompt text preprocessing ########
748
+ t0 = ttime()
749
+ if (ref_audio_path is not None) and (ref_audio_path != self.prompt_cache["ref_audio_path"]):
750
+ if not os.path.exists(ref_audio_path):
751
+ raise ValueError(f"{ref_audio_path} does not exist")
752
+ self.set_ref_audio(ref_audio_path)
753
+
754
+ aux_ref_audio_paths = aux_ref_audio_paths if aux_ref_audio_paths is not None else []
755
+ paths = set(aux_ref_audio_paths) & set(self.prompt_cache["aux_ref_audio_paths"])
756
+ if not (len(list(paths)) == len(aux_ref_audio_paths) == len(self.prompt_cache["aux_ref_audio_paths"])):
757
+ self.prompt_cache["aux_ref_audio_paths"] = aux_ref_audio_paths
758
+ self.prompt_cache["refer_spec"] = [self.prompt_cache["refer_spec"][0]]
759
+ for path in aux_ref_audio_paths:
760
+ if path in [None, ""]:
761
+ continue
762
+ if not os.path.exists(path):
763
+ print(i18n("音频文件不存在,跳过:{}").format(path))
764
+ continue
765
+ self.prompt_cache["refer_spec"].append(self._get_ref_spec(path))
766
+
767
+ if not no_prompt_text:
768
+ prompt_text = prompt_text.strip("\n")
769
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_lang != "en" else "."
770
+ #print(i18n("实际输入的参考文本:"), prompt_text)
771
+ if self.prompt_cache["prompt_text"] != prompt_text:
772
+ self.prompt_cache["prompt_text"] = prompt_text
773
+ self.prompt_cache["prompt_lang"] = prompt_lang
774
+ phones, bert_features, norm_text = \
775
+ self.text_preprocessor.segment_and_extract_feature_for_text(
776
+ prompt_text,
777
+ prompt_lang,
778
+ self.configs.version)
779
+ self.prompt_cache["phones"] = phones
780
+ self.prompt_cache["bert_features"] = bert_features
781
+ self.prompt_cache["norm_text"] = norm_text
782
+
783
+ ###### text preprocessing ########
784
+ t1 = ttime()
785
+ data: list = None
786
+ if not return_fragment:
787
+ data = self.text_preprocessor.preprocess(text, text_lang, text_split_method, self.configs.version)
788
+ if len(data) == 0:
789
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
790
+ dtype=np.int16)
791
+ return
792
+
793
+ batch_index_list: list = None
794
+ data, batch_index_list = self.to_batch(data,
795
+ prompt_data=self.prompt_cache if not no_prompt_text else None,
796
+ batch_size=batch_size,
797
+ threshold=batch_threshold,
798
+ split_bucket=split_bucket,
799
+ device=self.configs.device,
800
+ precision=self.precision
801
+ )
802
+ else:
803
+ #print(i18n("############ 切分文本 ############"))
804
+ texts = self.text_preprocessor.pre_seg_text(text, text_lang, text_split_method)
805
+ data = []
806
+ for i in range(len(texts)):
807
+ if i % batch_size == 0:
808
+ data.append([])
809
+ data[-1].append(texts[i])
810
+
811
+ def make_batch(batch_texts):
812
+ batch_data = []
813
+ #print(i18n("############ 提取文本Bert特征 ############"))
814
+ for text in tqdm(batch_texts):
815
+ phones, bert_features, norm_text = self.text_preprocessor.segment_and_extract_feature_for_text(text,
816
+ text_lang,
817
+ self.configs.version)
818
+ if phones is None:
819
+ continue
820
+ res = {
821
+ "phones": phones,
822
+ "bert_features": bert_features,
823
+ "norm_text": norm_text,
824
+ }
825
+ batch_data.append(res)
826
+ if len(batch_data) == 0:
827
+ return None
828
+ batch, _ = self.to_batch(batch_data,
829
+ prompt_data=self.prompt_cache if not no_prompt_text else None,
830
+ batch_size=batch_size,
831
+ threshold=batch_threshold,
832
+ split_bucket=False,
833
+ device=self.configs.device,
834
+ precision=self.precision
835
+ )
836
+ return batch[0]
837
+
838
+ t2 = ttime()
839
+ try:
840
+ #print("############ 推理 ############")
841
+ ###### inference ######
842
+ t_34 = 0.0
843
+ t_45 = 0.0
844
+ audio = []
845
+ for item in data:
846
+ t3 = ttime()
847
+ if return_fragment:
848
+ item = make_batch(item)
849
+ if item is None:
850
+ continue
851
+
852
+ batch_phones: List[torch.LongTensor] = item["phones"]
853
+ # batch_phones:torch.LongTensor = item["phones"]
854
+ batch_phones_len: torch.LongTensor = item["phones_len"]
855
+ all_phoneme_ids: torch.LongTensor = item["all_phones"]
856
+ all_phoneme_lens: torch.LongTensor = item["all_phones_len"]
857
+ all_bert_features: torch.Tensor = item["all_bert_features"]
858
+ norm_text: str = item["norm_text"]
859
+ max_len = item["max_len"]
860
+
861
+ #print(i18n("前端处理后的文本(每句):"), norm_text)
862
+ if no_prompt_text:
863
+ prompt = None
864
+ else:
865
+ prompt = self.prompt_cache["prompt_semantic"].expand(len(all_phoneme_ids), -1).to(
866
+ self.configs.device)
867
+
868
+ pred_semantic_list, idx_list = self.t2s_model.model.infer_panel(
869
+ all_phoneme_ids,
870
+ all_phoneme_lens,
871
+ prompt,
872
+ all_bert_features,
873
+ # prompt_phone_len=ph_offset,
874
+ top_k=top_k,
875
+ top_p=top_p,
876
+ temperature=temperature,
877
+ early_stop_num=self.configs.hz * self.configs.max_sec,
878
+ max_len=max_len,
879
+ repetition_penalty=repetition_penalty,
880
+ )
881
+ t4 = ttime()
882
+ t_34 += t4 - t3
883
+
884
+ refer_audio_spec: List[torch.Tensor] = [item.to(dtype=self.precision, device=self.configs.device) for item in
885
+ self.prompt_cache["refer_spec"]]
886
+
887
+ batch_audio_fragment = []
888
+
889
+ # ## VITS parallel inference, method 1
890
+ # pred_semantic_list = [item[-idx:] for item, idx in zip(pred_semantic_list, idx_list)]
891
+ # pred_semantic_len = torch.LongTensor([item.shape[0] for item in pred_semantic_list]).to(self.configs.device)
892
+ # pred_semantic = self.batch_sequences(pred_semantic_list, axis=0, pad_value=0).unsqueeze(0)
893
+ # max_len = 0
894
+ # for i in range(0, len(batch_phones)):
895
+ # max_len = max(max_len, batch_phones[i].shape[-1])
896
+ # batch_phones = self.batch_sequences(batch_phones, axis=0, pad_value=0, max_length=max_len)
897
+ # batch_phones = batch_phones.to(self.configs.device)
898
+ # batch_audio_fragment = (self.vits_model.batched_decode(
899
+ # pred_semantic, pred_semantic_len, batch_phones, batch_phones_len,refer_audio_spec
900
+ # ))
901
+
902
+ if speed_factor == 1.0:
903
+ # ## VITS parallel inference, method 2
904
+ pred_semantic_list = [item[-idx:] for item, idx in zip(pred_semantic_list, idx_list)]
905
+ upsample_rate = math.prod(self.vits_model.upsample_rates)
906
+ audio_frag_idx = [pred_semantic_list[i].shape[0] * 2 * upsample_rate for i in
907
+ range(0, len(pred_semantic_list))]
908
+ audio_frag_end_idx = [sum(audio_frag_idx[:i + 1]) for i in range(0, len(audio_frag_idx))]
909
+ all_pred_semantic = torch.cat(pred_semantic_list).unsqueeze(0).unsqueeze(0).to(self.configs.device)
910
+ _batch_phones = torch.cat(batch_phones).unsqueeze(0).to(self.configs.device)
911
+ _batch_audio_fragment = (self.vits_model.decode(
912
+ all_pred_semantic, _batch_phones, refer_audio_spec, speed=speed_factor
913
+ ).detach()[0, 0, :])
914
+ audio_frag_end_idx.insert(0, 0)
915
+ batch_audio_fragment = [_batch_audio_fragment[audio_frag_end_idx[i - 1]:audio_frag_end_idx[i]] for i
916
+ in range(1, len(audio_frag_end_idx))]
917
+ else:
918
+ # ## VITS sequential inference
919
+ for i, idx in enumerate(idx_list):
920
+ phones = batch_phones[i].unsqueeze(0).to(self.configs.device)
921
+ _pred_semantic = (
922
+ pred_semantic_list[i][-idx:].unsqueeze(0).unsqueeze(0))  # .unsqueeze(0)  # mq needs one extra unsqueeze
923
+ audio_fragment = (self.vits_model.decode(
924
+ _pred_semantic, phones, refer_audio_spec, speed=speed_factor
925
+ ).detach()[0, 0, :])
926
+ batch_audio_fragment.append(
927
+ audio_fragment
928
+ ) ### try reconstructing without the prompt part
929
+
930
+ t5 = ttime()
931
+ t_45 += t5 - t4
932
+ if return_fragment:
933
+ print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t4 - t3, t5 - t4))
934
+ yield self.audio_postprocess([batch_audio_fragment],
935
+ self.configs.sampling_rate,
936
+ None,
937
+ speed_factor,
938
+ False,
939
+ fragment_interval
940
+ )
941
+ else:
942
+ audio.append(batch_audio_fragment)
943
+
944
+ if self.stop_flag:
945
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
946
+ dtype=np.int16)
947
+ return
948
+
949
+ if not return_fragment:
950
+ # print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t_34, t_45))
951
+ if len(audio) == 0:
952
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
953
+ dtype=np.int16)
954
+ return
955
+ yield self.audio_postprocess(audio,
956
+ self.configs.sampling_rate,
957
+ batch_index_list,
958
+ speed_factor,
959
+ split_bucket,
960
+ fragment_interval
961
+ )
962
+
963
+ except Exception as e:
964
+ traceback.print_exc()
965
+ # An empty audio clip must be yielded here, otherwise GPU memory is not released.
966
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
967
+ dtype=np.int16)
968
+ # Reset the models, otherwise GPU memory is not fully released.
969
+ del self.t2s_model
970
+ del self.vits_model
971
+ self.t2s_model = None
972
+ self.vits_model = None
973
+ self.init_t2s_weights(self.configs.t2s_weights_path)
974
+ self.init_vits_weights(self.configs.vits_weights_path)
975
+ raise e
976
+ finally:
977
+ self.empty_cache()
978
+
979
+ def empty_cache(self):
980
+ try:
981
+ gc.collect() # trigger garbage collection to keep memory from growing indefinitely
982
+ if "cuda" in str(self.configs.device):
983
+ torch.cuda.empty_cache()
984
+ elif str(self.configs.device) == "mps":
985
+ torch.mps.empty_cache()
986
+ except:
987
+ pass
988
+
989
+ def audio_postprocess(self,
990
+ audio: List[torch.Tensor],
991
+ sr: int,
992
+ batch_index_list: list = None,
993
+ speed_factor: float = 1.0,
994
+ split_bucket: bool = True,
995
+ fragment_interval: float = 0.3
996
+ ) -> Tuple[int, np.ndarray]:
997
+ zero_wav = torch.zeros(
998
+ int(self.configs.sampling_rate * fragment_interval),
999
+ dtype=self.precision,
1000
+ device=self.configs.device
1001
+ )
1002
+
1003
+ for i, batch in enumerate(audio):
1004
+ for j, audio_fragment in enumerate(batch):
1005
+ max_audio = torch.abs(audio_fragment).max() # simple guard against 16-bit clipping
1006
+ if max_audio > 1: audio_fragment /= max_audio
1007
+ audio_fragment: torch.Tensor = torch.cat([audio_fragment, zero_wav], dim=0)
1008
+ audio[i][j] = audio_fragment.cpu().numpy()
1009
+
1010
+ if split_bucket:
1011
+ audio = self.recovery_order(audio, batch_index_list)
1012
+ else:
1013
+ # audio = [item for batch in audio for item in batch]
1014
+ audio = sum(audio, [])
1015
+
1016
+ audio = np.concatenate(audio, 0)
1017
+ audio = (audio * 32768).astype(np.int16)
1018
+
1019
+ # try:
1020
+ # if speed_factor != 1.0:
1021
+ # audio = speed_change(audio, speed=speed_factor, sr=int(sr))
1022
+ # except Exception as e:
1023
+ # print(f"Failed to change speed of audio: \n{e}")
1024
+
1025
+ return sr, audio
1026
+
1027
+
1028
+ def speed_change(input_audio: np.ndarray, speed: float, sr: int):
1029
+ # convert the NumPy array to a raw PCM byte stream
1030
+ raw_audio = input_audio.astype(np.int16).tobytes()
1031
+
1032
+ # set up the ffmpeg input stream
1033
+ input_stream = ffmpeg.input('pipe:', format='s16le', acodec='pcm_s16le', ar=str(sr), ac=1)
1034
+
1035
+ # apply the tempo change
1036
+ output_stream = input_stream.filter('atempo', speed)
1037
+
1038
+ # pipe the output stream
1039
+ out, _ = (
1040
+ output_stream.output('pipe:', format='s16le', acodec='pcm_s16le')
1041
+ .run(input=raw_audio, capture_stdout=True, capture_stderr=True)
1042
+ )
1043
+
1044
+ # decode the piped output back into a NumPy array
1045
+ processed_audio = np.frombuffer(out, np.int16)
1046
+
1047
+ return processed_audio
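A minimal end-to-end sketch of using the `TTS` class above; the config and audio paths are placeholders, not files shipped in this commit:

```python
import soundfile as sf

tts = TTS("moyoyo_tts/configs/tts_infer.yaml")
gen = tts.run({
    "text": "你好,世界。",                # text to synthesize
    "text_lang": "zh",
    "ref_audio_path": "ref.wav",          # 3-10 s reference clip
    "prompt_text": "参考音频的文本。",      # transcript of the reference clip
    "prompt_lang": "zh",
})
sr, audio = next(gen)                     # run() is a generator
sf.write("out.wav", audio, sr)
```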
TTS_infer_pack/TTS_mid.py ADDED
@@ -0,0 +1,1043 @@
1
+ import gc
2
+ import math
3
+ import os
4
+ import pathlib
5
+ import random
6
+ import sys
7
+ import traceback
8
+ from copy import deepcopy
9
+ from time import time as ttime
10
+ from typing import List, Tuple, Union
11
+
12
+ import ffmpeg
13
+ import librosa
14
+ import numpy as np
15
+ import torch
16
+ import yaml
17
+ from tqdm import tqdm
18
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
19
+
20
+ now_dir = os.getcwd()
21
+ sys.path.append(now_dir)
22
+
23
+ from moyoyo_tts.AR.models.t2s_lightning_module import Text2SemanticLightningModule
24
+ from moyoyo_tts.TTS_infer_pack.TextPreprocessor import TextPreprocessor
25
+ from moyoyo_tts.TTS_infer_pack.text_segmentation_method import splits
26
+ from moyoyo_tts.feature_extractor.cnhubert import CNHubert
27
+ from moyoyo_tts.module.mel_processing import spectrogram_torch
28
+ from moyoyo_tts.module.models import SynthesizerTrn
29
+ from moyoyo_tts.tools.i18n.i18n import I18nAuto, scan_language_list
30
+ from moyoyo_tts.tools.my_utils import load_audio
31
+
32
+ language = os.environ.get("language", "Auto")
33
+ language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
34
+ i18n = I18nAuto(language=language)
35
+
36
+ # configs/tts_infer.yaml
37
+ """
38
+ custom:
39
+ bert_base_path: moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large
40
+ cnhuhbert_base_path: moyoyo_tts/pretrained_models/chinese-hubert-base
41
+ device: cpu
42
+ is_half: false
43
+ t2s_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
44
+ vits_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth
45
+ version: v2
46
+ default:
47
+ bert_base_path: moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large
48
+ cnhuhbert_base_path: moyoyo_tts/pretrained_models/chinese-hubert-base
49
+ device: cpu
50
+ is_half: false
51
+ t2s_weights_path: moyoyo_tts/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
52
+ vits_weights_path: moyoyo_tts/pretrained_models/s2G488k.pth
53
+ version: v1
54
+ default_v2:
55
+ bert_base_path: moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large
56
+ cnhuhbert_base_path: moyoyo_tts/pretrained_models/chinese-hubert-base
57
+ device: cpu
58
+ is_half: false
59
+ t2s_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
60
+ vits_weights_path: moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth
61
+ version: v2
62
+ """
63
+
64
+
65
+ def set_seed(seed: int):
66
+ seed = int(seed)
67
+ seed = seed if seed != -1 else random.randrange(1 << 32)
68
+ #print(f"Set seed to {seed}")
69
+ os.environ['PYTHONHASHSEED'] = str(seed)
70
+ random.seed(seed)
71
+ np.random.seed(seed)
72
+ torch.manual_seed(seed)
73
+ try:
74
+ if torch.cuda.is_available():
75
+ torch.cuda.manual_seed(seed)
76
+ torch.cuda.manual_seed_all(seed)
77
+ # torch.backends.cudnn.deterministic = True
78
+ # torch.backends.cudnn.benchmark = False
79
+ # torch.backends.cudnn.enabled = True
80
+ # enabling TF32 would affect precision, so keep it disabled
81
+ torch.backends.cuda.matmul.allow_tf32 = False
82
+ torch.backends.cudnn.allow_tf32 = False
83
+ except:
84
+ pass
85
+ return seed
86
+
87
+
88
+ class TTS_Config:
89
+ default_configs = {
90
+ "default": {
91
+ "device": "cpu",
92
+ "is_half": False,
93
+ "version": "v1",
94
+ "t2s_weights_path": "moyoyo_tts/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
95
+ "vits_weights_path": "moyoyo_tts/pretrained_models/s2G488k.pth",
96
+ "cnhuhbert_base_path": "moyoyo_tts/pretrained_models/chinese-hubert-base",
97
+ "bert_base_path": "moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large",
98
+ },
99
+ "default_v2": {
100
+ "device": "cpu",
101
+ "is_half": False,
102
+ "version": "v2",
103
+ "t2s_weights_path": "moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
104
+ "vits_weights_path": "moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
105
+ "cnhuhbert_base_path": "moyoyo_tts/pretrained_models/chinese-hubert-base",
106
+ "bert_base_path": "moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large",
107
+ },
108
+ }
109
+ configs: dict = None
110
+ v1_languages: list = ["auto", "en", "zh", "ja", "all_zh", "all_ja"]
111
+ v2_languages: list = ["auto", "auto_yue", "en", "zh", "ja", "yue", "ko", "all_zh", "all_ja", "all_yue", "all_ko"]
112
+ languages: list = v2_languages
113
+
114
+ # "all_zh",#全部按中文识别
115
+ # "en",#全部按英文识别#######不变
116
+ # "all_ja",#全部按日文识别
117
+ # "all_yue",#全部按中文识别
118
+ # "all_ko",#全部按韩文识别
119
+ # "zh",#按中英混合识别####不变
120
+ # "ja",#按日英混合识别####不变
121
+ # "yue",#按粤英混合识别####不变
122
+ # "ko",#按韩英混合识别####不变
123
+ # "auto",#多语种启动切分识别语种
124
+ # "auto_yue",#多语种启动切分识别语种
125
+
126
+ def __init__(self, configs: Union[dict, str] = None):
127
+
128
+ if configs in ["", None]:
129
+ # set the default config file path
130
+ configs_base_path: str = "moyoyo_tts/configs/"
131
+ os.makedirs(configs_base_path, exist_ok=True)
132
+ self.configs_path: str = os.path.join(configs_base_path, "tts_infer.yaml")
133
+ if not os.path.exists(self.configs_path):
134
+ self.save_configs()
135
+ print(f"Create default config file at {self.configs_path}")
136
+ configs: dict = deepcopy(self.default_configs)
137
+
138
+ if isinstance(configs, str):
139
+ self.configs_path = configs
140
+ configs: dict = self._load_configs(self.configs_path)
141
+
142
+ assert isinstance(configs, dict)
143
+ version = configs.get("version", "v2").lower()
144
+ assert version in ["v1", "v2"]
145
+ self.default_configs["default"] = configs.get("default", self.default_configs["default"])
146
+ self.default_configs["default_v2"] = configs.get("default_v2", self.default_configs["default_v2"])
147
+
148
+ default_config_key = "default" if version == "v1" else "default_v2"
149
+ self.configs: dict = configs.get("custom", deepcopy(self.default_configs[default_config_key]))
150
+
151
+ self.device = self.configs.get("device", torch.device("cpu"))
152
+ self.is_half = self.configs.get("is_half", False)
153
+ self.version = version
154
+ self.t2s_weights_path = self.configs.get("t2s_weights_path", None)
155
+ self.vits_weights_path = self.configs.get("vits_weights_path", None)
156
+ self.bert_base_path = self.configs.get("bert_base_path", None)
157
+ os.environ['bert_path'] = f'{self.bert_base_path}'
158
+ self.cnhuhbert_base_path = self.configs.get("cnhuhbert_base_path", None)
159
+ self.languages = self.v2_languages if self.version == "v2" else self.v1_languages
160
+
161
+ if (self.t2s_weights_path in [None, ""]) or (not os.path.exists(self.t2s_weights_path)):
162
+ self.t2s_weights_path = self.default_configs[default_config_key]['t2s_weights_path']
163
+ print(f"fall back to default t2s_weights_path: {self.t2s_weights_path}")
164
+
165
+ if (self.vits_weights_path in [None, ""]) or (not os.path.exists(self.vits_weights_path)):
166
+ self.vits_weights_path = self.default_configs[default_config_key]['vits_weights_path']
167
+ print(f"fall back to default vits_weights_path: {self.vits_weights_path}")
168
+
169
+ if (self.bert_base_path in [None, ""]) or (not os.path.exists(self.bert_base_path)):
170
+ self.bert_base_path = self.default_configs[default_config_key]['bert_base_path']
171
+ print(f"fall back to default bert_base_path: {self.bert_base_path}")
172
+
173
+ if (self.cnhuhbert_base_path in [None, ""]) or (not os.path.exists(self.cnhuhbert_base_path)):
174
+ self.cnhuhbert_base_path = self.default_configs[default_config_key]['cnhuhbert_base_path']
175
+ print(f"fall back to default cnhuhbert_base_path: {self.cnhuhbert_base_path}")
176
+
177
+ self.update_configs()
178
+
179
+ self.max_sec = None
180
+ self.hz: int = 50
181
+ self.semantic_frame_rate: str = "25hz"
182
+ self.segment_size: int = 20480
183
+ self.filter_length: int = 2048
184
+ self.sampling_rate: int = 32000
185
+ self.hop_length: int = 640
186
+ self.win_length: int = 2048
187
+ self.n_speakers: int = 300
188
+
189
+ def _load_configs(self, configs_path: str) -> dict:
190
+ if os.path.exists(configs_path):
191
+ ...
192
+ else:
193
+ print(i18n("路径不存在,使用默认配置"))
194
+ self.save_configs(configs_path)
195
+ with open(configs_path, 'r') as f:
196
+ configs = yaml.load(f, Loader=yaml.FullLoader)
197
+
198
+ return configs
199
+
200
+ def save_configs(self, configs_path: str = None) -> None:
201
+ configs = deepcopy(self.default_configs)
202
+ if self.configs is not None:
203
+ configs["custom"] = self.update_configs()
204
+
205
+ if configs_path is None and hasattr(self, 'configs_path'):
206
+ configs_path = self.configs_path
207
+
208
+ if configs_path is None:
209
+ return
210
+
211
+ with open(configs_path, 'w') as f:
212
+ yaml.dump(configs, f)
213
+
214
+ def update_configs(self):
215
+ self.config = {
216
+ "device": str(self.device),
217
+ "is_half": self.is_half,
218
+ "version": self.version,
219
+ "t2s_weights_path": self.t2s_weights_path,
220
+ "vits_weights_path": self.vits_weights_path,
221
+ "bert_base_path": self.bert_base_path,
222
+ "cnhuhbert_base_path": self.cnhuhbert_base_path,
223
+ }
224
+ return self.config
225
+
226
+ def update_version(self, version: str) -> None:
227
+ self.version = version
228
+ self.languages = self.v2_languages if self.version == "v2" else self.v1_languages
229
+
230
+ def __str__(self):
231
+ self.configs = self.update_configs()
232
+ string = "TTS Config".center(100, '-') + '\n'
233
+ for k, v in self.configs.items():
234
+ string += f"{str(k).ljust(20)}: {str(v)}\n"
235
+ string += "-" * 100 + '\n'
236
+ return string
237
+
238
+ def __repr__(self):
239
+ return self.__str__()
240
+
241
+ def __hash__(self):
242
+ return hash(self.configs_path)
243
+
244
+ def __eq__(self, other):
245
+ return isinstance(other, TTS_Config) and self.configs_path == other.configs_path
246
+
247
+
248
+ class TTS:
249
+ def __init__(self, configs: Union[dict, str, TTS_Config]):
250
+ if isinstance(configs, TTS_Config):
251
+ self.configs = configs
252
+ else:
253
+ self.configs: TTS_Config = TTS_Config(configs)
254
+
255
+ self.t2s_model: Text2SemanticLightningModule = None
256
+ self.vits_model: SynthesizerTrn = None
257
+ self.bert_tokenizer: AutoTokenizer = None
258
+ self.bert_model: AutoModelForMaskedLM = None
259
+ self.cnhuhbert_model: CNHubert = None
260
+
261
+ self._init_models()
262
+
263
+ self.text_preprocessor: TextPreprocessor = \
264
+ TextPreprocessor(self.bert_model,
265
+ self.bert_tokenizer,
266
+ self.configs.device)
267
+
268
+ self.prompt_cache: dict = {
269
+ "ref_audio_path": None,
270
+ "prompt_semantic": None,
271
+ "refer_spec": [],
272
+ "prompt_text": None,
273
+ "prompt_lang": None,
274
+ "phones": None,
275
+ "bert_features": None,
276
+ "norm_text": None,
277
+ "aux_ref_audio_paths": [],
278
+ }
279
+
280
+ self.stop_flag: bool = False
281
+ self.precision: torch.dtype = torch.float16 if self.configs.is_half else torch.float32
282
+
283
+ def _init_models(self, ):
284
+ self.init_t2s_weights(self.configs.t2s_weights_path)
285
+ self.init_vits_weights(self.configs.vits_weights_path)
286
+ self.init_bert_weights(self.configs.bert_base_path)
287
+ self.init_cnhuhbert_weights(self.configs.cnhuhbert_base_path)
288
+ # self.enable_half_precision(self.configs.is_half)
289
+
290
+ def init_cnhuhbert_weights(self, base_path: str):
291
+ print(f"Loading CNHuBERT weights from {base_path}")
292
+ self.cnhuhbert_model = CNHubert(base_path)
293
+ self.cnhuhbert_model = self.cnhuhbert_model.eval()
294
+ self.cnhuhbert_model = self.cnhuhbert_model.to(self.configs.device)
295
+ if self.configs.is_half and str(self.configs.device) != "cpu":
296
+ self.cnhuhbert_model = self.cnhuhbert_model.half()
297
+
298
+ def init_bert_weights(self, base_path: str):
299
+ print(f"Loading BERT weights from {base_path}")
300
+ self.bert_tokenizer = AutoTokenizer.from_pretrained(base_path)
301
+ self.bert_model = AutoModelForMaskedLM.from_pretrained(base_path)
302
+ self.bert_model = self.bert_model.eval()
303
+ self.bert_model = self.bert_model.to(self.configs.device)
304
+ if self.configs.is_half and str(self.configs.device) != "cpu":
305
+ self.bert_model = self.bert_model.half()
306
+
307
+ def init_vits_weights(self, weights_path: str):
308
+ print(f"Loading VITS weights from {weights_path}")
309
+ self.configs.vits_weights_path = weights_path
310
+ dict_s2 = torch.load(weights_path, map_location=self.configs.device)
311
+ hps = dict_s2["config"]
312
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
313
+ self.configs.update_version("v1")
314
+ else:
315
+ self.configs.update_version("v2")
316
+ self.configs.save_configs()
317
+
318
+ hps["model"]["version"] = self.configs.version
319
+ self.configs.filter_length = hps["data"]["filter_length"]
320
+ self.configs.segment_size = hps["train"]["segment_size"]
321
+ self.configs.sampling_rate = hps["data"]["sampling_rate"]
322
+ self.configs.hop_length = hps["data"]["hop_length"]
323
+ self.configs.win_length = hps["data"]["win_length"]
324
+ self.configs.n_speakers = hps["data"]["n_speakers"]
325
+ self.configs.semantic_frame_rate = "25hz"
326
+ kwargs = hps["model"]
327
+ vits_model = SynthesizerTrn(
328
+ self.configs.filter_length // 2 + 1,
329
+ self.configs.segment_size // self.configs.hop_length,
330
+ n_speakers=self.configs.n_speakers,
331
+ **kwargs
332
+ )
333
+
334
+ if hasattr(vits_model, "enc_q"):
335
+ del vits_model.enc_q
336
+
337
+ vits_model = vits_model.to(self.configs.device)
338
+ vits_model = vits_model.eval()
339
+ vits_model.load_state_dict(dict_s2["weight"], strict=False)
340
+ self.vits_model = vits_model
341
+ if self.configs.is_half and str(self.configs.device) != "cpu":
342
+ self.vits_model = self.vits_model.half()
343
+
344
+ def init_t2s_weights(self, weights_path: str):
345
+ print(f"Loading Text2Semantic weights from {weights_path}")
346
+ self.configs.t2s_weights_path = weights_path
347
+ self.configs.save_configs()
348
+ self.configs.hz = 50
349
+ dict_s1 = torch.load(weights_path, map_location=self.configs.device)
350
+ config = dict_s1["config"]
351
+ self.configs.max_sec = config["data"]["max_sec"]
352
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
353
+ t2s_model.load_state_dict(dict_s1["weight"])
354
+ t2s_model = t2s_model.to(self.configs.device)
355
+ t2s_model = t2s_model.eval()
356
+ self.t2s_model = t2s_model
357
+ if self.configs.is_half and str(self.configs.device) != "cpu":
358
+ self.t2s_model = self.t2s_model.half()
359
+
360
+ def enable_half_precision(self, enable: bool = True, save: bool = True):
361
+ '''
362
+ To enable half precision for the TTS model.
363
+ Args:
364
+ enable: bool, whether to enable half precision.
365
+
366
+ '''
367
+ if str(self.configs.device) == "cpu" and enable:
368
+ print("Half precision is not supported on CPU.")
369
+ return
370
+
371
+ self.configs.is_half = enable
372
+ self.precision = torch.float16 if enable else torch.float32
373
+ if save:
374
+ self.configs.save_configs()
375
+ if enable:
376
+ if self.t2s_model is not None:
377
+ self.t2s_model = self.t2s_model.half()
378
+ if self.vits_model is not None:
379
+ self.vits_model = self.vits_model.half()
380
+ if self.bert_model is not None:
381
+ self.bert_model = self.bert_model.half()
382
+ if self.cnhuhbert_model is not None:
383
+ self.cnhuhbert_model = self.cnhuhbert_model.half()
384
+ else:
385
+ if self.t2s_model is not None:
386
+ self.t2s_model = self.t2s_model.float()
387
+ if self.vits_model is not None:
388
+ self.vits_model = self.vits_model.float()
389
+ if self.bert_model is not None:
390
+ self.bert_model = self.bert_model.float()
391
+ if self.cnhuhbert_model is not None:
392
+ self.cnhuhbert_model = self.cnhuhbert_model.float()
393
+
394
+ def set_device(self, device: torch.device, save: bool = True):
395
+ '''
396
+ To set the device for all models.
397
+ Args:
398
+ device: torch.device, the device to use for all models.
399
+ '''
400
+ self.configs.device = device
401
+ if save:
402
+ self.configs.save_configs()
403
+ if self.t2s_model is not None:
404
+ self.t2s_model = self.t2s_model.to(device)
405
+ if self.vits_model is not None:
406
+ self.vits_model = self.vits_model.to(device)
407
+ if self.bert_model is not None:
408
+ self.bert_model = self.bert_model.to(device)
409
+ if self.cnhuhbert_model is not None:
410
+ self.cnhuhbert_model = self.cnhuhbert_model.to(device)
411
+
412
+ def set_ref_audio(self, ref_audio_path: str):
413
+ '''
414
+ To set the reference audio for the TTS model,
415
+ including the prompt_semantic and refer_spepc.
416
+ Args:
417
+ ref_audio_path: str, the path of the reference audio.
418
+ '''
419
+ self._set_prompt_semantic(ref_audio_path)
420
+ self._set_ref_spec(ref_audio_path)
421
+ self._set_ref_audio_path(ref_audio_path)
422
+
423
+ def _set_ref_audio_path(self, ref_audio_path):
424
+ self.prompt_cache["ref_audio_path"] = ref_audio_path
425
+
426
+ def _set_ref_spec(self, ref_audio_path):
427
+ spec = self._get_ref_spec(ref_audio_path)
428
+ if self.prompt_cache["refer_spec"] in [[], None]:
429
+ self.prompt_cache["refer_spec"] = [spec]
430
+ else:
431
+ self.prompt_cache["refer_spec"][0] = spec
432
+
433
+ def _get_ref_spec(self, ref_audio_path):
434
+ audio = load_audio(ref_audio_path, int(self.configs.sampling_rate))
435
+ audio = torch.FloatTensor(audio)
436
+ maxx = audio.abs().max()
437
+ if (maxx > 1): audio /= min(2, maxx)
438
+ audio_norm = audio
439
+ audio_norm = audio_norm.unsqueeze(0)
440
+ spec = spectrogram_torch(
441
+ audio_norm,
442
+ self.configs.filter_length,
443
+ self.configs.sampling_rate,
444
+ self.configs.hop_length,
445
+ self.configs.win_length,
446
+ center=False,
447
+ )
448
+ spec = spec.to(self.configs.device)
449
+ if self.configs.is_half:
450
+ spec = spec.half()
451
+ ref_audio = pathlib.Path(ref_audio_path)
452
+ export_dir = ref_audio.parent
453
+ export_path = f"{export_dir / f'refer_spec.pt'}"
454
+ print(f'ref spec file path: {export_path}')
455
+ torch.save(spec, export_path)
456
+ return spec
457
+
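+ # Note: the returned spec has shape (1, filter_length // 2 + 1, n_frames); it is
+ # also saved as `refer_spec.pt` next to the reference audio for later reuse.
+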
458
+ def _set_prompt_semantic(self, ref_wav_path: str):
459
+ zero_wav = np.zeros(
460
+ int(self.configs.sampling_rate * 0.3),
461
+ dtype=np.float16 if self.configs.is_half else np.float32,
462
+ )
463
+ with torch.no_grad():
464
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
465
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
466
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
467
+ wav16k = torch.from_numpy(wav16k)
468
+ zero_wav_torch = torch.from_numpy(zero_wav)
469
+ wav16k = wav16k.to(self.configs.device)
470
+ zero_wav_torch = zero_wav_torch.to(self.configs.device)
471
+ if self.configs.is_half:
472
+ wav16k = wav16k.half()
473
+ zero_wav_torch = zero_wav_torch.half()
474
+
475
+ wav16k = torch.cat([wav16k, zero_wav_torch])
476
+ hubert_feature = self.cnhuhbert_model.model(wav16k.unsqueeze(0))[
477
+ "last_hidden_state"
478
+ ].transpose(
479
+ 1, 2
480
+ ) # .float()
481
+ codes = self.vits_model.extract_latent(hubert_feature)
482
+
483
+ prompt_semantic = codes[0, 0].to(self.configs.device)
484
+ self.prompt_cache["prompt_semantic"] = prompt_semantic
485
+
486
+ ref_audio = pathlib.Path(ref_wav_path)
487
+ export_dir = ref_audio.parent
488
+ export_path = f"{export_dir / f'prompt_semantic.pt'}"
489
+ print(f'prompt semantic file path: {export_path}')
490
+ torch.save(prompt_semantic, export_path)
491
+
492
+ def batch_sequences(self, sequences: List[torch.Tensor], axis: int = 0, pad_value: int = 0, max_length: int = None):
493
+ seq = sequences[0]
494
+ ndim = seq.dim()
495
+ if axis < 0:
496
+ axis += ndim
497
+ dtype: torch.dtype = seq.dtype
498
+ pad_value = torch.tensor(pad_value, dtype=dtype)
499
+ seq_lengths = [seq.shape[axis] for seq in sequences]
500
+ if max_length is None:
501
+ max_length = max(seq_lengths)
502
+ else:
503
+ max_length = max(seq_lengths) if max_length < max(seq_lengths) else max_length
504
+
505
+ padded_sequences = []
506
+ for seq, length in zip(sequences, seq_lengths):
507
+ padding = [0] * axis + [0, max_length - length] + [0] * (ndim - axis - 1)
508
+ padded_seq = torch.nn.functional.pad(seq, padding, value=pad_value)
509
+ padded_sequences.append(padded_seq)
510
+ batch = torch.stack(padded_sequences)
511
+ return batch
512
+
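+ # Example (an illustrative sketch) of how batch_sequences right-pads 1-D tensors:
+ # seqs = [torch.LongTensor([1, 2]), torch.LongTensor([3, 4, 5])]
+ # self.batch_sequences(seqs, axis=0, pad_value=0)
+ # -> tensor([[1, 2, 0],
+ #            [3, 4, 5]])
+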
513
+ def to_batch(self, data: list,
514
+ prompt_data: dict = None,
515
+ batch_size: int = 5,
516
+ threshold: float = 0.75,
517
+ split_bucket: bool = True,
518
+ device: torch.device = torch.device("cpu"),
519
+ precision: torch.dtype = torch.float32,
520
+ ):
521
+ _data: list = []
522
+ index_and_len_list = []
523
+ for idx, item in enumerate(data):
524
+ norm_text_len = len(item["norm_text"])
525
+ index_and_len_list.append([idx, norm_text_len])
526
+
527
+ batch_index_list = []
528
+ if split_bucket:
529
+ index_and_len_list.sort(key=lambda x: x[1])
530
+ index_and_len_list = np.array(index_and_len_list, dtype=np.int64)
531
+
532
+ batch_index_list_len = 0
533
+ pos = 0
534
+ while pos < index_and_len_list.shape[0]:
535
+ # batch_index_list.append(index_and_len_list[pos:min(pos+batch_size,len(index_and_len_list))])
536
+ pos_end = min(pos + batch_size, index_and_len_list.shape[0])
537
+ while pos < pos_end:
538
+ batch = index_and_len_list[pos:pos_end, 1].astype(np.float32)
539
+ score = batch[(pos_end - pos) // 2] / (batch.mean() + 1e-8)
540
+ if (score >= threshold) or (pos_end - pos == 1):
541
+ batch_index = index_and_len_list[pos:pos_end, 0].tolist()
542
+ batch_index_list_len += len(batch_index)
543
+ batch_index_list.append(batch_index)
544
+ pos = pos_end
545
+ break
546
+ pos_end = pos_end - 1
547
+
548
+ assert batch_index_list_len == len(data)
549
+
550
+ else:
551
+ for i in range(len(data)):
552
+ if i % batch_size == 0:
553
+ batch_index_list.append([])
554
+ batch_index_list[-1].append(i)
555
+
556
+ for batch_idx, index_list in enumerate(batch_index_list):
557
+ item_list = [data[idx] for idx in index_list]
558
+ phones_list = []
559
+ phones_len_list = []
560
+ # bert_features_list = []
561
+ all_phones_list = []
562
+ all_phones_len_list = []
563
+ all_bert_features_list = []
564
+ norm_text_batch = []
565
+ all_bert_max_len = 0
566
+ all_phones_max_len = 0
567
+ for item in item_list:
568
+ if prompt_data is not None:
569
+ all_bert_features = torch.cat([prompt_data["bert_features"], item["bert_features"]], 1) \
570
+ .to(dtype=precision, device=device)
571
+ all_phones = torch.LongTensor(prompt_data["phones"] + item["phones"]).to(device)
572
+ phones = torch.LongTensor(item["phones"]).to(device)
573
+ # norm_text = prompt_data["norm_text"]+item["norm_text"]
574
+ else:
575
+ all_bert_features = item["bert_features"] \
576
+ .to(dtype=precision, device=device)
577
+ phones = torch.LongTensor(item["phones"]).to(device)
578
+ all_phones = phones
579
+ # norm_text = item["norm_text"]
580
+
581
+ all_bert_max_len = max(all_bert_max_len, all_bert_features.shape[-1])
582
+ all_phones_max_len = max(all_phones_max_len, all_phones.shape[-1])
583
+
584
+ phones_list.append(phones)
585
+ phones_len_list.append(phones.shape[-1])
586
+ all_phones_list.append(all_phones)
587
+ all_phones_len_list.append(all_phones.shape[-1])
588
+ all_bert_features_list.append(all_bert_features)
589
+ norm_text_batch.append(item["norm_text"])
590
+
591
+ phones_batch = phones_list
592
+ all_phones_batch = all_phones_list
593
+ all_bert_features_batch = all_bert_features_list
594
+
595
+ max_len = max(all_bert_max_len, all_phones_max_len)
596
+ # phones_batch = self.batch_sequences(phones_list, axis=0, pad_value=0, max_length=max_len)
597
+ #### Pad phones and bert_features directly. (The padding strategy affects what the T2S model generates, but does not directly affect the repetition rate; the masking strategy is the main factor there.)
598
+ # all_phones_batch = self.batch_sequences(all_phones_list, axis=0, pad_value=0, max_length=max_len)
599
+ # all_bert_features_batch = all_bert_features_list
600
+ # all_bert_features_batch = torch.zeros((len(all_bert_features_list), 1024, max_len), dtype=precision, device=device)
601
+ # for idx, item in enumerate(all_bert_features_list):
602
+ # all_bert_features_batch[idx, :, : item.shape[-1]] = item
603
+
604
+ # #### Alternatively: embed the phones and project the bert_features first, then pad both to the same length. (Same caveat: padding affects the T2S output but not the repetition rate directly; the masking strategy matters most.)
605
+ # all_phones_list = [self.t2s_model.model.ar_text_embedding(item.to(self.t2s_model.device)) for item in all_phones_list]
606
+ # all_phones_list = [F.pad(item,(0,0,0,max_len-item.shape[0]),value=0) for item in all_phones_list]
607
+ # all_phones_batch = torch.stack(all_phones_list, dim=0)
608
+
609
+ # all_bert_features_list = [self.t2s_model.model.bert_proj(item.to(self.t2s_model.device).transpose(0, 1)) for item in all_bert_features_list]
610
+ # all_bert_features_list = [F.pad(item,(0,0,0,max_len-item.shape[0]), value=0) for item in all_bert_features_list]
611
+ # all_bert_features_batch = torch.stack(all_bert_features_list, dim=0)
612
+
613
+ batch = {
614
+ "phones": phones_batch,
615
+ "phones_len": torch.LongTensor(phones_len_list).to(device),
616
+ "all_phones": all_phones_batch,
617
+ "all_phones_len": torch.LongTensor(all_phones_len_list).to(device),
618
+ "all_bert_features": all_bert_features_batch,
619
+ "norm_text": norm_text_batch,
620
+ "max_len": max_len,
621
+ }
622
+ _data.append(batch)
623
+
624
+ return _data, batch_index_list
625
+
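+ # Note: to_batch returns (batches, batch_index_list). With split_bucket=True the
+ # items are sorted by normalized-text length so each batch groups similar lengths;
+ # batch_index_list records the original positions for recovery_order() below.
+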
626
+ def recovery_order(self, data: list, batch_index_list: list) -> list:
627
+ '''
628
+ Recover the original order of the audio fragments according to batch_index_list.
629
+
630
+ Args:
631
+ data (List[List[np.ndarray]]): the out-of-order audio fragments.
632
+ batch_index_list (List[list[int]]): the batch index list.
633
+
634
+ Returns:
635
+ list (List[np.ndarray]): the data in the original order.
636
+ '''
637
+ length = len(sum(batch_index_list, []))
638
+ _data = [None] * length
639
+ for i, index_list in enumerate(batch_index_list):
640
+ for j, index in enumerate(index_list):
641
+ _data[index] = data[i][j]
642
+ return _data
643
+
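+ # Example (illustrative): recovery_order([["a", "c"], ["b"]], [[0, 2], [1]])
+ # -> ["a", "b", "c"], since item j of batch i is written back to position
+ # batch_index_list[i][j].
+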
644
+ def stop(self):
645
+ '''
646
+ Stop the inference process.
647
+ '''
648
+ self.stop_flag = True
649
+
650
+ @torch.no_grad()
651
+ def run(self, inputs: dict):
652
+ """
653
+ Text to speech inference.
654
+
655
+ Args:
656
+ inputs (dict):
657
+ {
658
+ "text": "", # str.(required) text to be synthesized
659
+ "text_lang: "", # str.(required) language of the text to be synthesized
660
+ "ref_audio_path": "", # str.(required) reference audio path
661
+ "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
662
+ "prompt_text": "", # str.(optional) prompt text for the reference audio
663
+ "prompt_lang": "", # str.(required) language of the prompt text for the reference audio
664
+ "top_k": 5, # int. top k sampling
665
+ "top_p": 1, # float. top p sampling
666
+ "temperature": 1, # float. temperature for sampling
667
+ "text_split_method": "cut0", # str. text split method, see text_segmentation_method.py for details.
668
+ "batch_size": 1, # int. batch size for inference
669
+ "batch_threshold": 0.75, # float. threshold for batch splitting.
670
+ "split_bucket: True, # bool. whether to split the batch into multiple buckets.
671
+ "return_fragment": False, # bool. step by step return the audio fragment.
672
+ "speed_factor":1.0, # float. control the speed of the synthesized audio.
673
+ "fragment_interval":0.3, # float. to control the interval of the audio fragment.
674
+ "seed": -1, # int. random seed for reproducibility.
675
+ "parallel_infer": True, # bool. whether to use parallel inference.
676
+ "repetition_penalty": 1.35 # float. repetition penalty for T2S model.
677
+ }
678
+ returns:
679
+ Tuple[int, np.ndarray]: sampling rate and audio data.
680
+ """
681
+ ########## variables initialization ###########
682
+ self.stop_flag: bool = False
683
+ text: str = inputs.get("text", "")
684
+ text_lang: str = inputs.get("text_lang", "")
685
+ ref_audio_path: str = inputs.get("ref_audio_path", "")
686
+ aux_ref_audio_paths: list = inputs.get("aux_ref_audio_paths", [])
687
+ prompt_text: str = inputs.get("prompt_text", "")
688
+ prompt_lang: str = inputs.get("prompt_lang", "")
689
+ top_k: int = inputs.get("top_k", 5)
690
+ top_p: float = inputs.get("top_p", 1)
691
+ temperature: float = inputs.get("temperature", 1)
692
+ text_split_method: str = inputs.get("text_split_method", "cut0")
693
+ batch_size = inputs.get("batch_size", 1)
694
+ batch_threshold = inputs.get("batch_threshold", 0.75)
695
+ speed_factor = inputs.get("speed_factor", 1.0)
696
+ split_bucket = inputs.get("split_bucket", True)
697
+ return_fragment = inputs.get("return_fragment", False)
698
+ fragment_interval = inputs.get("fragment_interval", 0.3)
699
+ seed = inputs.get("seed", -1)
700
+ seed = -1 if seed in ["", None] else seed
701
+ actual_seed = set_seed(seed)
702
+ parallel_infer = inputs.get("parallel_infer", True)
703
+ repetition_penalty = inputs.get("repetition_penalty", 1.35)
704
+
705
+ if parallel_infer:
706
+ #print(i18n("并行推理模式已开启"))
707
+ self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_batch_infer
708
+ else:
709
+ #print(i18n("并行推理模式已关闭"))
710
+ self.t2s_model.model.infer_panel = self.t2s_model.model.infer_panel_naive_batched
711
+
712
+ if return_fragment:
713
+ #print(i18n("分段返回模式已开启"))
714
+ if split_bucket:
715
+ split_bucket = False
716
+ #print(i18n("分段返回模式不支持分桶处理,已自动关闭分桶处理"))
717
+
718
+ if split_bucket and speed_factor == 1.0:
719
+ print(i18n("分桶处理模式已开启"))
720
+ elif speed_factor != 1.0:
721
+ #print(i18n("语速调节不支持分桶处理,已自动关闭分桶处理"))
722
+ split_bucket = False
723
+ else:
724
+ print(i18n("分桶处理模式已关闭"))
725
+
726
+ if fragment_interval < 0.01:
727
+ fragment_interval = 0.01
728
+ #print(i18n("分段间隔过小,已自动设置为0.01"))
729
+
730
+ no_prompt_text = False
731
+ if prompt_text in [None, ""]:
732
+ no_prompt_text = True
733
+
734
+ assert text_lang in self.configs.languages
735
+ if not no_prompt_text:
736
+ assert prompt_lang in self.configs.languages
737
+
738
+ if ref_audio_path in [None, ""] and \
739
+ ((self.prompt_cache["prompt_semantic"] is None) or (self.prompt_cache["refer_spec"] in [None, []])):
740
+ raise ValueError(
741
+ "ref_audio_path cannot be empty, when the reference audio is not set using set_ref_audio()")
742
+
743
+ ###### setting reference audio and prompt text preprocessing ########
744
+ t0 = ttime()
745
+ if (ref_audio_path is not None) and (ref_audio_path != self.prompt_cache["ref_audio_path"]):
746
+ if not os.path.exists(ref_audio_path):
747
+ raise ValueError(f"{ref_audio_path} not exists")
748
+ self.set_ref_audio(ref_audio_path)
749
+
750
+ aux_ref_audio_paths = aux_ref_audio_paths if aux_ref_audio_paths is not None else []
751
+ paths = set(aux_ref_audio_paths) & set(self.prompt_cache["aux_ref_audio_paths"])
752
+ if not (len(list(paths)) == len(aux_ref_audio_paths) == len(self.prompt_cache["aux_ref_audio_paths"])):
753
+ self.prompt_cache["aux_ref_audio_paths"] = aux_ref_audio_paths
754
+ self.prompt_cache["refer_spec"] = [self.prompt_cache["refer_spec"][0]]
755
+ for path in aux_ref_audio_paths:
756
+ if path in [None, ""]:
757
+ continue
758
+ if not os.path.exists(path):
759
+ print(i18n("音频文件不存在,跳过:{}").format(path))
760
+ continue
761
+ self.prompt_cache["refer_spec"].append(self._get_ref_spec(path))
762
+
763
+ if not no_prompt_text:
764
+ prompt_text = prompt_text.strip("\n")
765
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_lang != "en" else "."
766
+ #print(i18n("实际输入的参考文本:"), prompt_text)
767
+ if self.prompt_cache["prompt_text"] != prompt_text:
768
+ self.prompt_cache["prompt_text"] = prompt_text
769
+ self.prompt_cache["prompt_lang"] = prompt_lang
770
+ phones, bert_features, norm_text = \
771
+ self.text_preprocessor.segment_and_extract_feature_for_text(
772
+ prompt_text,
773
+ prompt_lang,
774
+ self.configs.version)
775
+ self.prompt_cache["phones"] = phones
776
+ self.prompt_cache["bert_features"] = bert_features
777
+ self.prompt_cache["norm_text"] = norm_text
778
+
779
+ ###### text preprocessing ########
780
+ t1 = ttime()
781
+ data: list = None
782
+ if not return_fragment:
783
+ data = self.text_preprocessor.preprocess(text, text_lang, text_split_method, self.configs.version)
784
+ if len(data) == 0:
785
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
786
+ dtype=np.int16)
787
+ return
788
+
789
+ batch_index_list: list = None
790
+ data, batch_index_list = self.to_batch(data,
791
+ prompt_data=self.prompt_cache if not no_prompt_text else None,
792
+ batch_size=batch_size,
793
+ threshold=batch_threshold,
794
+ split_bucket=split_bucket,
795
+ device=self.configs.device,
796
+ precision=self.precision
797
+ )
798
+ else:
799
+ #print(i18n("############ 切分文本 ############"))
800
+ texts = self.text_preprocessor.pre_seg_text(text, text_lang, text_split_method)
801
+ data = []
802
+ for i in range(len(texts)):
803
+ if i % batch_size == 0:
804
+ data.append([])
805
+ data[-1].append(texts[i])
806
+
807
+ def make_batch(batch_texts):
808
+ batch_data = []
809
+ #print(i18n("############ 提取文本Bert特征 ############"))
810
+ for text in tqdm(batch_texts):
811
+ phones, bert_features, norm_text = self.text_preprocessor.segment_and_extract_feature_for_text(text,
812
+ text_lang,
813
+ self.configs.version)
814
+ if phones is None:
815
+ continue
816
+ res = {
817
+ "phones": phones,
818
+ "bert_features": bert_features,
819
+ "norm_text": norm_text,
820
+ }
821
+ batch_data.append(res)
822
+ if len(batch_data) == 0:
823
+ return None
824
+ batch, _ = self.to_batch(batch_data,
825
+ prompt_data=self.prompt_cache if not no_prompt_text else None,
826
+ batch_size=batch_size,
827
+ threshold=batch_threshold,
828
+ split_bucket=False,
829
+ device=self.configs.device,
830
+ precision=self.precision
831
+ )
832
+ return batch[0]
833
+
834
+ t2 = ttime()
835
+ try:
836
+ #print("############ 推理 ############")
837
+ ###### inference ######
838
+ t_34 = 0.0
839
+ t_45 = 0.0
840
+ audio = []
841
+ for item in data:
842
+ t3 = ttime()
843
+ if return_fragment:
844
+ item = make_batch(item)
845
+ if item is None:
846
+ continue
847
+
848
+ batch_phones: List[torch.LongTensor] = item["phones"]
849
+ # batch_phones:torch.LongTensor = item["phones"]
850
+ batch_phones_len: torch.LongTensor = item["phones_len"]
851
+ all_phoneme_ids: torch.LongTensor = item["all_phones"]
852
+ all_phoneme_lens: torch.LongTensor = item["all_phones_len"]
853
+ all_bert_features: torch.Tensor = item["all_bert_features"]
854
+ norm_text: str = item["norm_text"]
855
+ max_len = item["max_len"]
856
+
857
+ #print(i18n("前端处理后的文本(每句):"), norm_text)
858
+ if no_prompt_text:
859
+ prompt = None
860
+ else:
861
+ prompt = self.prompt_cache["prompt_semantic"].expand(len(all_phoneme_ids), -1).to(
862
+ self.configs.device)
863
+
864
+ pred_semantic_list, idx_list = self.t2s_model.model.infer_panel(
865
+ all_phoneme_ids,
866
+ all_phoneme_lens,
867
+ prompt,
868
+ all_bert_features,
869
+ # prompt_phone_len=ph_offset,
870
+ top_k=top_k,
871
+ top_p=top_p,
872
+ temperature=temperature,
873
+ early_stop_num=self.configs.hz * self.configs.max_sec,
874
+ max_len=max_len,
875
+ repetition_penalty=repetition_penalty,
876
+ )
877
+ t4 = ttime()
878
+ t_34 += t4 - t3
879
+
880
+ refer_audio_spec: List[torch.Tensor] = [item.to(dtype=self.precision, device=self.configs.device) for item in
881
+ self.prompt_cache["refer_spec"]]
882
+
883
+ batch_audio_fragment = []
884
+
885
+ # ## VITS parallel inference, method 1
886
+ # pred_semantic_list = [item[-idx:] for item, idx in zip(pred_semantic_list, idx_list)]
887
+ # pred_semantic_len = torch.LongTensor([item.shape[0] for item in pred_semantic_list]).to(self.configs.device)
888
+ # pred_semantic = self.batch_sequences(pred_semantic_list, axis=0, pad_value=0).unsqueeze(0)
889
+ # max_len = 0
890
+ # for i in range(0, len(batch_phones)):
891
+ # max_len = max(max_len, batch_phones[i].shape[-1])
892
+ # batch_phones = self.batch_sequences(batch_phones, axis=0, pad_value=0, max_length=max_len)
893
+ # batch_phones = batch_phones.to(self.configs.device)
894
+ # batch_audio_fragment = (self.vits_model.batched_decode(
895
+ # pred_semantic, pred_semantic_len, batch_phones, batch_phones_len,refer_audio_spec
896
+ # ))
897
+
898
+ if speed_factor == 1.0:
899
+ # ## VITS parallel inference, method 2
900
+ pred_semantic_list = [item[-idx:] for item, idx in zip(pred_semantic_list, idx_list)]
901
+ upsample_rate = math.prod(self.vits_model.upsample_rates)
902
+ audio_frag_idx = [pred_semantic_list[i].shape[0] * 2 * upsample_rate for i in
903
+ range(0, len(pred_semantic_list))]
904
+ audio_frag_end_idx = [sum(audio_frag_idx[:i + 1]) for i in range(0, len(audio_frag_idx))]
905
+ all_pred_semantic = torch.cat(pred_semantic_list).unsqueeze(0).unsqueeze(0).to(self.configs.device)
906
+ _batch_phones = torch.cat(batch_phones).unsqueeze(0).to(self.configs.device)
907
+ _batch_audio_fragment = (self.vits_model.decode(
908
+ all_pred_semantic, _batch_phones, refer_audio_spec, speed=speed_factor
909
+ ).detach()[0, 0, :])
910
+ audio_frag_end_idx.insert(0, 0)
911
+ batch_audio_fragment = [_batch_audio_fragment[audio_frag_end_idx[i - 1]:audio_frag_end_idx[i]] for i
912
+ in range(1, len(audio_frag_end_idx))]
913
+ else:
914
+ # ## VITS sequential inference
915
+ for i, idx in enumerate(idx_list):
916
+ phones = batch_phones[i].unsqueeze(0).to(self.configs.device)
917
+ _pred_semantic = (
918
+ pred_semantic_list[i][-idx:].unsqueeze(0).unsqueeze(0)) # .unsqueeze(0) # mq needs one extra unsqueeze here
919
+ audio_fragment = (self.vits_model.decode(
920
+ _pred_semantic, phones, refer_audio_spec, speed=speed_factor
921
+ ).detach()[0, 0, :])
922
+ batch_audio_fragment.append(
923
+ audio_fragment
924
+ ) ### try reconstructing without the prompt part
925
+
926
+ t5 = ttime()
927
+ t_45 += t5 - t4
928
+ if return_fragment:
929
+ print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t4 - t3, t5 - t4))
930
+ yield self.audio_postprocess([batch_audio_fragment],
931
+ self.configs.sampling_rate,
932
+ None,
933
+ speed_factor,
934
+ False,
935
+ fragment_interval
936
+ )
937
+ else:
938
+ audio.append(batch_audio_fragment)
939
+
940
+ if self.stop_flag:
941
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
942
+ dtype=np.int16)
943
+ return
944
+
945
+ if not return_fragment:
946
+ # print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t_34, t_45))
947
+ if len(audio) == 0:
948
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
949
+ dtype=np.int16)
950
+ return
951
+ yield self.audio_postprocess(audio,
952
+ self.configs.sampling_rate,
953
+ batch_index_list,
954
+ speed_factor,
955
+ split_bucket,
956
+ fragment_interval
957
+ )
958
+
959
+ except Exception as e:
960
+ traceback.print_exc()
961
+ # An empty audio clip must be yielded here, otherwise GPU memory is not released.
962
+ yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
963
+ dtype=np.int16)
964
+ # Reset the models, otherwise GPU memory is not fully released.
965
+ del self.t2s_model
966
+ del self.vits_model
967
+ self.t2s_model = None
968
+ self.vits_model = None
969
+ self.init_t2s_weights(self.configs.t2s_weights_path)
970
+ self.init_vits_weights(self.configs.vits_weights_path)
971
+ raise e
972
+ finally:
973
+ self.empty_cache()
974
+
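+ # Example (an illustrative sketch; the paths and texts below are assumptions):
+ # tts = TTS("configs/tts_infer.yaml")
+ # inputs = {"text": "Hello there.", "text_lang": "en",
+ #           "ref_audio_path": "ref.wav", "prompt_text": "reference text",
+ #           "prompt_lang": "en"}
+ # for sr, wav in tts.run(inputs):          # run() is a generator
+ #     soundfile.write("out.wav", wav, sr)  # assumes `import soundfile`
+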
975
+ def empty_cache(self):
976
+ try:
977
+ gc.collect() # Trigger garbage collection to keep host memory from growing indefinitely.
978
+ if "cuda" in str(self.configs.device):
979
+ torch.cuda.empty_cache()
980
+ elif str(self.configs.device) == "mps":
981
+ torch.mps.empty_cache()
982
+ except Exception:
983
+ pass
984
+
985
+ def audio_postprocess(self,
986
+ audio: List[torch.Tensor],
987
+ sr: int,
988
+ batch_index_list: list = None,
989
+ speed_factor: float = 1.0,
990
+ split_bucket: bool = True,
991
+ fragment_interval: float = 0.3
992
+ ) -> Tuple[int, np.ndarray]:
993
+ zero_wav = torch.zeros(
994
+ int(self.configs.sampling_rate * fragment_interval),
995
+ dtype=self.precision,
996
+ device=self.configs.device
997
+ )
998
+
999
+ for i, batch in enumerate(audio):
1000
+ for j, audio_fragment in enumerate(batch):
1001
+ max_audio = torch.abs(audio_fragment).max() # simple guard against 16-bit clipping
1002
+ if max_audio > 1: audio_fragment /= max_audio
1003
+ audio_fragment: torch.Tensor = torch.cat([audio_fragment, zero_wav], dim=0)
1004
+ audio[i][j] = audio_fragment.cpu().numpy()
1005
+
1006
+ if split_bucket:
1007
+ audio = self.recovery_order(audio, batch_index_list)
1008
+ else:
1009
+ # audio = [item for batch in audio for item in batch]
1010
+ audio = sum(audio, [])
1011
+
1012
+ audio = np.concatenate(audio, 0)
1013
+ audio = (audio * 32768).astype(np.int16)
1014
+
1015
+ # try:
1016
+ # if speed_factor != 1.0:
1017
+ # audio = speed_change(audio, speed=speed_factor, sr=int(sr))
1018
+ # except Exception as e:
1019
+ # print(f"Failed to change speed of audio: \n{e}")
1020
+
1021
+ return sr, audio
1022
+
1023
+
1024
+ def speed_change(input_audio: np.ndarray, speed: float, sr: int):
1025
+ # Convert the NumPy array into a raw PCM byte stream
1026
+ raw_audio = input_audio.astype(np.int16).tobytes()
1027
+
1028
+ # Set up the ffmpeg input stream
1029
+ input_stream = ffmpeg.input('pipe:', format='s16le', acodec='pcm_s16le', ar=str(sr), ac=1)
1030
+
1031
+ # Apply the tempo change
1032
+ output_stream = input_stream.filter('atempo', speed)
1033
+
1034
+ # Pipe the output stream
1035
+ out, _ = (
1036
+ output_stream.output('pipe:', format='s16le', acodec='pcm_s16le')
1037
+ .run(input=raw_audio, capture_stdout=True, capture_stderr=True)
1038
+ )
1039
+
1040
+ # Decode the piped output back into a NumPy array
1041
+ processed_audio = np.frombuffer(out, np.int16)
1042
+
1043
+ return processed_audio
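+
+ # Example (an illustrative sketch; requires the ffmpeg binary on PATH, and note
+ # that the atempo filter only accepts a limited speed range per pass):
+ # sr, audio = next(tts.run(inputs))   # int16 PCM from audio_postprocess
+ # faster = speed_change(audio, speed=1.25, sr=sr)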
TTS_infer_pack/TextPreprocessor.py ADDED
@@ -0,0 +1,247 @@
1
+
2
+ import os
3
+ import re
4
+ import sys
5
+ from typing import Dict, List, Tuple
6
+
7
+ import LangSegment.LangSegment as LangSegment
8
+ import torch
9
+ from tqdm import tqdm
10
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
11
+
12
+ now_dir = os.getcwd()
13
+ sys.path.append(now_dir)
14
+
15
+ from moyoyo_tts.TTS_infer_pack.text_segmentation_method import split_big_text, splits, get_method as get_seg_method
16
+ from moyoyo_tts.text import chinese
17
+ from moyoyo_tts.text import cleaned_text_to_sequence
18
+ from moyoyo_tts.text.cleaner import clean_text
19
+ from moyoyo_tts.tools.i18n.i18n import I18nAuto, scan_language_list
20
+
21
+
22
+ language=os.environ.get("language","Auto")
23
+ language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
24
+ i18n = I18nAuto(language=language)
25
+ punctuation = set(['!', '?', '…', ',', '.', '-'," "])
26
+
27
+ def get_first(text:str) -> str:
28
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
29
+ text = re.split(pattern, text)[0].strip()
30
+ return text
31
+
32
+ def merge_short_text_in_array(texts: list, threshold: int) -> list:
33
+ if (len(texts)) < 2:
34
+ return texts
35
+ result = []
36
+ text = ""
37
+ for ele in texts:
38
+ text += ele
39
+ if len(text) >= threshold:
40
+ result.append(text)
41
+ text = ""
42
+ if (len(text) > 0):
43
+ if len(result) == 0:
44
+ result.append(text)
45
+ else:
46
+ result[len(result) - 1] += text
47
+ return result
48
+
49
+
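+ # Example (illustrative): with threshold=5, short pieces are merged greedily:
+ # merge_short_text_in_array(["Hi. ", "Yes. ", "Okay then."], 5)
+ # -> ["Hi. Yes. ", "Okay then."]
+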
50
+
51
+
52
+
53
+ class TextPreprocessor:
54
+ def __init__(self, bert_model:AutoModelForMaskedLM,
55
+ tokenizer:AutoTokenizer, device:torch.device):
56
+ self.bert_model = bert_model
57
+ self.tokenizer = tokenizer
58
+ self.device = device
59
+
60
+ def preprocess(self, text:str, lang:str, text_split_method:str, version:str="v2")->List[Dict]:
61
+ #print(i18n("############ 切分文本 ############"))
62
+ text = self.replace_consecutive_punctuation(text)
63
+ texts = self.pre_seg_text(text, lang, text_split_method)
64
+ result = []
65
+ #print(i18n("############ 提取文本Bert特征 ############"))
66
+ # for text in tqdm(texts):
67
+ for text in texts:
68
+ phones, bert_features, norm_text = self.segment_and_extract_feature_for_text(text, lang, version)
69
+ if phones is None or norm_text=="":
70
+ continue
71
+ res={
72
+ "phones": phones,
73
+ "bert_features": bert_features,
74
+ "norm_text": norm_text,
75
+ }
76
+ result.append(res)
77
+ return result
78
+
79
+ def pre_seg_text(self, text:str, lang:str, text_split_method:str):
80
+ text = text.strip("\n")
81
+ if len(text) == 0:
82
+ return []
83
+ if (text[0] not in splits and len(get_first(text)) < 4):
84
+ text = "。" + text if lang != "en" else "." + text
85
+ #print(i18n("实际输入的目标文本:"))
86
+ #print(text)
87
+
88
+ seg_method = get_seg_method(text_split_method)
89
+ text = seg_method(text)
90
+
91
+ while "\n\n" in text:
92
+ text = text.replace("\n\n", "\n")
93
+
94
+ _texts = text.split("\n")
95
+ _texts = self.filter_text(_texts)
96
+ _texts = merge_short_text_in_array(_texts, 5)
97
+ texts = []
98
+
99
+
100
+ for text in _texts:
101
+ # Skip blank lines in the target text; they would otherwise cause errors
102
+ if (len(text.strip()) == 0):
103
+ continue
104
+ if not re.sub(r"\W+", "", text):
105
+ # Skip the line if it consists of symbols only
106
+ continue
107
+ if (text[-1] not in splits): text += "。" if lang != "en" else "."
108
+
109
+ # Split over-long sentences so they stay under BERT's 510-character limit
110
+ if (len(text) > 510):
111
+ texts.extend(split_big_text(text))
112
+ else:
113
+ texts.append(text)
114
+
115
+ #print(i18n("实际输入的目标文本(切句后):"))
116
+ #print(texts)
117
+ return texts
118
+
119
+ def segment_and_extract_feature_for_text(self, text:str, language:str, version:str="v1")->Tuple[list, torch.Tensor, str]:
120
+ return self.get_phones_and_bert(text, language, version)
121
+
122
+ def get_phones_and_bert(self, text:str, language:str, version:str, final:bool=False):
123
+ if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
124
+ language = language.replace("all_","")
125
+ if language == "en":
126
+ LangSegment.setfilters(["en"])
127
+ formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
128
+ else:
129
+ # CJK Han characters cannot be told apart automatically, so trust the user's language choice
130
+ formattext = text
131
+ while " " in formattext:
132
+ formattext = formattext.replace(" ", " ")
133
+ if language == "zh":
134
+ if re.search(r'[A-Za-z]', formattext):
135
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
136
+ formattext = chinese.mix_text_normalize(formattext)
137
+ return self.get_phones_and_bert(formattext,"zh",version)
138
+ else:
139
+ phones, word2ph, norm_text = self.clean_text_inf(formattext, language, version)
140
+ bert = self.get_bert_feature(norm_text, word2ph).to(self.device)
141
+ elif language == "yue" and re.search(r'[A-Za-z]', formattext):
142
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
143
+ formattext = chinese.mix_text_normalize(formattext)
144
+ return self.get_phones_and_bert(formattext,"yue",version)
145
+ else:
146
+ phones, word2ph, norm_text = self.clean_text_inf(formattext, language, version)
147
+ bert = torch.zeros(
148
+ (1024, len(phones)),
149
+ dtype=torch.float32,
150
+ ).to(self.device)
151
+ elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
152
+ textlist=[]
153
+ langlist=[]
154
+ LangSegment.setfilters(["zh","ja","en","ko"])
155
+ if language == "auto":
156
+ for tmp in LangSegment.getTexts(text):
157
+ langlist.append(tmp["lang"])
158
+ textlist.append(tmp["text"])
159
+ elif language == "auto_yue":
160
+ for tmp in LangSegment.getTexts(text):
161
+ if tmp["lang"] == "zh":
162
+ tmp["lang"] = "yue"
163
+ langlist.append(tmp["lang"])
164
+ textlist.append(tmp["text"])
165
+ else:
166
+ for tmp in LangSegment.getTexts(text):
167
+ if tmp["lang"] == "en":
168
+ langlist.append(tmp["lang"])
169
+ else:
170
+ # CJK Han characters cannot be told apart automatically, so trust the user's language choice
171
+ langlist.append(language)
172
+ textlist.append(tmp["text"])
173
+ # print(textlist)
174
+ # print(langlist)
175
+ phones_list = []
176
+ bert_list = []
177
+ norm_text_list = []
178
+ for i in range(len(textlist)):
179
+ lang = langlist[i]
180
+ phones, word2ph, norm_text = self.clean_text_inf(textlist[i], lang, version)
181
+ bert = self.get_bert_inf(phones, word2ph, norm_text, lang)
182
+ phones_list.append(phones)
183
+ norm_text_list.append(norm_text)
184
+ bert_list.append(bert)
185
+ bert = torch.cat(bert_list, dim=1)
186
+ phones = sum(phones_list, [])
187
+ norm_text = ''.join(norm_text_list)
188
+
189
+ if not final and len(phones) < 6:
190
+ return self.get_phones_and_bert("." + text,language,version,final=True)
191
+
192
+ return phones, bert, norm_text
193
+
194
+
195
+ def get_bert_feature(self, text:str, word2ph:list)->torch.Tensor:
196
+ with torch.no_grad():
197
+ inputs = self.tokenizer(text, return_tensors="pt")
198
+ for i in inputs:
199
+ inputs[i] = inputs[i].to(self.device)
200
+ res = self.bert_model(**inputs, output_hidden_states=True)
201
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
202
+ assert len(word2ph) == len(text)
203
+ phone_level_feature = []
204
+ for i in range(len(word2ph)):
205
+ repeat_feature = res[i].repeat(word2ph[i], 1)
206
+ phone_level_feature.append(repeat_feature)
207
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
208
+ return phone_level_feature.T
209
+
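+ # Note: word2ph gives the number of phones per character of `text`; repeating the
+ # i-th character's hidden state word2ph[i] times yields a (1024, n_phones) matrix.
+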
210
+ def clean_text_inf(self, text:str, language:str, version:str="v2"):
211
+ phones, word2ph, norm_text = clean_text(text, language, version)
212
+ phones = cleaned_text_to_sequence(phones, version)
213
+ return phones, word2ph, norm_text
214
+
215
+ def get_bert_inf(self, phones:list, word2ph:list, norm_text:str, language:str):
216
+ language=language.replace("all_","")
217
+ if language == "zh":
218
+ feature = self.get_bert_feature(norm_text, word2ph).to(self.device)
219
+ else:
220
+ feature = torch.zeros(
221
+ (1024, len(phones)),
222
+ dtype=torch.float32,
223
+ ).to(self.device)
224
+
225
+ return feature
226
+
227
+
228
+ def filter_text(self,texts):
229
+ _text=[]
230
+ if all(text in [None, " ", "\n",""] for text in texts):
231
+ raise ValueError(i18n("请输入有效文本"))
232
+ for text in texts:
233
+ if text in [None, " ", ""]:
234
+ pass
235
+ else:
236
+ _text.append(text)
237
+ return _text
238
+
239
+
240
+ def replace_consecutive_punctuation(self,text):
241
+ punctuations = ''.join(re.escape(p) for p in punctuation)
242
+ pattern = f'([{punctuations}])([{punctuations}])+'
243
+ result = re.sub(pattern, r'\1', text)
244
+ return result
245
+
246
+
247
+
TTS_infer_pack/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from . import TTS, text_segmentation_method
TTS_infer_pack/text_segmentation_method.py ADDED
@@ -0,0 +1,173 @@
1
+
2
+
3
+
4
+
5
+ import re
6
+ from typing import Callable
7
+
8
+ punctuation = set(['!', '?', '…', ',', '.', '-'," "])
9
+ METHODS = dict()
10
+
11
+ def get_method(name:str)->Callable:
12
+ method = METHODS.get(name, None)
13
+ if method is None:
14
+ raise ValueError(f"Method {name} not found")
15
+ return method
16
+
17
+ def get_method_names()->list:
18
+ return list(METHODS.keys())
19
+
20
+ def register_method(name):
21
+ def decorator(func):
22
+ METHODS[name] = func
23
+ return func
24
+ return decorator
25
+
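+ # Example (an illustrative sketch; "cut_lines" is a hypothetical name, not a
+ # built-in method):
+ # @register_method("cut_lines")
+ # def cut_lines(inp):
+ #     return inp  # keep the caller's own line breaks as the segmentation
+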
26
+ splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
27
+
28
+ def split_big_text(text, max_len=510):
29
+ # Full-width and half-width punctuation marks
30
+ punctuation = "".join(splits)
31
+
32
+ # Split the text, keeping the punctuation delimiters
33
+ segments = re.split('([' + punctuation + '])', text)
34
+
35
+ # Initialize the result list and the current segment
36
+ result = []
37
+ current_segment = ''
38
+
39
+ for segment in segments:
40
+ # If appending the new piece would push the current segment past max_len, flush the current segment and start a new one
41
+ if len(current_segment + segment) > max_len:
42
+ result.append(current_segment)
43
+ current_segment = segment
44
+ else:
45
+ current_segment += segment
46
+
47
+ # Append the final segment to the result list
48
+ if current_segment:
49
+ result.append(current_segment)
50
+
51
+ return result
52
+
53
+
54
+
55
+ def split(todo_text):
56
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
57
+ if todo_text[-1] not in splits:
58
+ todo_text += "。"
59
+ i_split_head = i_split_tail = 0
60
+ len_text = len(todo_text)
61
+ todo_texts = []
62
+ while 1:
63
+ if i_split_head >= len_text:
64
+ break # the text is guaranteed to end with punctuation, so the last segment was already appended
65
+ if todo_text[i_split_head] in splits:
66
+ i_split_head += 1
67
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
68
+ i_split_tail = i_split_head
69
+ else:
70
+ i_split_head += 1
71
+ return todo_texts
72
+
73
+
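+ # Example (illustrative): split("你好。再见!") returns ["你好。", "再见!"];
+ # each piece keeps its trailing punctuation mark.
+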
74
+ # cut0: no splitting
75
+ @register_method("cut0")
76
+ def cut0(inp):
77
+ if not set(inp).issubset(punctuation):
78
+ return inp
79
+ else:
80
+ return "/n"
81
+
82
+
83
+ # cut1: split every 4 sentences
84
+ @register_method("cut1")
85
+ def cut1(inp):
86
+ inp = inp.strip("\n")
87
+ inps = split(inp)
88
+ split_idx = list(range(0, len(inps), 4))
89
+ split_idx[-1] = None
90
+ if len(split_idx) > 1:
91
+ opts = []
92
+ for idx in range(len(split_idx) - 1):
93
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
94
+ else:
95
+ opts = [inp]
96
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
97
+ return "\n".join(opts)
98
+
99
+
100
+ # cut2: split roughly every 50 characters
101
+ @register_method("cut2")
102
+ def cut2(inp):
103
+ inp = inp.strip("\n")
104
+ inps = split(inp)
105
+ if len(inps) < 2:
106
+ return inp
107
+ opts = []
108
+ summ = 0
109
+ tmp_str = ""
110
+ for i in range(len(inps)):
111
+ summ += len(inps[i])
112
+ tmp_str += inps[i]
113
+ if summ > 50:
114
+ summ = 0
115
+ opts.append(tmp_str)
116
+ tmp_str = ""
117
+ if tmp_str != "":
118
+ opts.append(tmp_str)
119
+ # print(opts)
120
+ if len(opts) > 1 and len(opts[-1]) < 50: ## if the last chunk is too short, merge it into the previous one
121
+ opts[-2] = opts[-2] + opts[-1]
122
+ opts = opts[:-1]
123
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
124
+ return "\n".join(opts)
125
+
126
+ # cut3: split at Chinese full stops "。"
127
+ @register_method("cut3")
128
+ def cut3(inp):
129
+ inp = inp.strip("\n")
130
+ opts = ["%s" % item for item in inp.strip("。").split("。")]
131
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
132
+ return "\n".join(opts)
133
+
134
+ # cut4: split at English periods "."
135
+ @register_method("cut4")
136
+ def cut4(inp):
137
+ inp = inp.strip("\n")
138
+ opts = ["%s" % item for item in inp.strip(".").split(".")]
139
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
140
+ return "\n".join(opts)
141
+
142
+ # cut5: split at punctuation marks
143
+ # contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
144
+ @register_method("cut5")
145
+ def cut5(inp):
146
+ inp = inp.strip("\n")
147
+ punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
148
+ mergeitems = []
149
+ items = []
150
+
151
+ for i, char in enumerate(inp):
152
+ if char in punds:
153
+ if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
154
+ items.append(char)
155
+ else:
156
+ items.append(char)
157
+ mergeitems.append("".join(items))
158
+ items = []
159
+ else:
160
+ items.append(char)
161
+
162
+ if items:
163
+ mergeitems.append("".join(items))
164
+
165
+ opt = [item for item in mergeitems if not set(item).issubset(punds)]
166
+ return "\n".join(opt)
167
+
168
+
169
+
170
+ if __name__ == '__main__':
171
+ method = get_method("cut5")
172
+ print(method("你好,我是小明。你好,我是小红。你好,我是小刚。你好,我是小张。"))
173
+
TTS_infer_pack/tts_infer_module.py ADDED
@@ -0,0 +1,150 @@
1
+ import os
2
+ import random
3
+ import typing
4
+ from pathlib import Path
5
+
6
+ from .TTS import TTS, TTS_Config
7
+
8
+ # from pydub import AudioSegment
9
+ # from pydub.playback import play
10
+ # from setting import settings
11
+ current_dir = Path(__file__).parent
12
+ parent_dir = current_dir.parent
13
+
14
+
15
+ class TTSModule:
16
+ def __init__(self, tts_config: TTS_Config):
17
+ self.tts_pipeline = TTS(tts_config)
18
+
19
+ def setup_inference_params(
20
+ self,
21
+ text_lang: typing.Literal["en", "all_zh", "all_ja"] = 'en',
22
+ ref_audio: str = '',
23
+ prompt_text: str = '',
24
+ prompt_lang: str = '',
25
+ top_k: int = 60,
26
+ top_p: float = 1.0,
27
+ temperature: float = 1.0,
28
+ text_split_method: typing.Literal['cut0', 'cut1', 'cut2', 'cut3', 'cut4', 'cut5'] = 'cut5',
29
+ batch_size: int = 100,
30
+ speed_factor: float = 1.1,
31
+ split_bucket: bool = True,
32
+ return_fragment: bool = False,
33
+ fragment_interval: float = 0.1,
34
+ seed: int = 233333,
35
+ parallel_infer: bool = True,
36
+ ):
37
+ """
38
+ Set up inference parameters for the model.
39
+
40
+ :param text_lang: Language of the input text. Choose from "en" (English), "all_zh" (Chinese), or "all_ja" (Japanese).
41
+ :param ref_audio: Path to the reference audio file. This audio is used as a voice reference for the output.
42
+ :param prompt_text: Text of the reference speech. This can be used to guide the model's output style or content.
43
+ :param prompt_lang: Language of the reference speech. Should correspond to the language of prompt_text.
44
+ :param top_k: Top-K sampling parameter for GPT. Limits the next token selection to the K most likely tokens.
45
+ :param top_p: Top-P (nucleus) sampling parameter for GPT. Sets a cumulative probability cutoff for token selection.
46
+ :param temperature: Temperature for GPT sampling. Higher values increase randomness, lower values make output more deterministic.
47
+ :param text_split_method: Method to split the input text:
48
+ - "cut0": No splitting (process whole text at once)
49
+ - "cut1": Split every 4 sentences
50
+ - "cut2": Split every 50 words
51
+ - "cut3": Split at Chinese full stops '。'
52
+ - "cut4": Split at English periods '.'
53
+ - "cut5": Automatic splitting (recommended)
54
+ :param batch_size: Batch size for inference. Larger values may speed up processing but require more memory.
55
+ :param speed_factor: Factor to control the speed of the output audio. Values > 1 speed up, < 1 slow down.
56
+ :param split_bucket: Whether to use bucket splitting for more efficient processing. Recommended to keep on.
57
+ :param return_fragment: If True, return individual fragments of the generated audio.
58
+ :param fragment_interval: Time interval (in seconds) between each sentence or fragment in the output audio.
59
+ :param seed: Random seed for reproducibility. Set a fixed value for consistent results across runs.
60
+ :param parallel_infer: whether to use parallel inference.
61
+ """
62
+ self.text_lang = text_lang
63
+ self.ref_audio = str(ref_audio)
64
+ self.prompt_text = prompt_text
65
+ self.prompt_lang = prompt_lang
66
+ self.top_k = max(1, int(top_k)) # ensure top_k is at least 1
67
+ self.top_p = max(0.0, min(1.0, float(top_p))) # clamp top_p to [0, 1]
68
+ self.temperature = max(0.0, float(temperature)) # ensure temperature is non-negative
69
+ self.text_split_method = text_split_method
70
+ self.batch_size = max(1, int(batch_size)) # ensure batch_size is at least 1
71
+ self.speed_factor = max(0.1, float(speed_factor)) # ensure speed_factor is at least 0.1
72
+ self.split_bucket = bool(split_bucket)
73
+ self.return_fragment = bool(return_fragment)
74
+ self.fragment_interval = max(0.0, float(fragment_interval)) # ensure fragment_interval is non-negative
75
+ self.seed = int(seed)
76
+ self.parallel_infer = parallel_infer
77
+
78
+ # Validate that the reference audio file exists
79
+ if self.ref_audio and not os.path.exists(self.ref_audio):
80
+ raise FileNotFoundError(f"Reference audio file not found: {self.ref_audio}")
81
+
82
+ def inference(self, text, text_lang, ref_audio_path, prompt_text, prompt_lang, top_k,
83
+ top_p, temperature, text_split_method, batch_size, speed_factor,
84
+ ref_text_free, split_bucket, fragment_interval, seed, parallel_infer: bool):
85
+ actual_seed = seed if seed not in [-1, "", None] else random.randrange(1 << 32)
86
+ inputs = {
87
+ "text": text,
88
+ "text_lang": text_lang,
89
+ "ref_audio_path": ref_audio_path,
90
+ "prompt_text": prompt_text if not ref_text_free else "",
91
+ "prompt_lang": prompt_lang,
92
+ "top_k": top_k,
93
+ "top_p": top_p,
94
+ "temperature": temperature,
95
+ "text_split_method": text_split_method,
96
+ "batch_size": int(batch_size),
97
+ "speed_factor": float(speed_factor),
98
+ "split_bucket": split_bucket,
99
+ "return_fragment": False,
100
+ "fragment_interval": fragment_interval,
101
+ "seed": actual_seed,
102
+ "parallel_infer": parallel_infer
103
+ }
104
+ # print(inputs)
105
+ for item in self.tts_pipeline.run(inputs):
106
+ yield item, actual_seed
107
+
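+ # Example (an illustrative sketch; `module` and the file paths are assumptions):
+ # for (sr, wav), used_seed in module.inference(
+ #         "Hello.", "en", "ref.wav", "ref text", "en", 60, 1.0, 1.0,
+ #         "cut5", 20, 1.0, False, True, 0.3, -1, True):
+ #     soundfile.write("out.wav", wav, sr)  # assumes `import soundfile`
+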
108
+ def generate_audio(self, text, warmup: bool = False):
109
+
110
+ [output] = self.inference(
111
+ text,
112
+ self.text_lang,
113
+ self.ref_audio,
114
+ self.prompt_text,
115
+ self.prompt_lang,
116
+ self.top_k,
117
+ self.top_p,
118
+ self.temperature,
119
+ self.text_split_method,
120
+ self.batch_size,
121
+ self.speed_factor,
122
+ self.return_fragment, # note: this fills the ref_text_free parameter of inference()
123
+ self.split_bucket,
124
+ self.fragment_interval,
125
+ self.seed,
126
+ self.parallel_infer,
127
+ )
128
+
129
+ if warmup:
130
+ return None
131
+
132
+ return output
133
+
134
+ # solution v1
135
+ # print(f"Audio generated path : '{parent_dir}'")
136
+ # # output_folder = "./output" # specify the output directory
137
+ # output_folder = str(settings.AUDIO_OUTPUT_FOLDER)
138
+ # if not os.path.exists(output_folder):
139
+ # os.makedirs(output_folder) # create the directory if it does not exist
140
+ # now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S%f") # timestamp for the output filename
141
+ # file_name = os.path.join(output_folder, f'output_{now}_{count}.wav')
142
+ # sf.write(file_name, output[0][1], samplerate=output[0][0], subtype='PCM_16')
143
+ #
144
+ # print(f"Audio file saved to: {file_name}")
145
+ # return file_name
146
+
147
+ # Usage example
148
+ # tts_module = TTSModule("./moyoyo_tts/configs/tts_infer.yaml")
149
+ # text = "Well, you know what the say, Families are like fudge, mostly sweet, but sometimes nuts. My family is doing great, thanks for asking! My son is growing up to be a smart and handsome young man, just like his mom. He's currently working on his own talker show, which I'm sure will be even more hilarious than mine."
150
+ # tts_module.generate_audio(text)
__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ from .TTS_infer_pack.TTS import TTS_Config
2
+ from .TTS_infer_pack.tts_infer_module import TTSModule
3
+ from .utils import HParams
4
+
5
+ __all__ = (
6
+ "HParams",
7
+ "TTSModule",
8
+ "TTS_Config",
9
+ )
configs/.gitignore ADDED
@@ -0,0 +1 @@
1
+ *.yaml
configs/s2.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32a167e10a5664e54be6e78f178bcb497c3ae1ea9a5c41297812db3cae6fec8a
3
+ size 1556
download.py ADDED
@@ -0,0 +1,5 @@
1
+ import os, sys
2
+ now_dir = os.getcwd()
3
+ sys.path.insert(0, now_dir)
4
+ from text.g2pw import G2PWPinyin
5
+ g2pw = G2PWPinyin(model_dir="moyoyo_tts/text/G2PWModel",model_source="moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large",v_to_u=False, neutral_tone_with_five=True)
export_torch_script.py ADDED
@@ -0,0 +1,831 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import argparse
4
+ from typing import Optional
5
+ from my_utils import load_audio
6
+ import torch
7
+ import torchaudio
8
+
9
+ from torch import IntTensor, LongTensor, Tensor, nn
10
+ from torch.nn import functional as F
11
+
12
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
13
+ from feature_extractor import cnhubert
14
+
15
+ from AR.models.t2s_lightning_module import Text2SemanticLightningModule
16
+ from module.models_onnx import SynthesizerTrn
17
+
18
+ from inference_webui import get_phones_and_bert
19
+
20
+ import os
21
+ import soundfile
22
+
23
+ default_config = {
24
+ "embedding_dim": 512,
25
+ "hidden_dim": 512,
26
+ "num_head": 8,
27
+ "num_layers": 12,
28
+ "num_codebook": 8,
29
+ "p_dropout": 0.0,
30
+ "vocab_size": 1024 + 1,
31
+ "phoneme_vocab_size": 512,
32
+ "EOS": 1024,
33
+ }
34
+
35
+ def get_raw_t2s_model(dict_s1) -> Text2SemanticLightningModule:
36
+ config = dict_s1["config"]
37
+ config["model"]["dropout"] = float(config["model"]["dropout"])
38
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
39
+ t2s_model.load_state_dict(dict_s1["weight"])
40
+ t2s_model = t2s_model.eval()
41
+ return t2s_model
42
+
43
+ @torch.jit.script
44
+ def logits_to_probs(
45
+ logits,
46
+ previous_tokens: Optional[torch.Tensor] = None,
47
+ temperature: float = 1.0,
48
+ top_k: Optional[int] = None,
49
+ top_p: Optional[float] = None,
50
+ repetition_penalty: float = 1.0,
51
+ ):
52
+ # if previous_tokens is not None:
53
+ # previous_tokens = previous_tokens.squeeze()
54
+ # print(logits.shape,previous_tokens.shape)
55
+ # pdb.set_trace()
56
+ if previous_tokens is not None and repetition_penalty != 1.0:
57
+ previous_tokens = previous_tokens.long()
58
+ score = torch.gather(logits, dim=1, index=previous_tokens)
59
+ score = torch.where(
60
+ score < 0, score * repetition_penalty, score / repetition_penalty
61
+ )
62
+ logits.scatter_(dim=1, index=previous_tokens, src=score)
63
+
64
+ if top_p is not None and top_p < 1.0:
65
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
66
+ cum_probs = torch.cumsum(
67
+ torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
68
+ )
69
+ sorted_indices_to_remove = cum_probs > top_p
70
+ sorted_indices_to_remove[:, 0] = False # keep at least one option
71
+ indices_to_remove = sorted_indices_to_remove.scatter(
72
+ dim=1, index=sorted_indices, src=sorted_indices_to_remove
73
+ )
74
+ logits = logits.masked_fill(indices_to_remove, -float("Inf"))
75
+
76
+ logits = logits / max(temperature, 1e-5)
77
+
78
+ if top_k is not None:
79
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
80
+ pivot = v[:, -1].unsqueeze(-1)
81
+ logits = torch.where(logits < pivot, -float("Inf"), logits)
82
+
83
+ probs = torch.nn.functional.softmax(logits, dim=-1)
84
+ return probs
85
+
86
+ @torch.jit.script
87
+ def multinomial_sample_one_no_sync(probs_sort):
88
+ # Does multinomial sampling without a CUDA synchronization (exponential/Gumbel trick)
89
+ q = torch.empty_like(probs_sort).exponential_(1)
90
+ return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
91
+
92
+ @torch.jit.script
93
+ def sample(
94
+ logits,
95
+ previous_tokens,
96
+ temperature: float = 1.0,
97
+ top_k: Optional[int] = None,
98
+ top_p: Optional[float] = None,
99
+ repetition_penalty: float = 1.0,
100
+ ):
101
+ probs = logits_to_probs(
102
+ logits=logits, previous_tokens=previous_tokens, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty
103
+ )
104
+ idx_next = multinomial_sample_one_no_sync(probs)
105
+ return idx_next, probs
106
+
107
+
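+ # Example (illustrative): drawing one next-token id per batch row:
+ # logits = torch.randn(1, 1025)               # (batch, vocab)
+ # prev = torch.zeros(1, 0, dtype=torch.long)  # empty history
+ # idx_next, probs = sample(logits, prev, temperature=1.0, top_k=5, top_p=1.0)
+ # idx_next.shape -> torch.Size([1, 1])
+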
108
+ @torch.jit.script
109
+ def spectrogram_torch(y:Tensor, n_fft:int, sampling_rate:int, hop_size:int, win_size:int, center:bool=False):
110
+ hann_window = torch.hann_window(win_size,device=y.device,dtype=y.dtype)
111
+ y = torch.nn.functional.pad(
112
+ y.unsqueeze(1),
113
+ (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
114
+ mode="reflect",
115
+ )
116
+ y = y.squeeze(1)
117
+ spec = torch.stft(
118
+ y,
119
+ n_fft,
120
+ hop_length=hop_size,
121
+ win_length=win_size,
122
+ window=hann_window,
123
+ center=center,
124
+ pad_mode="reflect",
125
+ normalized=False,
126
+ onesided=True,
127
+ return_complex=False,
128
+ )
129
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
130
+ return spec
131
+
132
+
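
For scale: with the usual 32 kHz v2 settings (n_fft 2048, hop 640, win 2048; these are assumed here, the real values come from the checkpoint's hps.data), one second of audio yields 50 frames:

    wav = torch.randn(1, 32000)              # (batch, samples) at 32 kHz
    spec = spectrogram_torch(wav, n_fft=2048, sampling_rate=32000, hop_size=640, win_size=2048, center=False)
    print(spec.shape)                        # torch.Size([1, 1025, 50]) -> (batch, n_fft // 2 + 1, frames)
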
133
+ class DictToAttrRecursive(dict):
134
+ def __init__(self, input_dict):
135
+ super().__init__(input_dict)
136
+ for key, value in input_dict.items():
137
+ if isinstance(value, dict):
138
+ value = DictToAttrRecursive(value)
139
+ self[key] = value
140
+ setattr(self, key, value)
141
+
142
+ def __getattr__(self, item):
143
+ try:
144
+ return self[item]
145
+ except KeyError:
146
+ raise AttributeError(f"Attribute {item} not found")
147
+
148
+ def __setattr__(self, key, value):
149
+ if isinstance(value, dict):
150
+ value = DictToAttrRecursive(value)
151
+ super(DictToAttrRecursive, self).__setitem__(key, value)
152
+ super().__setattr__(key, value)
153
+
154
+ def __delattr__(self, item):
155
+ try:
156
+ del self[item]
157
+ except KeyError:
158
+ raise AttributeError(f"Attribute {item} not found")
159
+
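
DictToAttrRecursive simply makes nested config dicts addressable with dot notation:

    hps = DictToAttrRecursive({"data": {"sampling_rate": 32000}})
    assert hps.data.sampling_rate == hps["data"]["sampling_rate"] == 32000
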
160
+ @torch.jit.script
161
+ class T2SMLP:
162
+ def __init__(self, w1, b1, w2, b2):
163
+ self.w1 = w1
164
+ self.b1 = b1
165
+ self.w2 = w2
166
+ self.b2 = b2
167
+
168
+ def forward(self, x):
169
+ x = F.relu(F.linear(x, self.w1, self.b1))
170
+ x = F.linear(x, self.w2, self.b2)
171
+ return x
172
+
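
T2SMLP carries the raw linear weights so the scripted block can call F.linear directly. A shape check with random weights (the 4x expansion here is illustrative, mirroring a typical transformer feed-forward):

    w1, b1 = torch.randn(2048, 512), torch.randn(2048)
    w2, b2 = torch.randn(512, 2048), torch.randn(512)
    mlp = T2SMLP(w1, b1, w2, b2)
    print(mlp.forward(torch.randn(1, 10, 512)).shape)   # torch.Size([1, 10, 512])
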
173
+ @torch.jit.script
174
+ class T2SBlock:
175
+ def __init__(
176
+ self,
177
+ num_heads: int,
178
+ hidden_dim: int,
179
+ mlp: T2SMLP,
180
+ qkv_w,
181
+ qkv_b,
182
+ out_w,
183
+ out_b,
184
+ norm_w1,
185
+ norm_b1,
186
+ norm_eps1: float,
187
+ norm_w2,
188
+ norm_b2,
189
+ norm_eps2: float,
190
+ ):
191
+ self.num_heads = num_heads
192
+ self.mlp = mlp
193
+ self.hidden_dim: int = hidden_dim
194
+ self.qkv_w = qkv_w
195
+ self.qkv_b = qkv_b
196
+ self.out_w = out_w
197
+ self.out_b = out_b
198
+ self.norm_w1 = norm_w1
199
+ self.norm_b1 = norm_b1
200
+ self.norm_eps1 = norm_eps1
201
+ self.norm_w2 = norm_w2
202
+ self.norm_b2 = norm_b2
203
+ self.norm_eps2 = norm_eps2
204
+
205
+ self.false = torch.tensor(False, dtype=torch.bool)
206
+
207
+ @torch.jit.ignore
208
+ def to_mask(self, x:torch.Tensor, padding_mask:Optional[torch.Tensor]):
209
+ if padding_mask is None:
210
+ return x
211
+
212
+ if padding_mask.dtype == torch.bool:
213
+ return x.masked_fill(padding_mask, 0)
214
+ else:
215
+ return x * padding_mask
216
+
217
+ def process_prompt(self, x:torch.Tensor, attn_mask : torch.Tensor, padding_mask:Optional[torch.Tensor]=None):
218
+ q, k, v = F.linear(self.to_mask(x, padding_mask), self.qkv_w, self.qkv_b).chunk(3, dim=-1)
219
+
220
+ batch_size = q.shape[0]
221
+ q_len = q.shape[1]
222
+ kv_len = k.shape[1]
223
+
224
+ q = self.to_mask(q, padding_mask)
225
+ k_cache = self.to_mask(k, padding_mask)
226
+ v_cache = self.to_mask(v, padding_mask)
227
+
228
+ q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
229
+ k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
230
+ v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
231
+
232
+ attn = F.scaled_dot_product_attention(q, k, v, ~attn_mask)
233
+
234
+ attn = attn.permute(2, 0, 1, 3).reshape(batch_size*q_len, self.hidden_dim)
235
+ attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0)
236
+ attn = F.linear(self.to_mask(attn, padding_mask), self.out_w, self.out_b)
237
+
238
+ if padding_mask is not None:
239
+ for i in range(batch_size):
240
+ # mask = padding_mask[i,:,0]
241
+ if self.false.device != padding_mask.device:
242
+ self.false = self.false.to(padding_mask.device)
243
+ idx = torch.where(padding_mask[i,:,0]==self.false)[0]
244
+ x_item = x[i,idx,:].unsqueeze(0)
245
+ attn_item = attn[i,idx,:].unsqueeze(0)
246
+ x_item = x_item + attn_item
247
+ x_item = F.layer_norm(
248
+ x_item, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
249
+ )
250
+ x_item = x_item + self.mlp.forward(x_item)
251
+ x_item = F.layer_norm(
252
+ x_item,
253
+ [self.hidden_dim],
254
+ self.norm_w2,
255
+ self.norm_b2,
256
+ self.norm_eps2,
257
+ )
258
+ x[i,idx,:] = x_item.squeeze(0)
259
+ x = self.to_mask(x, padding_mask)
260
+ else:
261
+ x = x + attn
262
+ x = F.layer_norm(
263
+ x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
264
+ )
265
+ x = x + self.mlp.forward(x)
266
+ x = F.layer_norm(
267
+ x,
268
+ [self.hidden_dim],
269
+ self.norm_w2,
270
+ self.norm_b2,
271
+ self.norm_eps2,
272
+ )
273
+ return x, k_cache, v_cache
274
+
275
+ def decode_next_token(self, x:torch.Tensor, k_cache:torch.Tensor, v_cache:torch.Tensor):
276
+ q, k, v = F.linear(x, self.qkv_w, self.qkv_b).chunk(3, dim=-1)
277
+
278
+ k_cache = torch.cat([k_cache, k], dim=1)
279
+ v_cache = torch.cat([v_cache, v], dim=1)
280
+
281
+ batch_size = q.shape[0]
282
+ q_len = q.shape[1]
283
+ kv_len = k_cache.shape[1]
284
+
285
+ q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
286
+ k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
287
+ v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
288
+
289
+ attn = F.scaled_dot_product_attention(q, k, v)
290
+
291
+ attn = attn.permute(2, 0, 1, 3).reshape(batch_size*q_len, self.hidden_dim)
292
+ attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0)
293
+ attn = F.linear(attn, self.out_w, self.out_b)
294
+
295
+ x = x + attn
296
+ x = F.layer_norm(
297
+ x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
298
+ )
299
+ x = x + self.mlp.forward(x)
300
+ x = F.layer_norm(
301
+ x,
302
+ [self.hidden_dim],
303
+ self.norm_w2,
304
+ self.norm_b2,
305
+ self.norm_eps2,
306
+ )
307
+ return x, k_cache, v_cache
308
+
309
+ @torch.jit.script
310
+ class T2STransformer:
311
+ def __init__(self, num_blocks : int, blocks: list[T2SBlock]):
312
+ self.num_blocks : int = num_blocks
313
+ self.blocks = blocks
314
+
315
+ def process_prompt(
316
+ self, x:torch.Tensor, attn_mask : torch.Tensor,padding_mask : Optional[torch.Tensor]=None):
317
+ k_cache : list[torch.Tensor] = []
318
+ v_cache : list[torch.Tensor] = []
319
+ for i in range(self.num_blocks):
320
+ x, k_cache_, v_cache_ = self.blocks[i].process_prompt(x, attn_mask, padding_mask)
321
+ k_cache.append(k_cache_)
322
+ v_cache.append(v_cache_)
323
+ return x, k_cache, v_cache
324
+
325
+ def decode_next_token(
326
+ self, x:torch.Tensor,
327
+ k_cache: list[torch.Tensor],
328
+ v_cache: list[torch.Tensor]):
329
+ for i in range(self.num_blocks):
330
+ x, k_cache[i], v_cache[i] = self.blocks[i].decode_next_token(x, k_cache[i], v_cache[i])
331
+ return x, k_cache, v_cache
332
+
333
+ class VitsModel(nn.Module):
334
+ def __init__(self, vits_path):
335
+ super().__init__()
336
+ # dict_s2 = torch.load(vits_path,map_location="cpu")
337
+ dict_s2 = torch.load(vits_path)
338
+ self.hps = dict_s2["config"]
339
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
340
+ self.hps["model"]["version"] = "v1"
341
+ else:
342
+ self.hps["model"]["version"] = "v2"
343
+
344
+ self.hps = DictToAttrRecursive(self.hps)
345
+ self.hps.model.semantic_frame_rate = "25hz"
346
+ self.vq_model = SynthesizerTrn(
347
+ self.hps.data.filter_length // 2 + 1,
348
+ self.hps.train.segment_size // self.hps.data.hop_length,
349
+ n_speakers=self.hps.data.n_speakers,
350
+ **self.hps.model
351
+ )
352
+ self.vq_model.eval()
353
+ self.vq_model.load_state_dict(dict_s2["weight"], strict=False)
354
+
355
+ def forward(self, text_seq, pred_semantic, ref_audio, speed=1.0):
356
+ refer = spectrogram_torch(
357
+ ref_audio,
358
+ self.hps.data.filter_length,
359
+ self.hps.data.sampling_rate,
360
+ self.hps.data.hop_length,
361
+ self.hps.data.win_length,
362
+ center=False
363
+ )
364
+ return self.vq_model(pred_semantic, text_seq, refer, speed)[0, 0]
365
+
366
+ class T2SModel(nn.Module):
367
+ def __init__(self,raw_t2s:Text2SemanticLightningModule):
368
+ super(T2SModel, self).__init__()
369
+ self.model_dim = raw_t2s.model.model_dim
370
+ self.embedding_dim = raw_t2s.model.embedding_dim
371
+ self.num_head = raw_t2s.model.num_head
372
+ self.num_layers = raw_t2s.model.num_layers
373
+ self.vocab_size = raw_t2s.model.vocab_size
374
+ self.phoneme_vocab_size = raw_t2s.model.phoneme_vocab_size
375
+ # self.p_dropout = float(raw_t2s.model.p_dropout)
376
+ self.EOS:int = int(raw_t2s.model.EOS)
377
+ self.norm_first = raw_t2s.model.norm_first
378
+ assert self.EOS == self.vocab_size - 1
379
+ self.hz = 50
380
+
381
+ self.bert_proj = raw_t2s.model.bert_proj
382
+ self.ar_text_embedding = raw_t2s.model.ar_text_embedding
383
+ self.ar_text_position = raw_t2s.model.ar_text_position
384
+ self.ar_audio_embedding = raw_t2s.model.ar_audio_embedding
385
+ self.ar_audio_position = raw_t2s.model.ar_audio_position
386
+
387
+ # self.t2s_transformer = T2STransformer(self.num_layers, blocks)
388
+ # self.t2s_transformer = raw_t2s.model.t2s_transformer
389
+
390
+ blocks = []
391
+ h = raw_t2s.model.h
392
+
393
+ for i in range(self.num_layers):
394
+ layer = h.layers[i]
395
+ t2smlp = T2SMLP(
396
+ layer.linear1.weight,
397
+ layer.linear1.bias,
398
+ layer.linear2.weight,
399
+ layer.linear2.bias
400
+ )
401
+
402
+ block = T2SBlock(
403
+ self.num_head,
404
+ self.model_dim,
405
+ t2smlp,
406
+ layer.self_attn.in_proj_weight,
407
+ layer.self_attn.in_proj_bias,
408
+ layer.self_attn.out_proj.weight,
409
+ layer.self_attn.out_proj.bias,
410
+ layer.norm1.weight,
411
+ layer.norm1.bias,
412
+ layer.norm1.eps,
413
+ layer.norm2.weight,
414
+ layer.norm2.bias,
415
+ layer.norm2.eps
416
+ )
417
+
418
+ blocks.append(block)
419
+
420
+ self.t2s_transformer = T2STransformer(self.num_layers, blocks)
421
+
422
+ # self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
423
+ self.ar_predict_layer = raw_t2s.model.ar_predict_layer
424
+ # self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
425
+ self.max_sec = raw_t2s.config["data"]["max_sec"]
426
+ self.top_k = int(raw_t2s.config["inference"]["top_k"])
427
+ self.early_stop_num = torch.LongTensor([self.hz * self.max_sec])
428
+
429
+ def forward(self,prompts:LongTensor, ref_seq:LongTensor, text_seq:LongTensor, ref_bert:torch.Tensor, text_bert:torch.Tensor):
430
+ bert = torch.cat([ref_bert.T, text_bert.T], 1)
431
+ all_phoneme_ids = torch.cat([ref_seq, text_seq], 1)
432
+ bert = bert.unsqueeze(0)
433
+
434
+ x = self.ar_text_embedding(all_phoneme_ids)
435
+ x = x + self.bert_proj(bert.transpose(1, 2))
436
+ x:torch.Tensor = self.ar_text_position(x)
437
+
438
+ early_stop_num = self.early_stop_num
439
+
440
+
441
+ #[1,N,512] [1,N]
442
+ # y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts)
443
+ y = prompts
444
+ # x_example = x[:,:,0] * 0.0
445
+
446
+ x_len = x.shape[1]
447
+ x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
448
+
449
+ y_emb = self.ar_audio_embedding(y)
450
+ y_len = y_emb.shape[1]
451
+ prefix_len = y.shape[1]
452
+ y_pos = self.ar_audio_position(y_emb)
453
+ xy_pos = torch.concat([x, y_pos], dim=1)
454
+
455
+ bsz = x.shape[0]
456
+ src_len = x_len + y_len
457
+ x_attn_mask_pad = F.pad(
458
+ x_attn_mask,
459
+ (0, y_len),  ### extend xx's all-zero mask with all-True columns for xy, shape (x, x+y)
460
+ value=True,
461
+ )
462
+ y_attn_mask = F.pad(  ### pad yy's upper-triangular True region with False on the left for xy, shape (y, x+y)
463
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
464
+ (x_len, 0),
465
+ value=False,
466
+ )
467
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)\
468
+ .unsqueeze(0)\
469
+ .expand(bsz*self.num_head, -1, -1)\
470
+ .view(bsz, self.num_head, src_len, src_len)\
471
+ .to(device=x.device, dtype=torch.bool)
472
+
473
+ idx = 0
474
+
475
+ xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, None)
476
+
477
+ logits = self.ar_predict_layer(xy_dec[:, -1])
478
+ logits = logits[:, :-1]
479
+ samples = sample(logits, y, top_k=self.top_k, top_p=1, repetition_penalty=1.35, temperature=1.0)[0]
480
+ y = torch.concat([y, samples], dim=1)
481
+ y_emb = self.ar_audio_embedding(y[:, -1:])
482
+ xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to(dtype=y_emb.dtype,device=y_emb.device)
483
+
484
+ stop = False
485
+ # for idx in range(1, 50):
486
+ for idx in range(1, 1500):
487
+ #[1, N] [N_layer, N, 1, 512] [N_layer, N, 1, 512] [1, N, 512] [1] [1, N, 512] [1, N]
488
+ # y, k, v, y_emb, logits, samples = self.stage_decoder(y, k, v, y_emb, x_example)
489
+ xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache)
490
+ logits = self.ar_predict_layer(xy_dec[:, -1])
491
+
492
+ if idx < 11:  ### require at least 10 predicted tokens (~0.4 s) before allowing a stop
493
+ logits = logits[:, :-1]
494
+
495
+ samples = sample(logits, y, top_k=self.top_k, top_p=1, repetition_penalty=1.35, temperature=1.0)[0]
496
+
497
+ y = torch.concat([y, samples], dim=1)
498
+
499
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
500
+ stop = True
501
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
502
+ stop = True
503
+ if stop:
504
+ if y.shape[1] == 0:
505
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
506
+ break
507
+
508
+ y_emb = self.ar_audio_embedding(y[:, -1:])
509
+ xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to(dtype=y_emb.dtype,device=y_emb.device)
510
+
511
+ y[0,-1] = 0
512
+
513
+ return y[:, -idx:].unsqueeze(0)
514
+
515
+ bert_path = os.environ.get(
516
+ "bert_path", "moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large"
517
+ )
518
+ cnhubert_base_path = "moyoyo_tts/pretrained_models/chinese-hubert-base"
519
+ cnhubert.cnhubert_base_path = cnhubert_base_path
520
+
521
+ @torch.jit.script
522
+ def build_phone_level_feature(res:Tensor, word2ph:IntTensor):
523
+ phone_level_feature = []
524
+ for i in range(word2ph.shape[0]):
525
+ repeat_feature = res[i].repeat(word2ph[i].item(), 1)
526
+ phone_level_feature.append(repeat_feature)
527
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
528
+ # [sum(word2ph), 1024]
529
+ return phone_level_feature
530
+
531
+ class MyBertModel(torch.nn.Module):
532
+ def __init__(self, bert_model):
533
+ super(MyBertModel, self).__init__()
534
+ self.bert = bert_model
535
+
536
+ def forward(self, input_ids:torch.Tensor, attention_mask:torch.Tensor, token_type_ids:torch.Tensor, word2ph:IntTensor):
537
+ outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
538
+ # res = torch.cat(outputs["hidden_states"][-3:-2], -1)[0][1:-1]
539
+ res = torch.cat(outputs[1][-3:-2], -1)[0][1:-1]
540
+ return build_phone_level_feature(res, word2ph)
541
+
542
+ class SSLModel(torch.nn.Module):
543
+ def __init__(self):
544
+ super().__init__()
545
+ self.ssl = cnhubert.get_model().model
546
+
547
+ def forward(self, ref_audio_16k)-> torch.Tensor:
548
+ ssl_content = self.ssl(ref_audio_16k)["last_hidden_state"].transpose(1, 2)
549
+ return ssl_content
550
+
551
+ class ExportSSLModel(torch.nn.Module):
552
+ def __init__(self,ssl:SSLModel):
553
+ super().__init__()
554
+ self.ssl = ssl
555
+
556
+ def forward(self, ref_audio:torch.Tensor):
557
+ return self.ssl(ref_audio)
558
+
559
+ @torch.jit.export
560
+ def resample(self,ref_audio:torch.Tensor,src_sr:int,dst_sr:int)->torch.Tensor:
561
+ audio = resamplex(ref_audio,src_sr,dst_sr).float()
562
+ return audio
563
+
564
+ def export_bert(output_path):
565
+ tokenizer = AutoTokenizer.from_pretrained(bert_path)
566
+
567
+ text = "叹息声一声接着一声传出,木兰对着房门织布.听不见织布机织布的声音,只听见木兰在叹息.问木兰在想什么?问木兰在惦记什么?木兰答道,我也没有在想什么,也没有在惦记什么."
568
+ ref_bert_inputs = tokenizer(text, return_tensors="pt")
569
+ word2ph = []
570
+ for c in text:
571
+ if c in [',','。',':','?',",",".","?"]:
572
+ word2ph.append(1)
573
+ else:
574
+ word2ph.append(2)
575
+ ref_bert_inputs['word2ph'] = torch.Tensor(word2ph).int()
576
+
577
+ bert_model = AutoModelForMaskedLM.from_pretrained(bert_path,output_hidden_states=True,torchscript=True)
578
+ my_bert_model = MyBertModel(bert_model)
579
+
580
+ ref_bert_inputs = {
581
+ 'input_ids': ref_bert_inputs['input_ids'],
582
+ 'attention_mask': ref_bert_inputs['attention_mask'],
583
+ 'token_type_ids': ref_bert_inputs['token_type_ids'],
584
+ 'word2ph': ref_bert_inputs['word2ph']
585
+ }
586
+
587
+ torch._dynamo.mark_dynamic(ref_bert_inputs['input_ids'], 1)
588
+ torch._dynamo.mark_dynamic(ref_bert_inputs['attention_mask'], 1)
589
+ torch._dynamo.mark_dynamic(ref_bert_inputs['token_type_ids'], 1)
590
+ torch._dynamo.mark_dynamic(ref_bert_inputs['word2ph'], 0)
591
+
592
+ my_bert_model = torch.jit.trace(my_bert_model,example_kwarg_inputs=ref_bert_inputs)
593
+ output_path = os.path.join(output_path, "bert_model.pt")
594
+ my_bert_model.save(output_path)
595
+ print('#### exported bert ####')
596
+
597
+ def export(gpt_path, vits_path, ref_audio_path, ref_text, output_path, export_bert_and_ssl=False, device='cpu'):
598
+ if not os.path.exists(output_path):
599
+ os.makedirs(output_path)
600
+ print(f"目录已创建: {output_path}")
601
+ else:
602
+ print(f"目录已存在: {output_path}")
603
+
604
+ ref_audio = torch.tensor([load_audio(ref_audio_path, 16000)]).float()
605
+ ssl = SSLModel()
606
+ if export_bert_and_ssl:
607
+ s = ExportSSLModel(torch.jit.trace(ssl,example_inputs=(ref_audio)))
608
+ ssl_path = os.path.join(output_path, "ssl_model.pt")
609
+ torch.jit.script(s).save(ssl_path)
610
+ print('#### exported ssl ####')
611
+ export_bert(output_path)
612
+ else:
613
+ s = ExportSSLModel(ssl)
614
+
615
+ print(f"device: {device}")
616
+
617
+
618
+ ref_seq_id,ref_bert_T,ref_norm_text = get_phones_and_bert(ref_text,"all_zh",'v2')
619
+ ref_seq = torch.LongTensor([ref_seq_id]).to(device)
620
+ ref_bert = ref_bert_T.T.to(ref_seq.device)
621
+ text_seq_id,text_bert_T,norm_text = get_phones_and_bert("这是一条测试语音,说什么无所谓,只是给它一个例子","all_zh",'v2')
622
+ text_seq = torch.LongTensor([text_seq_id]).to(device)
623
+ text_bert = text_bert_T.T.to(text_seq.device)
624
+
625
+ ssl_content = ssl(ref_audio).to(device)
626
+
627
+ # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth"
628
+ vits = VitsModel(vits_path).to(device)
629
+ vits.eval()
630
+
631
+ # gpt_path = "GPT_weights_v2/xw-e15.ckpt"
632
+ # dict_s1 = torch.load(gpt_path, map_location=device)
633
+ dict_s1 = torch.load(gpt_path)
634
+ raw_t2s = get_raw_t2s_model(dict_s1).to(device)
635
+ print('#### get_raw_t2s_model ####')
636
+ print(raw_t2s.config)
637
+ t2s_m = T2SModel(raw_t2s)
638
+ t2s_m.eval()
639
+ t2s = torch.jit.script(t2s_m).to(device)
640
+ print('#### script t2s_m ####')
641
+
642
+ print("vits.hps.data.sampling_rate:",vits.hps.data.sampling_rate)
643
+ gpt_sovits = GPT_SoVITS(t2s,vits).to(device)
644
+ gpt_sovits.eval()
645
+
646
+ ref_audio_sr = s.resample(ref_audio,16000,32000).to(device)
647
+
648
+ torch._dynamo.mark_dynamic(ssl_content, 2)
649
+ torch._dynamo.mark_dynamic(ref_audio_sr, 1)
650
+ torch._dynamo.mark_dynamic(ref_seq, 1)
651
+ torch._dynamo.mark_dynamic(text_seq, 1)
652
+ torch._dynamo.mark_dynamic(ref_bert, 0)
653
+ torch._dynamo.mark_dynamic(text_bert, 0)
654
+
655
+ with torch.no_grad():
656
+ gpt_sovits_export = torch.jit.trace(
657
+ gpt_sovits,
658
+ example_inputs=(
659
+ ssl_content,
660
+ ref_audio_sr,
661
+ ref_seq,
662
+ text_seq,
663
+ ref_bert,
664
+ text_bert))
665
+
666
+ gpt_sovits_path = os.path.join(output_path, "gpt_sovits_model.pt")
667
+ gpt_sovits_export.save(gpt_sovits_path)
668
+ print('#### exported gpt_sovits ####')
669
+
670
+ @torch.jit.script
671
+ def parse_audio(ref_audio):
672
+ ref_audio_16k = torchaudio.functional.resample(ref_audio,48000,16000).float()#.to(ref_audio.device)
673
+ ref_audio_sr = torchaudio.functional.resample(ref_audio,48000,32000).float()#.to(ref_audio.device)
674
+ return ref_audio_16k,ref_audio_sr
675
+
676
+ @torch.jit.script
677
+ def resamplex(ref_audio:torch.Tensor,src_sr:int,dst_sr:int)->torch.Tensor:
678
+ return torchaudio.functional.resample(ref_audio,src_sr,dst_sr).float()
679
+
680
+ class GPT_SoVITS(nn.Module):
681
+ def __init__(self, t2s:T2SModel,vits:VitsModel):
682
+ super().__init__()
683
+ self.t2s = t2s
684
+ self.vits = vits
685
+
686
+ def forward(self, ssl_content:torch.Tensor, ref_audio_sr:torch.Tensor, ref_seq:Tensor, text_seq:Tensor, ref_bert:Tensor, text_bert:Tensor, speed=1.0):
687
+ codes = self.vits.vq_model.extract_latent(ssl_content)
688
+ prompt_semantic = codes[0, 0]
689
+ prompts = prompt_semantic.unsqueeze(0)
690
+
691
+ pred_semantic = self.t2s(prompts, ref_seq, text_seq, ref_bert, text_bert)
692
+ audio = self.vits(text_seq, pred_semantic, ref_audio_sr, speed)
693
+ return audio
694
+
695
+ def test():
696
+ parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool")
697
+ parser.add_argument('--gpt_model', required=True, help="Path to the GPT model file")
698
+ parser.add_argument('--sovits_model', required=True, help="Path to the SoVITS model file")
699
+ parser.add_argument('--ref_audio', required=True, help="Path to the reference audio file")
700
+ parser.add_argument('--ref_text', required=True, help="Path to the reference text file")
701
+ parser.add_argument('--output_path', required=True, help="Path to the output directory")
702
+
703
+
704
+ args = parser.parse_args()
705
+ gpt_path = args.gpt_model
706
+ vits_path = args.sovits_model
707
+ ref_audio_path = args.ref_audio
708
+ ref_text = args.ref_text
709
+
710
+ tokenizer = AutoTokenizer.from_pretrained(bert_path)
711
+ # bert_model = AutoModelForMaskedLM.from_pretrained(bert_path,output_hidden_states=True,torchscript=True)
712
+ # bert = MyBertModel(bert_model)
713
+ my_bert = torch.jit.load("onnx/bert_model.pt",map_location='cuda')
714
+
715
+ # dict_s1 = torch.load(gpt_path, map_location="cuda")
716
+ # raw_t2s = get_raw_t2s_model(dict_s1)
717
+ # t2s = T2SModel(raw_t2s)
718
+ # t2s.eval()
719
+ # t2s = torch.jit.load("onnx/xw/t2s_model.pt",map_location='cuda')
720
+
721
+ # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth"
722
+ # vits = VitsModel(vits_path)
723
+ # vits.eval()
724
+
725
+ # ssl = ExportSSLModel(SSLModel()).to('cuda')
726
+ # ssl.eval()
727
+ ssl = torch.jit.load("onnx/by/ssl_model.pt",map_location='cuda')
728
+
729
+ # gpt_sovits = moyoyo_tts(t2s,vits)
730
+ gpt_sovits = torch.jit.load("onnx/by/gpt_sovits_model.pt",map_location='cuda')
731
+
732
+ ref_seq_id,ref_bert_T,ref_norm_text = get_phones_and_bert(ref_text,"all_zh",'v2')
733
+ ref_seq = torch.LongTensor([ref_seq_id])
734
+ ref_bert = ref_bert_T.T.to(ref_seq.device)
735
+ # text_seq_id,text_bert_T,norm_text = get_phones_and_bert("昨天晚上看见征兵文书,知道君主在大规模征兵,那么多卷征兵文册,每一卷上都有父亲的名字.","all_zh",'v2')
736
+ text = "昨天晚上看见征兵文书,知道君主在大规模征兵,那么多卷征兵文册,每一卷上都有父亲的名字."
737
+
738
+ text_seq_id,text_bert_T,norm_text = get_phones_and_bert(text,"all_zh",'v2')
739
+
740
+ test_bert = tokenizer(text, return_tensors="pt")
741
+ word2ph = []
742
+ for c in text:
743
+ if c in [',','。',':','?',"?",",","."]:
744
+ word2ph.append(1)
745
+ else:
746
+ word2ph.append(2)
747
+ test_bert['word2ph'] = torch.Tensor(word2ph).int()
748
+
749
+ test_bert = my_bert(
750
+ test_bert['input_ids'].to('cuda'),
751
+ test_bert['attention_mask'].to('cuda'),
752
+ test_bert['token_type_ids'].to('cuda'),
753
+ test_bert['word2ph'].to('cuda')
754
+ )
755
+
756
+ text_seq = torch.LongTensor([text_seq_id])
757
+ text_bert = text_bert_T.T.to(text_seq.device)
758
+
759
+ print('text_bert:',text_bert.shape,text_bert)
760
+ print('test_bert:',test_bert.shape,test_bert)
761
+ print(torch.allclose(text_bert.to('cuda'),test_bert))
762
+
763
+ print('text_seq:',text_seq.shape)
764
+ print('text_bert:',text_bert.shape,text_bert.type())
765
+
766
+ #[1,N]
767
+ ref_audio = torch.tensor([load_audio(ref_audio_path, 16000)]).float().to('cuda')
768
+ print('ref_audio:',ref_audio.shape)
769
+
770
+ ref_audio_sr = ssl.resample(ref_audio,16000,32000)
771
+ print('start ssl')
772
+ ssl_content = ssl(ref_audio)
773
+
774
+ print('start gpt_sovits:')
775
+ print('ssl_content:',ssl_content.shape)
776
+ print('ref_audio_sr:',ref_audio_sr.shape)
777
+ print('ref_seq:',ref_seq.shape)
778
+ ref_seq=ref_seq.to('cuda')
779
+ print('text_seq:',text_seq.shape)
780
+ text_seq=text_seq.to('cuda')
781
+ print('ref_bert:',ref_bert.shape)
782
+ ref_bert=ref_bert.to('cuda')
783
+ print('text_bert:',text_bert.shape)
784
+ text_bert=text_bert.to('cuda')
785
+
786
+ with torch.no_grad():
787
+ audio = gpt_sovits(ssl_content, ref_audio_sr, ref_seq, text_seq, ref_bert, test_bert)
788
+ print('start write wav')
789
+ soundfile.write("out.wav", audio.detach().cpu().numpy(), 32000)
790
+
791
+
792
+ import text
793
+ import json
794
+
795
+ def export_symbel(version='v2'):
796
+ if version=='v1':
797
+ symbols = text._symbol_to_id_v1
798
+ with open(f"onnx/symbols_v1.json", "w") as file:
799
+ json.dump(symbols, file, indent=4)
800
+ else:
801
+ symbols = text._symbol_to_id_v2
802
+ with open(f"onnx/symbols_v2.json", "w") as file:
803
+ json.dump(symbols, file, indent=4)
804
+
805
+ def main():
806
+ parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool")
807
+ parser.add_argument('--gpt_model', required=True, help="Path to the GPT model file")
808
+ parser.add_argument('--sovits_model', required=True, help="Path to the SoVITS model file")
809
+ parser.add_argument('--ref_audio', required=True, help="Path to the reference audio file")
810
+ parser.add_argument('--ref_text', required=True, help="Path to the reference text file")
811
+ parser.add_argument('--output_path', required=True, help="Path to the output directory")
812
+ parser.add_argument('--export_common_model', action='store_true', help="Export Bert and SSL model")
813
+ parser.add_argument('--device', help="Device to use")
814
+
815
+ args = parser.parse_args()
816
+ export(
817
+ gpt_path=args.gpt_model,
818
+ vits_path=args.sovits_model,
819
+ ref_audio_path=args.ref_audio,
820
+ ref_text=args.ref_text,
821
+ output_path=args.output_path,
822
+ device=args.device,
823
+ export_bert_and_ssl=args.export_common_model,
824
+ )
825
+
826
+ import inference_webui
827
+ if __name__ == "__main__":
828
+ inference_webui.is_half=False
829
+ inference_webui.dtype=torch.float32
830
+ main()
831
+ # test()
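
For reference, programmatic use of the exporter mirrors main() above; all paths below are placeholders, and half precision is disabled first just as in __main__:

    import torch
    import inference_webui

    inference_webui.is_half = False
    inference_webui.dtype = torch.float32
    export(
        gpt_path="GPT_weights_v2/my_model.ckpt",      # placeholder checkpoint paths
        vits_path="SoVITS_weights_v2/my_model.pth",
        ref_audio_path="ref.wav",
        ref_text="这是一条参考文本。",
        output_path="onnx/my_model",
        export_bert_and_ssl=True,
        device="cpu",
    )
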
feature_extractor/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ from . import cnhubert, whisper_enc
2
+
3
+ content_module_map = {
4
+ 'cnhubert': cnhubert,
5
+ 'whisper': whisper_enc
6
+ }
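
Both modules expose the same get_model()/get_content() pair, so callers can select an extractor by name. A minimal sketch (cnhubert additionally needs cnhubert_base_path set before get_model() is called; the path is an assumption):

    from feature_extractor import content_module_map, cnhubert

    cnhubert.cnhubert_base_path = "moyoyo_tts/pretrained_models/chinese-hubert-base"  # assumed location
    extractor = content_module_map["cnhubert"]        # or "whisper"
    model = extractor.get_model()
    # feats = extractor.get_content(model, wav_16k_tensor)  # -> (1, channels, frames)
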
feature_extractor/cnhubert.py ADDED
@@ -0,0 +1,108 @@
1
+ import os
2
+
3
+ import torch
4
+ from transformers import logging as tf_logging
5
+
6
+ tf_logging.set_verbosity_error()
7
+
8
+ import logging
9
+ logging.getLogger("numba").setLevel(logging.WARNING)
10
+
11
+ from transformers import (
12
+ Wav2Vec2FeatureExtractor,
13
+ HubertModel,
14
+ )
15
+
16
+ import moyoyo_tts.utils as utils
17
+ import torch.nn as nn
18
+
19
+ cnhubert_base_path = None
20
+
21
+
22
+ class CNHubert(nn.Module):
23
+ def __init__(self, base_path:str=None):
24
+ super().__init__()
25
+ if base_path is None:
26
+ base_path = cnhubert_base_path
27
+ if not os.path.exists(base_path):
28
+ raise FileNotFoundError(base_path)
29
+ self.model = HubertModel.from_pretrained(base_path, local_files_only=True)
30
+ self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
31
+ base_path, local_files_only=True
32
+ )
33
+
34
+ def forward(self, x):
35
+ input_values = self.feature_extractor(
36
+ x, return_tensors="pt", sampling_rate=16000
37
+ ).input_values.to(x.device)
38
+ feats = self.model(input_values)["last_hidden_state"]
39
+ return feats
40
+
41
+
42
+ # class CNHubertLarge(nn.Module):
43
+ # def __init__(self):
44
+ # super().__init__()
45
+ # self.model = HubertModel.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large")
46
+ # self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large")
47
+ # def forward(self, x):
48
+ # input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
49
+ # feats = self.model(input_values)["last_hidden_state"]
50
+ # return feats
51
+ #
52
+ # class CVec(nn.Module):
53
+ # def __init__(self):
54
+ # super().__init__()
55
+ # self.model = HubertModel.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base")
56
+ # self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base")
57
+ # def forward(self, x):
58
+ # input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
59
+ # feats = self.model(input_values)["last_hidden_state"]
60
+ # return feats
61
+ #
62
+ # class cnw2v2base(nn.Module):
63
+ # def __init__(self):
64
+ # super().__init__()
65
+ # self.model = Wav2Vec2Model.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base")
66
+ # self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base")
67
+ # def forward(self, x):
68
+ # input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
69
+ # feats = self.model(input_values)["last_hidden_state"]
70
+ # return feats
71
+
72
+
73
+ def get_model():
74
+ model = CNHubert()
75
+ model.eval()
76
+ return model
77
+
78
+
79
+ # def get_large_model():
80
+ # model = CNHubertLarge()
81
+ # model.eval()
82
+ # return model
83
+ #
84
+ # def get_model_cvec():
85
+ # model = CVec()
86
+ # model.eval()
87
+ # return model
88
+ #
89
+ # def get_model_cnw2v2base():
90
+ # model = cnw2v2base()
91
+ # model.eval()
92
+ # return model
93
+
94
+
95
+ def get_content(hmodel, wav_16k_tensor):
96
+ with torch.no_grad():
97
+ feats = hmodel(wav_16k_tensor)
98
+ return feats.transpose(1, 2)
99
+
100
+
101
+ if __name__ == "__main__":
102
+ model = get_model()
103
+ src_path = "/Users/Shared/原音频2.wav"
104
+ wav_16k_tensor = utils.load_wav_to_torch_and_resample(src_path, 16000)
105
+ model = model
106
+ wav_16k_tensor = wav_16k_tensor
107
+ feats = get_content(model, wav_16k_tensor)
108
+ print(feats.shape)
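
Typical usage, a minimal sketch assuming a local chinese-hubert-base checkout at the path used elsewhere in this repo:

    import torch
    from feature_extractor import cnhubert

    cnhubert.cnhubert_base_path = "moyoyo_tts/pretrained_models/chinese-hubert-base"
    model = cnhubert.get_model()
    wav_16k = torch.randn(16000)             # one second of 16 kHz audio
    feats = cnhubert.get_content(model, wav_16k)
    print(feats.shape)                       # torch.Size([1, 768, 49]): hidden dim first after the transpose
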
feature_extractor/whisper_enc.py ADDED
@@ -0,0 +1,25 @@
1
+ import torch
2
+
3
+
4
+ def get_model():
5
+ import whisper
6
+
7
+ model = whisper.load_model("small", device="cpu")
8
+
9
+ return model.encoder
10
+
11
+
12
+ def get_content(model=None, wav_16k_tensor=None):
13
+ from whisper import log_mel_spectrogram, pad_or_trim
14
+
15
+ dev = next(model.parameters()).device
16
+ mel = log_mel_spectrogram(wav_16k_tensor).to(dev)[:, :3000]
17
+ # if torch.cuda.is_available():
18
+ # mel = mel.to(torch.float16)
19
+ feature_len = mel.shape[-1] // 2
20
+ assert mel.shape[-1] < 3000, "Input audio is too long; only clips up to 30 seconds are supported"
21
+ with torch.no_grad():
22
+ feature = model(pad_or_trim(mel, 3000).unsqueeze(0))[
23
+ :1, :feature_len, :
24
+ ].transpose(1, 2)
25
+ return feature
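
Same calling convention as the cnhubert extractor; the assert above caps input at 30 s of mel frames. A sketch, assuming the openai-whisper package is installed:

    import torch
    from feature_extractor import whisper_enc

    model = whisper_enc.get_model()          # Whisper "small" encoder, loaded on CPU
    wav_16k = torch.randn(16000 * 5)         # five seconds of 16 kHz audio
    feats = whisper_enc.get_content(model, wav_16k)
    print(feats.shape)                       # torch.Size([1, 768, 250]): half the mel frame count
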
inference_gui.py ADDED
@@ -0,0 +1,310 @@
1
+ import os
2
+ import sys
3
+ from PyQt5.QtCore import QEvent
4
+ from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit
5
+ from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox
6
+ import soundfile as sf
7
+
8
+ from tools.i18n.i18n import I18nAuto
9
+ i18n = I18nAuto()
10
+
11
+ from inference_webui import gpt_path, sovits_path, change_gpt_weights, change_sovits_weights, get_tts_wav
12
+
13
+
14
+ class GPTSoVITSGUI(QMainWindow):
15
+ GPT_Path = gpt_path
16
+ SoVITS_Path = sovits_path
17
+
18
+ def __init__(self):
19
+ super().__init__()
20
+
21
+ self.setWindowTitle('GPT-SoVITS GUI')
22
+ self.setGeometry(800, 450, 950, 850)
23
+
24
+ self.setStyleSheet("""
25
+ QWidget {
26
+ background-color: #a3d3b1;
27
+ }
28
+
29
+ QTabWidget::pane {
30
+ background-color: #a3d3b1;
31
+ }
32
+
33
+ QTabWidget::tab-bar {
34
+ alignment: left;
35
+ }
36
+
37
+ QTabBar::tab {
38
+ background: #8da4bf;
39
+ color: #ffffff;
40
+ padding: 8px;
41
+ }
42
+
43
+ QTabBar::tab:selected {
44
+ background: #2a3f54;
45
+ }
46
+
47
+ QLabel {
48
+ color: #000000;
49
+ }
50
+
51
+ QPushButton {
52
+ background-color: #4CAF50;
53
+ color: white;
54
+ padding: 8px;
55
+ border: 1px solid #4CAF50;
56
+ border-radius: 4px;
57
+ }
58
+
59
+ QPushButton:hover {
60
+ background-color: #45a049;
61
+ border: 1px solid #45a049;
62
+ box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.1);
63
+ }
64
+ """)
65
+
66
+ license_text = (
67
+ "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. "
68
+ "如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
69
+ license_label = QLabel(license_text)
70
+ license_label.setWordWrap(True)
71
+
72
+ self.GPT_model_label = QLabel("选择GPT模型:")
73
+ self.GPT_model_input = QLineEdit()
74
+ self.GPT_model_input.setPlaceholderText("拖拽或选择文件")
75
+ self.GPT_model_input.setText(self.GPT_Path)
76
+ self.GPT_model_input.setReadOnly(True)
77
+ self.GPT_model_button = QPushButton("选择GPT模型文件")
78
+ self.GPT_model_button.clicked.connect(self.select_GPT_model)
79
+
80
+ self.SoVITS_model_label = QLabel("选择SoVITS模型:")
81
+ self.SoVITS_model_input = QLineEdit()
82
+ self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件")
83
+ self.SoVITS_model_input.setText(self.SoVITS_Path)
84
+ self.SoVITS_model_input.setReadOnly(True)
85
+ self.SoVITS_model_button = QPushButton("选择SoVITS模型文件")
86
+ self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model)
87
+
88
+ self.ref_audio_label = QLabel("上传参考音频:")
89
+ self.ref_audio_input = QLineEdit()
90
+ self.ref_audio_input.setPlaceholderText("拖拽或选择文件")
91
+ self.ref_audio_input.setReadOnly(True)
92
+ self.ref_audio_button = QPushButton("选择音频文件")
93
+ self.ref_audio_button.clicked.connect(self.select_ref_audio)
94
+
95
+ self.ref_text_label = QLabel("参考音频文本:")
96
+ self.ref_text_input = QLineEdit()
97
+ self.ref_text_input.setPlaceholderText("直接输入文字或上传文本")
98
+ self.ref_text_button = QPushButton("上传文本")
99
+ self.ref_text_button.clicked.connect(self.upload_ref_text)
100
+
101
+ self.ref_language_label = QLabel("参考音频语言:")
102
+ self.ref_language_combobox = QComboBox()
103
+ self.ref_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
104
+ self.ref_language_combobox.setCurrentText("多语种混合")
105
+
106
+ self.target_text_label = QLabel("合成目标文本:")
107
+ self.target_text_input = QLineEdit()
108
+ self.target_text_input.setPlaceholderText("直接输入文字或上传文本")
109
+ self.target_text_button = QPushButton("上传文本")
110
+ self.target_text_button.clicked.connect(self.upload_target_text)
111
+
112
+ self.target_language_label = QLabel("合成音频语言:")
113
+ self.target_language_combobox = QComboBox()
114
+ self.target_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
115
+ self.target_language_combobox.setCurrentText("多语种混合")
116
+
117
+ self.output_label = QLabel("输出音频路径:")
118
+ self.output_input = QLineEdit()
119
+ self.output_input.setPlaceholderText("拖拽或选择文件")
120
+ self.output_input.setReadOnly(True)
121
+ self.output_button = QPushButton("选择文件夹")
122
+ self.output_button.clicked.connect(self.select_output_path)
123
+
124
+ self.output_text = QTextEdit()
125
+ self.output_text.setReadOnly(True)
126
+
127
+ self.add_drag_drop_events([
128
+ self.GPT_model_input,
129
+ self.SoVITS_model_input,
130
+ self.ref_audio_input,
131
+ self.ref_text_input,
132
+ self.target_text_input,
133
+ self.output_input,
134
+ ])
135
+
136
+ self.synthesize_button = QPushButton("合成")
137
+ self.synthesize_button.clicked.connect(self.synthesize)
138
+
139
+ self.clear_output_button = QPushButton("清空输出")
140
+ self.clear_output_button.clicked.connect(self.clear_output)
141
+
142
+ self.status_bar = QStatusBar()
143
+
144
+ main_layout = QVBoxLayout()
145
+
146
+ input_layout = QGridLayout(self)
147
+ input_layout.setSpacing(10)
148
+
149
+ input_layout.addWidget(license_label, 0, 0, 1, 3)
150
+
151
+ input_layout.addWidget(self.GPT_model_label, 1, 0)
152
+ input_layout.addWidget(self.GPT_model_input, 2, 0, 1, 2)
153
+ input_layout.addWidget(self.GPT_model_button, 2, 2)
154
+
155
+ input_layout.addWidget(self.SoVITS_model_label, 3, 0)
156
+ input_layout.addWidget(self.SoVITS_model_input, 4, 0, 1, 2)
157
+ input_layout.addWidget(self.SoVITS_model_button, 4, 2)
158
+
159
+ input_layout.addWidget(self.ref_audio_label, 5, 0)
160
+ input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2)
161
+ input_layout.addWidget(self.ref_audio_button, 6, 2)
162
+
163
+ input_layout.addWidget(self.ref_language_label, 7, 0)
164
+ input_layout.addWidget(self.ref_language_combobox, 8, 0, 1, 1)
165
+ input_layout.addWidget(self.ref_text_label, 9, 0)
166
+ input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2)
167
+ input_layout.addWidget(self.ref_text_button, 10, 2)
168
+
169
+ input_layout.addWidget(self.target_language_label, 11, 0)
170
+ input_layout.addWidget(self.target_language_combobox, 12, 0, 1, 1)
171
+ input_layout.addWidget(self.target_text_label, 13, 0)
172
+ input_layout.addWidget(self.target_text_input, 14, 0, 1, 2)
173
+ input_layout.addWidget(self.target_text_button, 14, 2)
174
+
175
+ input_layout.addWidget(self.output_label, 15, 0)
176
+ input_layout.addWidget(self.output_input, 16, 0, 1, 2)
177
+ input_layout.addWidget(self.output_button, 16, 2)
178
+
179
+ main_layout.addLayout(input_layout)
180
+
181
+ output_layout = QVBoxLayout()
182
+ output_layout.addWidget(self.output_text)
183
+ main_layout.addLayout(output_layout)
184
+
185
+ main_layout.addWidget(self.synthesize_button)
186
+
187
+ main_layout.addWidget(self.clear_output_button)
188
+
189
+ main_layout.addWidget(self.status_bar)
190
+
191
+ self.central_widget = QWidget()
192
+ self.central_widget.setLayout(main_layout)
193
+ self.setCentralWidget(self.central_widget)
194
+
195
+ def dragEnterEvent(self, event):
196
+ if event.mimeData().hasUrls():
197
+ event.acceptProposedAction()
198
+
199
+ def dropEvent(self, event):
200
+ if event.mimeData().hasUrls():
201
+ file_paths = [url.toLocalFile() for url in event.mimeData().urls()]
202
+ if len(file_paths) == 1:
203
+ self.update_ref_audio(file_paths[0])
204
+ else:
205
+ self.update_ref_audio(", ".join(file_paths))
206
+
207
+ def add_drag_drop_events(self, widgets):
208
+ for widget in widgets:
209
+ widget.setAcceptDrops(True)
210
+ widget.installEventFilter(self)
211
+
212
+ def eventFilter(self, obj, event):
213
+ if event.type() in (QEvent.DragEnter, QEvent.Drop):
214
+ mime_data = event.mimeData()
215
+ if mime_data.hasUrls():
216
+ event.acceptProposedAction()
217
+
218
+ return super().eventFilter(obj, event)
219
+
220
+ def select_GPT_model(self):
221
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择GPT模型文件", "", "GPT Files (*.ckpt)")
222
+ if file_path:
223
+ self.GPT_model_input.setText(file_path)
224
+
225
+ def select_SoVITS_model(self):
226
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择SoVITS模型文件", "", "SoVITS Files (*.pth)")
227
+ if file_path:
228
+ self.SoVITS_model_input.setText(file_path)
229
+
230
+ def select_ref_audio(self):
231
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择参考音频文件", "", "Audio Files (*.wav *.mp3)")
232
+ if file_path:
233
+ self.update_ref_audio(file_path)
234
+
235
+ def upload_ref_text(self):
236
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
237
+ if file_path:
238
+ with open(file_path, 'r', encoding='utf-8') as file:
239
+ content = file.read()
240
+ self.ref_text_input.setText(content)
241
+
242
+ def upload_target_text(self):
243
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
244
+ if file_path:
245
+ with open(file_path, 'r', encoding='utf-8') as file:
246
+ content = file.read()
247
+ self.target_text_input.setText(content)
248
+
249
+ def select_output_path(self):
250
+ options = QFileDialog.Options()
251
+ options |= QFileDialog.DontUseNativeDialog
252
+ options |= QFileDialog.ShowDirsOnly
253
+
254
+ folder_dialog = QFileDialog()
255
+ folder_dialog.setOptions(options)
256
+ folder_dialog.setFileMode(QFileDialog.Directory)
257
+
258
+ if folder_dialog.exec_():
259
+ folder_path = folder_dialog.selectedFiles()[0]
260
+ self.output_input.setText(folder_path)
261
+
262
+ def update_ref_audio(self, file_path):
263
+ self.ref_audio_input.setText(file_path)
264
+
265
+ def clear_output(self):
266
+ self.output_text.clear()
267
+
268
+ def synthesize(self):
269
+ GPT_model_path = self.GPT_model_input.text()
270
+ SoVITS_model_path = self.SoVITS_model_input.text()
271
+ ref_audio_path = self.ref_audio_input.text()
272
+ language_combobox = self.ref_language_combobox.currentText()
273
+ language_combobox = i18n(language_combobox)
274
+ ref_text = self.ref_text_input.text()
275
+ target_language_combobox = self.target_language_combobox.currentText()
276
+ target_language_combobox = i18n(target_language_combobox)
277
+ target_text = self.target_text_input.text()
278
+ output_path = self.output_input.text()
279
+
280
+ if GPT_model_path != self.GPT_Path:
281
+ change_gpt_weights(gpt_path=GPT_model_path)
282
+ self.GPT_Path = GPT_model_path
283
+ if SoVITS_model_path != self.SoVITS_Path:
284
+ change_sovits_weights(sovits_path=SoVITS_model_path)
285
+ self.SoVITS_Path = SoVITS_model_path
286
+
287
+ synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
288
+ prompt_text=ref_text,
289
+ prompt_language=language_combobox,
290
+ text=target_text,
291
+ text_language=target_language_combobox)
292
+
293
+ result_list = list(synthesis_result)
294
+
295
+ if result_list:
296
+ last_sampling_rate, last_audio_data = result_list[-1]
297
+ output_wav_path = os.path.join(output_path, "output.wav")
298
+ sf.write(output_wav_path, last_audio_data, last_sampling_rate)
299
+
300
+ result = "Audio saved to " + output_wav_path
301
+
302
+ self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000)
303
+ self.output_text.append("处理结果:\n" + result)
304
+
305
+
306
+ if __name__ == '__main__':
307
+ app = QApplication(sys.argv)
308
+ mainWin = GPTSoVITSGUI()
309
+ mainWin.show()
310
+ sys.exit(app.exec_())
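
The GUI is a thin wrapper; the same pipeline can be driven headless through get_tts_wav (defined in inference_webui below), which yields (sampling_rate, waveform) chunks exactly as synthesize() consumes them. A sketch with placeholder paths; the language labels must match the active i18n locale (Chinese shown here):

    import soundfile as sf
    from inference_webui import get_tts_wav

    chunks = get_tts_wav(
        ref_wav_path="ref.wav",              # placeholder: a 3-10 s reference clip
        prompt_text="参考音频对应的文本。",
        prompt_language="中文",
        text="要合成的目标文本。",
        text_language="中文",
    )
    sr, audio = list(chunks)[-1]
    sf.write("output.wav", audio, sr)
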
inference_webui.py ADDED
@@ -0,0 +1,778 @@
1
+ '''
2
+ Mixed Chinese-English recognition
3
+ Mixed Japanese-English recognition
4
+ Multilingual: split the text and detect each segment's language
5
+ Treat the whole text as Chinese
6
+ Treat the whole text as English
7
+ Treat the whole text as Japanese
8
+ '''
9
+ import json
10
+ import logging
11
+ import os
12
+ import re
13
+ import sys
14
+ import traceback
15
+
16
+
17
+ logging.getLogger("markdown_it").setLevel(logging.ERROR)
18
+ logging.getLogger("urllib3").setLevel(logging.ERROR)
19
+ logging.getLogger("httpcore").setLevel(logging.ERROR)
20
+ logging.getLogger("httpx").setLevel(logging.ERROR)
21
+ logging.getLogger("asyncio").setLevel(logging.ERROR)
22
+ logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
23
+ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
24
+ logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
25
+ import LangSegment.LangSegment as LangSegment
26
+ import torch
27
+
28
+ try:
29
+ import gradio.analytics as analytics
30
+ analytics.version_check = lambda:None
31
+ except:...
32
+
33
+ version=os.environ.get("version","v2")
34
+ pretrained_sovits_name=["moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "moyoyo_tts/pretrained_models/s2G488k.pth"]
35
+ pretrained_gpt_name=["moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "moyoyo_tts/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
36
+
37
+ _ =[[],[]]
38
+ for i in range(2):
39
+ if os.path.exists(pretrained_gpt_name[i]):
40
+ _[0].append(pretrained_gpt_name[i])
41
+ if os.path.exists(pretrained_sovits_name[i]):
42
+ _[-1].append(pretrained_sovits_name[i])
43
+ pretrained_gpt_name,pretrained_sovits_name = _
44
+
45
+
46
+
47
+ if os.path.exists(f"./weight.json"):
48
+ pass
49
+ else:
50
+ with open(f"./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)
51
+
52
+ with open(f"./weight.json", 'r', encoding="utf-8") as file:
53
+ weight_data = file.read()
54
+ weight_data=json.loads(weight_data)
55
+ gpt_path = os.environ.get(
56
+ "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
57
+ sovits_path = os.environ.get(
58
+ "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
59
+ if isinstance(gpt_path,list):
60
+ gpt_path = gpt_path[0]
61
+ if isinstance(sovits_path,list):
62
+ sovits_path = sovits_path[0]
63
+
64
+ # gpt_path = os.environ.get(
65
+ # "gpt_path", pretrained_gpt_name
66
+ # )
67
+ # sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
68
+ cnhubert_base_path = os.environ.get(
69
+ "cnhubert_base_path", "moyoyo_tts/pretrained_models/chinese-hubert-base"
70
+ )
71
+ bert_path = os.environ.get(
72
+ "bert_path", "moyoyo_tts/pretrained_models/chinese-roberta-wwm-ext-large"
73
+ )
74
+ infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
75
+ infer_ttswebui = int(infer_ttswebui)
76
+ is_share = os.environ.get("is_share", "False")
77
+ is_share = eval(is_share)
78
+ if "_CUDA_VISIBLE_DEVICES" in os.environ:
79
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
80
+ is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
81
+ punctuation = set(['!', '?', '…', ',', '.', '-'," "])
82
+ import gradio as gr
83
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
84
+ import numpy as np
85
+ import librosa
86
+ from feature_extractor import cnhubert
87
+
88
+ cnhubert.cnhubert_base_path = cnhubert_base_path
89
+
90
+ from module.models import SynthesizerTrn
91
+ from AR.models.t2s_lightning_module import Text2SemanticLightningModule
92
+ from text import cleaned_text_to_sequence
93
+ from text.cleaner import clean_text
94
+ from time import time as ttime
95
+ from module.mel_processing import spectrogram_torch
96
+ from tools.my_utils import load_audio
97
+ from tools.i18n.i18n import I18nAuto, scan_language_list
98
+
99
+ language=os.environ.get("language","Auto")
100
+ language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
101
+ i18n = I18nAuto(language=language)
102
+
103
+ # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # make sure this is also set when the inference UI is launched directly
104
+
105
+ if torch.cuda.is_available():
106
+ device = "cuda"
107
+ else:
108
+ device = "cpu"
109
+
110
+ dict_language_v1 = {
111
+ i18n("中文"): "all_zh",#全部按中文识别
112
+ i18n("英文"): "en",#全部按英文识别#######不变
113
+ i18n("日文"): "all_ja",#全部按日文识别
114
+ i18n("中英混合"): "zh",#按中英混合识别####不变
115
+ i18n("日英混合"): "ja",#按日英混合识别####不变
116
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
117
+ }
118
+ dict_language_v2 = {
119
+ i18n("中文"): "all_zh",#全部按中文识别
120
+ i18n("英文"): "en",#全部按英文识别#######不变
121
+ i18n("日文"): "all_ja",#全部按日文识别
122
+ i18n("粤语"): "all_yue",#全部按中文识别
123
+ i18n("韩文"): "all_ko",#全部按韩文识别
124
+ i18n("中英混合"): "zh",#按中英混合识别####不变
125
+ i18n("日英混合"): "ja",#按日英混合识别####不变
126
+ i18n("粤英混合"): "yue",#按粤英混合识别####不变
127
+ i18n("韩英混合"): "ko",#按韩英混合识别####不变
128
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
129
+ i18n("多语种混合(粤语)"): "auto_yue",#多语种启动切分识别语种
130
+ }
131
+ dict_language = dict_language_v1 if version == 'v1' else dict_language_v2
132
+
133
+ tokenizer = AutoTokenizer.from_pretrained(bert_path)
134
+ bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
135
+ if is_half == True:
136
+ bert_model = bert_model.half().to(device)
137
+ else:
138
+ bert_model = bert_model.to(device)
139
+
140
+
141
+ def get_bert_feature(text, word2ph):
142
+ with torch.no_grad():
143
+ inputs = tokenizer(text, return_tensors="pt")
144
+ for i in inputs:
145
+ inputs[i] = inputs[i].to(device)
146
+ res = bert_model(**inputs, output_hidden_states=True)
147
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
148
+ assert len(word2ph) == len(text)
149
+ phone_level_feature = []
150
+ for i in range(len(word2ph)):
151
+ repeat_feature = res[i].repeat(word2ph[i], 1)
152
+ phone_level_feature.append(repeat_feature)
153
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
154
+ return phone_level_feature.T
155
+
156
+
157
+ class DictToAttrRecursive(dict):
158
+ def __init__(self, input_dict):
159
+ super().__init__(input_dict)
160
+ for key, value in input_dict.items():
161
+ if isinstance(value, dict):
162
+ value = DictToAttrRecursive(value)
163
+ self[key] = value
164
+ setattr(self, key, value)
165
+
166
+ def __getattr__(self, item):
167
+ try:
168
+ return self[item]
169
+ except KeyError:
170
+ raise AttributeError(f"Attribute {item} not found")
171
+
172
+ def __setattr__(self, key, value):
173
+ if isinstance(value, dict):
174
+ value = DictToAttrRecursive(value)
175
+ super(DictToAttrRecursive, self).__setitem__(key, value)
176
+ super().__setattr__(key, value)
177
+
178
+ def __delattr__(self, item):
179
+ try:
180
+ del self[item]
181
+ except KeyError:
182
+ raise AttributeError(f"Attribute {item} not found")
183
+
184
+
185
+ ssl_model = cnhubert.get_model()
186
+ if is_half == True:
187
+ ssl_model = ssl_model.half().to(device)
188
+ else:
189
+ ssl_model = ssl_model.to(device)
190
+
191
+
192
+ def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
193
+ global vq_model, hps, version, dict_language
194
+ dict_s2 = torch.load(sovits_path, map_location="cpu")
195
+ hps = dict_s2["config"]
196
+ hps = DictToAttrRecursive(hps)
197
+ hps.model.semantic_frame_rate = "25hz"
198
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
199
+ hps.model.version = "v1"
200
+ else:
201
+ hps.model.version = "v2"
202
+ version = hps.model.version
203
+ # print("sovits版本:",hps.model.version)
204
+ vq_model = SynthesizerTrn(
205
+ hps.data.filter_length // 2 + 1,
206
+ hps.train.segment_size // hps.data.hop_length,
207
+ n_speakers=hps.data.n_speakers,
208
+ **hps.model
209
+ )
210
+ if ("pretrained" not in sovits_path):
211
+ del vq_model.enc_q
212
+ if is_half == True:
213
+ vq_model = vq_model.half().to(device)
214
+ else:
215
+ vq_model = vq_model.to(device)
216
+ vq_model.eval()
217
+ print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
218
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
219
+ with open("./weight.json")as f:
220
+ data=f.read()
221
+ data=json.loads(data)
222
+ data["SoVITS"][version]=sovits_path
223
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
224
+ if prompt_language is not None and text_language is not None:
225
+ if prompt_language in list(dict_language.keys()):
226
+ prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
227
+ else:
228
+ prompt_text_update = {'__type__':'update', 'value':''}
229
+ prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
230
+ if text_language in list(dict_language.keys()):
231
+ text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
232
+ else:
233
+ text_update = {'__type__':'update', 'value':''}
234
+ text_language_update = {'__type__':'update', 'value':i18n("中文")}
235
+ return {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update
236
+
237
+
238
+
239
+ change_sovits_weights(sovits_path)
240
+
241
+
242
+ def change_gpt_weights(gpt_path):
243
+ global hz, max_sec, t2s_model, config
244
+ hz = 50
245
+ dict_s1 = torch.load(gpt_path, map_location="cpu")
246
+ config = dict_s1["config"]
247
+ max_sec = config["data"]["max_sec"]
248
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
249
+ t2s_model.load_state_dict(dict_s1["weight"])
250
+ if is_half == True:
251
+ t2s_model = t2s_model.half()
252
+ t2s_model = t2s_model.to(device)
253
+ t2s_model.eval()
254
+ total = sum([param.nelement() for param in t2s_model.parameters()])
255
+ print("Number of parameter: %.2fM" % (total / 1e6))
256
+ with open("./weight.json")as f:
257
+ data=f.read()
258
+ data=json.loads(data)
259
+ data["GPT"][version]=gpt_path
260
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
261
+
262
+
263
+ change_gpt_weights(gpt_path)
264
+
265
+
266
+ def get_spepc(hps, filename):
267
+ audio = load_audio(filename, int(hps.data.sampling_rate))
268
+ audio = torch.FloatTensor(audio)
269
+ maxx=audio.abs().max()
270
+ if(maxx>1):audio/=min(2,maxx)
271
+ audio_norm = audio
272
+ audio_norm = audio_norm.unsqueeze(0)
273
+ spec = spectrogram_torch(
274
+ audio_norm,
275
+ hps.data.filter_length,
276
+ hps.data.sampling_rate,
277
+ hps.data.hop_length,
278
+ hps.data.win_length,
279
+ center=False,
280
+ )
281
+ return spec
282
+
283
+ def clean_text_inf(text, language, version):
284
+ phones, word2ph, norm_text = clean_text(text, language, version)
285
+ phones = cleaned_text_to_sequence(phones, version)
286
+ return phones, word2ph, norm_text
287
+
288
+ dtype=torch.float16 if is_half == True else torch.float32
289
+ def get_bert_inf(phones, word2ph, norm_text, language):
290
+ language=language.replace("all_","")
291
+ if language == "zh":
292
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
293
+ else:
294
+ bert = torch.zeros(
295
+ (1024, len(phones)),
296
+ dtype=torch.float16 if is_half == True else torch.float32,
297
+ ).to(device)
298
+
299
+ return bert
300
+
301
+
302
+ splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
303
+
304
+
305
+ def get_first(text):
306
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
307
+ text = re.split(pattern, text)[0].strip()
308
+ return text
309
+
310
+ from text import chinese
311
+ def get_phones_and_bert(text,language,version,final=False):
312
+ if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
313
+ language = language.replace("all_","")
314
+ if language == "en":
315
+ LangSegment.setfilters(["en"])
316
+ formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
317
+ else:
318
+ # CJK han characters cannot be told apart reliably, so the user's declared language takes precedence
319
+ formattext = text
320
+ while " " in formattext:
321
+ formattext = formattext.replace(" ", " ")
322
+ if language == "zh":
323
+ if re.search(r'[A-Za-z]', formattext):
324
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
325
+ formattext = chinese.mix_text_normalize(formattext)
326
+ return get_phones_and_bert(formattext,"zh",version)
327
+ else:
328
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
329
+ bert = get_bert_feature(norm_text, word2ph).to(device)
330
+ elif language == "yue" and re.search(r'[A-Za-z]', formattext):
331
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
332
+ formattext = chinese.mix_text_normalize(formattext)
333
+ return get_phones_and_bert(formattext,"yue",version)
334
+ else:
335
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
336
+ bert = torch.zeros(
337
+ (1024, len(phones)),
338
+ dtype=torch.float16 if is_half == True else torch.float32,
339
+ ).to(device)
340
+ elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
341
+ textlist=[]
342
+ langlist=[]
343
+ LangSegment.setfilters(["zh","ja","en","ko"])
344
+ if language == "auto":
345
+ for tmp in LangSegment.getTexts(text):
346
+ langlist.append(tmp["lang"])
347
+ textlist.append(tmp["text"])
348
+ elif language == "auto_yue":
349
+ for tmp in LangSegment.getTexts(text):
350
+ if tmp["lang"] == "zh":
351
+ tmp["lang"] = "yue"
352
+ langlist.append(tmp["lang"])
353
+ textlist.append(tmp["text"])
354
+ else:
355
+ for tmp in LangSegment.getTexts(text):
356
+ if tmp["lang"] == "en":
357
+ langlist.append(tmp["lang"])
358
+ else:
359
+ # Han characters cannot be reliably told apart across Chinese/Japanese/Korean, so trust the user's language choice
360
+ langlist.append(language)
361
+ textlist.append(tmp["text"])
362
+ print(textlist)
363
+ print(langlist)
364
+ phones_list = []
365
+ bert_list = []
366
+ norm_text_list = []
367
+ for i in range(len(textlist)):
368
+ lang = langlist[i]
369
+ phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
370
+ bert = get_bert_inf(phones, word2ph, norm_text, lang)
371
+ phones_list.append(phones)
372
+ norm_text_list.append(norm_text)
373
+ bert_list.append(bert)
374
+ bert = torch.cat(bert_list, dim=1)
375
+ phones = sum(phones_list, [])
376
+ norm_text = ''.join(norm_text_list)
377
+
378
+ if not final and len(phones) < 6:
379
+ return get_phones_and_bert("." + text,language,version,final=True)
380
+
381
+ return phones,bert.to(dtype),norm_text
382
+
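+ # Note (illustrative): single-language input ("en", "all_*") is normalized in
+ # one pass; mixed input is segmented by LangSegment, each run is phonemized
+ # separately, and the per-run BERT features are concatenated along time.
+ # Inputs shorter than 6 phonemes are retried once with a leading "." so the
+ # model gets enough context.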
383
+
384
+ def merge_short_text_in_array(texts, threshold):
385
+ if (len(texts)) < 2:
386
+ return texts
387
+ result = []
388
+ text = ""
389
+ for ele in texts:
390
+ text += ele
391
+ if len(text) >= threshold:
392
+ result.append(text)
393
+ text = ""
394
+ if (len(text) > 0):
395
+ if len(result) == 0:
396
+ result.append(text)
397
+ else:
398
+ result[len(result) - 1] += text
399
+ return result
400
+
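+ # Example (illustrative): merge_short_text_in_array(["ab", "cd", "ef"], 5)
+ # accumulates pieces until the running length reaches the threshold and
+ # returns ["abcdef"]; a short trailing leftover is appended to the previous
+ # chunk rather than emitted on its own.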
401
+ ## ref_wav_path + prompt_text + prompt_language + text (single piece) + text_language + top_k + top_p + temperature
402
+ # cache_tokens = {}  # no eviction mechanism implemented yet
403
+ cache= {}
404
+ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free=False, speed=1, if_freeze=False, inp_refs=None):
406
+ global cache
407
+ if not ref_wav_path: gr.Warning(i18n('请上传参考音频'))
+ if not text: gr.Warning(i18n('请填入推理文本'))
411
+ t = []
412
+ if prompt_text is None or len(prompt_text) == 0:
413
+ ref_free = True
414
+ t0 = ttime()
415
+ prompt_language = dict_language[prompt_language]
416
+ text_language = dict_language[text_language]
417
+
418
+
419
+ if not ref_free:
420
+ prompt_text = prompt_text.strip("\n")
421
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
422
+ print(i18n("实际输入的参考文本:"), prompt_text)
423
+ text = text.strip("\n")
424
+ # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
425
+
426
+ print(i18n("实际输入的目标文本:"), text)
427
+ zero_wav = np.zeros(
428
+ int(hps.data.sampling_rate * 0.3),
429
+ dtype=np.float16 if is_half == True else np.float32,
430
+ )
431
+ if not ref_free:
432
+ with torch.no_grad():
433
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
434
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
435
+ gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
436
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
437
+ wav16k = torch.from_numpy(wav16k)
438
+ zero_wav_torch = torch.from_numpy(zero_wav)
439
+ if is_half == True:
440
+ wav16k = wav16k.half().to(device)
441
+ zero_wav_torch = zero_wav_torch.half().to(device)
442
+ else:
443
+ wav16k = wav16k.to(device)
444
+ zero_wav_torch = zero_wav_torch.to(device)
445
+ wav16k = torch.cat([wav16k, zero_wav_torch])
446
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
447
+ "last_hidden_state"
448
+ ].transpose(
449
+ 1, 2
450
+ ) # .float()
451
+ codes = vq_model.extract_latent(ssl_content)
452
+ prompt_semantic = codes[0, 0]
453
+ prompt = prompt_semantic.unsqueeze(0).to(device)
454
+
455
+ t1 = ttime()
456
+ t.append(t1-t0)
457
+
458
+ if (how_to_cut == i18n("凑四句一切")):
459
+ text = cut1(text)
460
+ elif (how_to_cut == i18n("凑50字一切")):
461
+ text = cut2(text)
462
+ elif (how_to_cut == i18n("按中文句号。切")):
463
+ text = cut3(text)
464
+ elif (how_to_cut == i18n("按英文句号.切")):
465
+ text = cut4(text)
466
+ elif (how_to_cut == i18n("按标点符号切")):
467
+ text = cut5(text)
468
+ while "\n\n" in text:
469
+ text = text.replace("\n\n", "\n")
470
+ print(i18n("实际输入的目标文本(切句后):"), text)
471
+ texts = text.split("\n")
472
+ texts = process_text(texts)
473
+ texts = merge_short_text_in_array(texts, 5)
474
+ audio_opt = []
475
+ if not ref_free:
476
+ phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)
477
+
478
+ for i_text,text in enumerate(texts):
479
+ # skip blank lines in the target text so they do not raise errors
480
+ if (len(text.strip()) == 0):
481
+ continue
482
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
483
+ print(i18n("实际输入的目标文本(每句):"), text)
484
+ phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
485
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
486
+ if not ref_free:
487
+ bert = torch.cat([bert1, bert2], 1)
488
+ all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
489
+ else:
490
+ bert = bert2
491
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
492
+
493
+ bert = bert.to(device).unsqueeze(0)
494
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
495
+
496
+ t2 = ttime()
497
+ # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
498
+ # print(cache.keys(),if_freeze)
499
+ if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
500
+ else:
501
+ with torch.no_grad():
502
+ pred_semantic, idx = t2s_model.model.infer_panel(
503
+ all_phoneme_ids,
504
+ all_phoneme_len,
505
+ None if ref_free else prompt,
506
+ bert,
507
+ # prompt_phone_len=ph_offset,
508
+ top_k=top_k,
509
+ top_p=top_p,
510
+ temperature=temperature,
511
+ early_stop_num=hz * max_sec,
512
+ )
513
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
514
+ cache[i_text]=pred_semantic
515
+ t3 = ttime()
516
+ refers=[]
517
+ if(inp_refs):
518
+ for path in inp_refs:
519
+ try:
520
+ refer = get_spepc(hps, path.name).to(dtype).to(device)
521
+ refers.append(refer)
522
+ except:
523
+ traceback.print_exc()
524
+ if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
525
+ audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed).detach().cpu().numpy()[0, 0])
526
+ max_audio=np.abs(audio).max()  # simple guard against 16-bit clipping
527
+ if max_audio>1:audio/=max_audio
528
+ audio_opt.append(audio)
529
+ audio_opt.append(zero_wav)
530
+ t4 = ttime()
531
+ t.extend([t2 - t1,t3 - t2, t4 - t3])
532
+ t1 = ttime()
533
+ print("%.3f\t%.3f\t%.3f\t%.3f" %
534
+ (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
535
+ )
536
+ yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
537
+ np.int16
538
+ )
539
+
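+ # Note (illustrative): the tab-separated timing line reports four phases:
+ # reference encoding (t[0]), then text frontend, GPT semantic decoding, and
+ # SoVITS vocoding summed over all chunks (t[1::3], t[2::3], t[3::3]).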
540
+
541
+ def split(todo_text):
542
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
543
+ if todo_text[-1] not in splits:
544
+ todo_text += "。"
545
+ i_split_head = i_split_tail = 0
546
+ len_text = len(todo_text)
547
+ todo_texts = []
548
+ while 1:
549
+ if i_split_head >= len_text:
550
+ break  # the text is guaranteed to end with punctuation, so just exit; the final segment was appended in the previous iteration
551
+ if todo_text[i_split_head] in splits:
552
+ i_split_head += 1
553
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
554
+ i_split_tail = i_split_head
555
+ else:
556
+ i_split_head += 1
557
+ return todo_texts
558
+
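+ # Example (illustrative): split("一。二!三") first appends a trailing "。"
+ # because the input does not end with punctuation, then cuts after every mark
+ # in `splits`, giving ["一。", "二!", "三。"].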
559
+
560
+ def cut1(inp):
561
+ inp = inp.strip("\n")
562
+ inps = split(inp)
563
+ split_idx = list(range(0, len(inps), 4))
564
+ split_idx[-1] = None
565
+ if len(split_idx) > 1:
566
+ opts = []
567
+ for idx in range(len(split_idx) - 1):
568
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
569
+ else:
570
+ opts = [inp]
571
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
572
+ return "\n".join(opts)
573
+
574
+
575
+ def cut2(inp):
576
+ inp = inp.strip("\n")
577
+ inps = split(inp)
578
+ if len(inps) < 2:
579
+ return inp
580
+ opts = []
581
+ summ = 0
582
+ tmp_str = ""
583
+ for i in range(len(inps)):
584
+ summ += len(inps[i])
585
+ tmp_str += inps[i]
586
+ if summ > 50:
587
+ summ = 0
588
+ opts.append(tmp_str)
589
+ tmp_str = ""
590
+ if tmp_str != "":
591
+ opts.append(tmp_str)
592
+ # print(opts)
593
+ if len(opts) > 1 and len(opts[-1]) < 50:  ## if the last chunk is too short, merge it into the previous one
594
+ opts[-2] = opts[-2] + opts[-1]
595
+ opts = opts[:-1]
596
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
597
+ return "\n".join(opts)
598
+
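+ # Example (illustrative): for segments of lengths [30, 30, 30], cut2 flushes a
+ # chunk as soon as the running total exceeds 50 characters (60 + 30 here), and
+ # the short 30-character tail is then merged back into the previous chunk.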
599
+
600
+ def cut3(inp):
601
+ inp = inp.strip("\n")
602
+ opts = ["%s" % item for item in inp.strip("。").split("。")]
603
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
604
+ return "\n".join(opts)
605
+
606
+ def cut4(inp):
607
+ inp = inp.strip("\n")
608
+ opts = ["%s" % item for item in inp.strip(".").split(".")]
609
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
610
+ return "\n".join(opts)
611
+
612
+
613
+ # contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
614
+ def cut5(inp):
615
+ inp = inp.strip("\n")
616
+ punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
617
+ mergeitems = []
618
+ items = []
619
+
620
+ for i, char in enumerate(inp):
621
+ if char in punds:
622
+ if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
623
+ items.append(char)
624
+ else:
625
+ items.append(char)
626
+ mergeitems.append("".join(items))
627
+ items = []
628
+ else:
629
+ items.append(char)
630
+
631
+ if items:
632
+ mergeitems.append("".join(items))
633
+
634
+ opt = [item for item in mergeitems if not set(item).issubset(punds)]
635
+ return "\n".join(opt)
636
+
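+ # Example (illustrative): cut5("Pi is 3.14. Done!") keeps the decimal point in
+ # "3.14" (a '.' flanked by digits does not split) and breaks on the other
+ # marks, yielding "Pi is 3.14." and " Done!".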
637
+
638
+ def custom_sort_key(s):
639
+ # use a regex to split the string into digit and non-digit runs
+ parts = re.split(r"(\d+)", s)
+ # convert the digit runs to ints; leave the non-digit parts unchanged
642
+ parts = [int(part) if part.isdigit() else part for part in parts]
643
+ return parts
644
+
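+ # Example (illustrative): sorted(["e10.pth", "e2.pth"], key=custom_sort_key)
+ # gives ["e2.pth", "e10.pth"]: numeric runs compare as integers instead of
+ # character by character.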
645
+ def process_text(texts):
646
+ _text=[]
647
+ if all(text in [None, " ", "\n",""] for text in texts):
648
+ raise ValueError(i18n("请输入有效文本"))
649
+ for text in texts:
650
+ if text in [None, " ", ""]:
651
+ pass
652
+ else:
653
+ _text.append(text)
654
+ return _text
655
+
656
+
657
+ def change_choices():
658
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
659
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
660
+
661
+
662
+ # SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
663
+ # GPT_weight_root=["GPT_weights_v2","GPT_weights"]
664
+ SoVITS_weight_root=["checkpoints"]
665
+ GPT_weight_root=["checkpoints"]
666
+ for path in SoVITS_weight_root+GPT_weight_root:
667
+ os.makedirs(path,exist_ok=True)
668
+
669
+
670
+ def get_weights_names(GPT_weight_root, SoVITS_weight_root):
671
+ SoVITS_names = [i for i in pretrained_sovits_name]
672
+ for path in SoVITS_weight_root:
673
+ for name in os.listdir(path):
674
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
675
+ GPT_names = [i for i in pretrained_gpt_name]
676
+ for path in GPT_weight_root:
677
+ for name in os.listdir(path):
678
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
679
+ return SoVITS_names, GPT_names
680
+
681
+
682
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
683
+
684
+ def html_center(text, label='p'):
685
+ return f"""<div style="text-align: center; margin: 100; padding: 50;">
686
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
687
+ </div>"""
688
+
689
+ def html_left(text, label='p'):
690
+ return f"""<div style="text-align: left; margin: 0; padding: 0;">
691
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
692
+ </div>"""
693
+
694
+
695
+ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
696
+ gr.Markdown(
697
+ value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")
698
+ )
699
+ with gr.Group():
700
+ gr.Markdown(html_center(i18n("模型切换"),'h3'))
701
+ with gr.Row():
702
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, scale=14)
703
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, scale=14)
704
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
705
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
706
+ gr.Markdown(html_center(i18n("*请上传并填写参考信息"),'h3'))
707
+ with gr.Row():
708
+ inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
709
+ with gr.Column(scale=13):
710
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True, show_label=True,scale=1)
711
+ gr.Markdown(html_left(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开。<br>开启后无视填写的参考文本。")))
712
+ prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5,scale=1)
713
+ with gr.Column(scale=14):
714
+ prompt_language = gr.Dropdown(
715
+ label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文"),
716
+ )
717
+ inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"),file_count="multiple")
718
+ gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"),'h3'))
719
+ with gr.Row():
720
+ with gr.Column(scale=13):
721
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
722
+ with gr.Column(scale=7):
723
+ text_language = gr.Dropdown(
724
+ label=i18n("需要合成的语种")+i18n(".限制范围越小判别效果越好。"), choices=list(dict_language.keys()), value=i18n("中文"), scale=1
725
+ )
726
+ how_to_cut = gr.Dropdown(
727
+ label=i18n("怎么切"),
728
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
729
+ value=i18n("凑四句一切"),
730
+ interactive=True, scale=1
731
+ )
732
+ gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
733
+ if_freeze=gr.Checkbox(label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), value=False, interactive=True,show_label=True, scale=1)
734
+ speed = gr.Slider(minimum=0.6,maximum=1.65,step=0.05,label=i18n("语速"),value=1,interactive=True, scale=1)
735
+ gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
736
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k"),value=15,interactive=True, scale=1)
737
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p"),value=1,interactive=True, scale=1)
738
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature"),value=1,interactive=True, scale=1)
739
+ # with gr.Column():
740
+ # gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
741
+ # phoneme=gr.Textbox(label=i18n("音素框"), value="")
742
+ # get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
743
+ with gr.Row():
744
+ inference_button = gr.Button(i18n("合成语音"), variant="primary", size='lg', scale=25)
745
+ output = gr.Audio(label=i18n("输出的语音"), scale=14)
746
+
747
+ inference_button.click(
748
+ get_tts_wav,
749
+ [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs],
750
+ [output],
751
+ )
752
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown,prompt_language,text_language], [prompt_language,text_language,prompt_text,prompt_language,text,text_language])
753
+ GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
754
+
755
+ # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
756
+ # with gr.Row():
757
+ # text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
758
+ # button1 = gr.Button(i18n("凑四句一切"), variant="primary")
759
+ # button2 = gr.Button(i18n("凑50字一切"), variant="primary")
760
+ # button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
761
+ # button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
762
+ # button5 = gr.Button(i18n("按标点符号切"), variant="primary")
763
+ # text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
764
+ # button1.click(cut1, [text_inp], [text_opt])
765
+ # button2.click(cut2, [text_inp], [text_opt])
766
+ # button3.click(cut3, [text_inp], [text_opt])
767
+ # button4.click(cut4, [text_inp], [text_opt])
768
+ # button5.click(cut5, [text_inp], [text_opt])
769
+ # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))
770
+
771
+ if __name__ == '__main__':
772
+ app.queue().launch(#concurrency_count=511, max_size=1022
773
+ server_name="0.0.0.0",
774
+ inbrowser=True,
775
+ share=is_share,
776
+ server_port=infer_ttswebui,
777
+ quiet=True,
778
+ )
inference_webui_fast.py ADDED
@@ -0,0 +1,336 @@
1
+ '''
2
+ Mixed Chinese-English recognition
+ Mixed Japanese-English recognition
+ Multilingual: segment the text and detect each segment's language
+ Treat all text as Chinese
+ Treat all text as English
+ Treat all text as Japanese
8
+ '''
9
+ import random
10
+ import os, re, logging
11
+ import sys
12
+ now_dir = os.getcwd()
13
+ sys.path.append(now_dir)
14
+ sys.path.append("%s/moyoyo_tts" % (now_dir))
15
+
16
+ logging.getLogger("markdown_it").setLevel(logging.ERROR)
17
+ logging.getLogger("urllib3").setLevel(logging.ERROR)
18
+ logging.getLogger("httpcore").setLevel(logging.ERROR)
19
+ logging.getLogger("httpx").setLevel(logging.ERROR)
20
+ logging.getLogger("asyncio").setLevel(logging.ERROR)
21
+ logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
22
+ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
23
+ import pdb
24
+ import torch
25
+
26
+ try:
27
+ import gradio.analytics as analytics
28
+ analytics.version_check = lambda:None
29
+ except:...
30
+
31
+
32
+ infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
33
+ infer_ttswebui = int(infer_ttswebui)
34
+ is_share = os.environ.get("is_share", "False")
35
+ is_share = eval(is_share)
36
+ if "_CUDA_VISIBLE_DEVICES" in os.environ:
37
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
38
+
39
+ is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
40
+ gpt_path = os.environ.get("gpt_path", None)
41
+ sovits_path = os.environ.get("sovits_path", None)
42
+ cnhubert_base_path = os.environ.get("cnhubert_base_path", None)
43
+ bert_path = os.environ.get("bert_path", None)
44
+ version=os.environ.get("version","v2")
45
+
46
+ import gradio as gr
47
+ from TTS_infer_pack.TTS import TTS, TTS_Config
48
+ from TTS_infer_pack.text_segmentation_method import get_method
49
+ from tools.i18n.i18n import I18nAuto, scan_language_list
50
+
51
+ language=os.environ.get("language","Auto")
52
+ language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
53
+ i18n = I18nAuto(language=language)
54
+
55
+
56
+ # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # make sure this can also be set when the inference UI is launched directly
57
+
58
+ if torch.cuda.is_available():
59
+ device = "cuda"
60
+ # elif torch.backends.mps.is_available():
61
+ # device = "mps"
62
+ else:
63
+ device = "cpu"
64
+
65
+ dict_language_v1 = {
66
+ i18n("中文"): "all_zh",#全部按中文识别
67
+ i18n("英文"): "en",#全部按英文识别#######不变
68
+ i18n("日文"): "all_ja",#全部按日文识别
69
+ i18n("中英混合"): "zh",#按中英混合识别####不变
70
+ i18n("日英混合"): "ja",#按日英混合识别####不变
71
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
72
+ }
73
+ dict_language_v2 = {
74
+ i18n("中文"): "all_zh",#全部按中文识别
75
+ i18n("英文"): "en",#全部按英文识别#######不变
76
+ i18n("日文"): "all_ja",#全部按日文识别
77
+ i18n("粤语"): "all_yue",#全部按中文识别
78
+ i18n("韩文"): "all_ko",#全部按韩文识别
79
+ i18n("中英混合"): "zh",#按中英混合识别####不变
80
+ i18n("日英混合"): "ja",#按日英混合识别####不变
81
+ i18n("粤英混合"): "yue",#按粤英混合识别####不变
82
+ i18n("韩英混合"): "ko",#按韩英混合识别####不变
83
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
84
+ i18n("多语种混合(粤语)"): "auto_yue",#多语种启动切分识别语种
85
+ }
86
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
87
+
88
+ cut_method = {
89
+ i18n("不切"):"cut0",
90
+ i18n("凑四句一切"): "cut1",
91
+ i18n("凑50字一切"): "cut2",
92
+ i18n("按中文句号。切"): "cut3",
93
+ i18n("按英文句号.切"): "cut4",
94
+ i18n("按标点符号切"): "cut5",
95
+ }
96
+
97
+ tts_config = TTS_Config("moyoyo_tts/configs/tts_infer.yaml")
98
+ tts_config.device = device
99
+ tts_config.is_half = is_half
100
+ tts_config.version = version
101
+ if gpt_path is not None:
102
+ tts_config.t2s_weights_path = gpt_path
103
+ if sovits_path is not None:
104
+ tts_config.vits_weights_path = sovits_path
105
+ if cnhubert_base_path is not None:
106
+ tts_config.cnhuhbert_base_path = cnhubert_base_path
107
+ if bert_path is not None:
108
+ tts_config.bert_base_path = bert_path
109
+
110
+ print(tts_config)
111
+ tts_pipeline = TTS(tts_config)
112
+ gpt_path = tts_config.t2s_weights_path
113
+ sovits_path = tts_config.vits_weights_path
114
+ version = tts_config.version
115
+
116
+ def inference(text, text_lang,
117
+ ref_audio_path,
118
+ aux_ref_audio_paths,
119
+ prompt_text,
120
+ prompt_lang, top_k,
121
+ top_p, temperature,
122
+ text_split_method, batch_size,
123
+ speed_factor, ref_text_free,
124
+ split_bucket,fragment_interval,
125
+ seed, keep_random, parallel_infer,
126
+ repetition_penalty
127
+ ):
128
+
129
+ seed = -1 if keep_random else seed
130
+ actual_seed = seed if seed not in [-1, "", None] else random.randrange(1 << 32)
131
+ inputs={
132
+ "text": text,
133
+ "text_lang": dict_language[text_lang],
134
+ "ref_audio_path": ref_audio_path,
135
+ "aux_ref_audio_paths": [item.name for item in aux_ref_audio_paths] if aux_ref_audio_paths is not None else [],
136
+ "prompt_text": prompt_text if not ref_text_free else "",
137
+ "prompt_lang": dict_language[prompt_lang],
138
+ "top_k": top_k,
139
+ "top_p": top_p,
140
+ "temperature": temperature,
141
+ "text_split_method": cut_method[text_split_method],
142
+ "batch_size":int(batch_size),
143
+ "speed_factor":float(speed_factor),
144
+ "split_bucket":split_bucket,
145
+ "return_fragment":False,
146
+ "fragment_interval":fragment_interval,
147
+ "seed":actual_seed,
148
+ "parallel_infer": parallel_infer,
149
+ "repetition_penalty": repetition_penalty,
150
+ }
151
+ for item in tts_pipeline.run(inputs):
152
+ yield item, actual_seed
153
+
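+ # Note (illustrative): the generator re-emits every audio item produced by
+ # tts_pipeline.run together with actual_seed, so the UI both plays the result
+ # and shows the seed that produced it (handy for reproducing a good take with
+ # "keep_random" unchecked).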
154
+ def custom_sort_key(s):
155
+ # use a regex to split the string into digit and non-digit runs
+ parts = re.split(r"(\d+)", s)
+ # convert the digit runs to ints; leave the non-digit parts unchanged
158
+ parts = [int(part) if part.isdigit() else part for part in parts]
159
+ return parts
160
+
161
+
162
+ def change_choices():
163
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
164
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
165
+
166
+
167
+ pretrained_sovits_name=["moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "moyoyo_tts/pretrained_models/s2G488k.pth"]
168
+ pretrained_gpt_name=["moyoyo_tts/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "moyoyo_tts/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
169
+ _ =[[],[]]
170
+ for i in range(2):
171
+ if os.path.exists(pretrained_gpt_name[i]):
172
+ _[0].append(pretrained_gpt_name[i])
173
+ if os.path.exists(pretrained_sovits_name[i]):
174
+ _[-1].append(pretrained_sovits_name[i])
175
+ pretrained_gpt_name,pretrained_sovits_name = _
176
+
177
+ SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
178
+ GPT_weight_root=["GPT_weights_v2","GPT_weights"]
179
+ for path in SoVITS_weight_root+GPT_weight_root:
180
+ os.makedirs(path,exist_ok=True)
181
+
182
+ def get_weights_names(GPT_weight_root, SoVITS_weight_root):
183
+ SoVITS_names = [i for i in pretrained_sovits_name]
184
+ for path in SoVITS_weight_root:
185
+ for name in os.listdir(path):
186
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
187
+ GPT_names = [i for i in pretrained_gpt_name]
188
+ for path in GPT_weight_root:
189
+ for name in os.listdir(path):
190
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
191
+ return SoVITS_names, GPT_names
192
+
193
+
194
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
195
+
196
+
197
+
198
+ def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
199
+ tts_pipeline.init_vits_weights(sovits_path)
200
+ global version, dict_language
201
+ dict_language = dict_language_v1 if tts_pipeline.configs.version =='v1' else dict_language_v2
202
+ if prompt_language is not None and text_language is not None:
203
+ if prompt_language in list(dict_language.keys()):
204
+ prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
205
+ else:
206
+ prompt_text_update = {'__type__':'update', 'value':''}
207
+ prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
208
+ if text_language in list(dict_language.keys()):
209
+ text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
210
+ else:
211
+ text_update = {'__type__':'update', 'value':''}
212
+ text_language_update = {'__type__':'update', 'value':i18n("中文")}
213
+ return {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update
214
+
215
+
216
+
217
+ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
218
+ gr.Markdown(
219
+ value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")
220
+ )
221
+
222
+ with gr.Column():
223
+ # with gr.Group():
224
+ gr.Markdown(value=i18n("模型切换"))
225
+ with gr.Row():
226
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True)
227
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True)
228
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
229
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
230
+
231
+
232
+ with gr.Row():
233
+ with gr.Column():
234
+ gr.Markdown(value=i18n("*请上传并填写参考信息"))
235
+ with gr.Row():
236
+ inp_ref = gr.Audio(label=i18n("主参考音频(请上传3~10秒内参考音频,超过会报错!)"), type="filepath")
237
+ inp_refs = gr.File(label=i18n("辅参考音频(可选多个,或不选)"),file_count="multiple")
238
+ prompt_text = gr.Textbox(label=i18n("主参考音频的文本"), value="", lines=2)
239
+ with gr.Row():
240
+ prompt_language = gr.Dropdown(
241
+ label=i18n("主参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文")
242
+ )
243
+ with gr.Column():
244
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True, show_label=True)
245
+ gr.Markdown(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。"))
246
+
247
+ with gr.Column():
248
+ gr.Markdown(value=i18n("*请填写需要合成的目标文本和语种模式"))
249
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=20, max_lines=20)
250
+ text_language = gr.Dropdown(
251
+ label=i18n("需要合成的文本的语种"), choices=list(dict_language.keys()), value=i18n("中文")
252
+ )
253
+
254
+
255
+ with gr.Group():
256
+ gr.Markdown(value=i18n("推理设置"))
257
+ with gr.Row():
258
+
259
+ with gr.Column():
260
+ batch_size = gr.Slider(minimum=1,maximum=200,step=1,label=i18n("batch_size"),value=20,interactive=True)
261
+ fragment_interval = gr.Slider(minimum=0.01,maximum=1,step=0.01,label=i18n("分段间隔(秒)"),value=0.3,interactive=True)
262
+ speed_factor = gr.Slider(minimum=0.6,maximum=1.65,step=0.05,label="speed_factor",value=1.0,interactive=True)
263
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k"),value=5,interactive=True)
264
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p"),value=1,interactive=True)
265
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature"),value=1,interactive=True)
266
+ repetition_penalty = gr.Slider(minimum=0,maximum=2,step=0.05,label=i18n("重复惩罚"),value=1.35,interactive=True)
267
+ with gr.Column():
268
+ with gr.Row():
269
+ how_to_cut = gr.Dropdown(
270
+ label=i18n("怎么切"),
271
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
272
+ value=i18n("凑四句一切"),
273
+ interactive=True, scale=1
274
+ )
275
+ parallel_infer = gr.Checkbox(label=i18n("并行推理"), value=True, interactive=True, show_label=True)
276
+ split_bucket = gr.Checkbox(label=i18n("数据分桶(并行推理时会降低一点计算量)"), value=True, interactive=True, show_label=True)
277
+
278
+ with gr.Row():
279
+ seed = gr.Number(label=i18n("随机种子"),value=-1)
280
+ keep_random = gr.Checkbox(label=i18n("保持随机"), value=True, interactive=True, show_label=True)
281
+
282
+ output = gr.Audio(label=i18n("输出的语音"))
283
+ with gr.Row():
284
+ inference_button = gr.Button(i18n("合成语音"), variant="primary")
285
+ stop_infer = gr.Button(i18n("终止合成"), variant="primary")
286
+
287
+
288
+ inference_button.click(
289
+ inference,
290
+ [
291
+ text,text_language, inp_ref, inp_refs,
292
+ prompt_text, prompt_language,
293
+ top_k, top_p, temperature,
294
+ how_to_cut, batch_size,
295
+ speed_factor, ref_text_free,
296
+ split_bucket,fragment_interval,
297
+ seed, keep_random, parallel_infer,
298
+ repetition_penalty
299
+ ],
300
+ [output, seed],
301
+ )
302
+ stop_infer.click(tts_pipeline.stop, [], [])
303
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown,prompt_language,text_language], [prompt_language,text_language,prompt_text,prompt_language,text,text_language])
304
+ GPT_dropdown.change(tts_pipeline.init_t2s_weights, [GPT_dropdown], [])
305
+
306
+ with gr.Group():
307
+ gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
308
+ with gr.Row():
309
+ text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="", lines=4)
310
+ with gr.Column():
311
+ _how_to_cut = gr.Radio(
312
+ label=i18n("怎么切"),
313
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
314
+ value=i18n("凑四句一切"),
315
+ interactive=True,
316
+ )
317
+ cut_text= gr.Button(i18n("切分"), variant="primary")
318
+
319
+ def to_cut(text_inp, how_to_cut):
320
+ if len(text_inp.strip()) == 0 or text_inp==[]:
321
+ return ""
322
+ method = get_method(cut_method[how_to_cut])
323
+ return method(text_inp)
324
+
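+ # Note (illustrative): to_cut maps the localized dropdown label to its
+ # registered splitter (e.g. "凑四句一切" resolves to "cut1") and applies it,
+ # so the preview shows exactly the segmentation the synthesizer would use.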
325
+ text_opt = gr.Textbox(label=i18n("切分后文本"), value="", lines=4)
326
+ cut_text.click(to_cut, [text_inp, _how_to_cut], [text_opt])
327
+ gr.Markdown(value=i18n("后续将支持转音素、手工修改音素、语音合成分步执行。"))
328
+
329
+ if __name__ == '__main__':
330
+ app.queue().launch(#concurrency_count=511, max_size=1022
331
+ server_name="0.0.0.0",
332
+ inbrowser=True,
333
+ share=is_share,
334
+ server_port=infer_ttswebui,
335
+ quiet=True,
336
+ )
module/__init__.py ADDED
File without changes
module/attentions.py ADDED
@@ -0,0 +1,709 @@
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ from moyoyo_tts.module import commons
7
+ from moyoyo_tts.module.modules import LayerNorm
8
+
9
+
10
+ class Encoder(nn.Module):
11
+ def __init__(
12
+ self,
13
+ hidden_channels,
14
+ filter_channels,
15
+ n_heads,
16
+ n_layers,
17
+ kernel_size=1,
18
+ p_dropout=0.0,
19
+ window_size=4,
20
+ isflow=False,
21
+ **kwargs
22
+ ):
23
+ super().__init__()
24
+ self.hidden_channels = hidden_channels
25
+ self.filter_channels = filter_channels
26
+ self.n_heads = n_heads
27
+ self.n_layers = n_layers
28
+ self.kernel_size = kernel_size
29
+ self.p_dropout = p_dropout
30
+ self.window_size = window_size
31
+
32
+ self.drop = nn.Dropout(p_dropout)
33
+ self.attn_layers = nn.ModuleList()
34
+ self.norm_layers_1 = nn.ModuleList()
35
+ self.ffn_layers = nn.ModuleList()
36
+ self.norm_layers_2 = nn.ModuleList()
37
+ for i in range(self.n_layers):
38
+ self.attn_layers.append(
39
+ MultiHeadAttention(
40
+ hidden_channels,
41
+ hidden_channels,
42
+ n_heads,
43
+ p_dropout=p_dropout,
44
+ window_size=window_size,
45
+ )
46
+ )
47
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
48
+ self.ffn_layers.append(
49
+ FFN(
50
+ hidden_channels,
51
+ hidden_channels,
52
+ filter_channels,
53
+ kernel_size,
54
+ p_dropout=p_dropout,
55
+ )
56
+ )
57
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
58
+ if isflow:
59
+ cond_layer = torch.nn.Conv1d(
60
+ kwargs["gin_channels"], 2 * hidden_channels * n_layers, 1
61
+ )
62
+ self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
63
+ self.cond_layer = weight_norm_modules(cond_layer, name="weight")
64
+ self.gin_channels = kwargs["gin_channels"]
65
+
66
+ def forward(self, x, x_mask, g=None):
67
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
68
+ x = x * x_mask
69
+ if g is not None:
70
+ g = self.cond_layer(g)
71
+
72
+ for i in range(self.n_layers):
73
+ if g is not None:
74
+ x = self.cond_pre(x)
75
+ cond_offset = i * 2 * self.hidden_channels
76
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
77
+ x = commons.fused_add_tanh_sigmoid_multiply(
78
+ x, g_l, torch.IntTensor([self.hidden_channels])
79
+ )
80
+ y = self.attn_layers[i](x, x, attn_mask)
81
+ y = self.drop(y)
82
+ x = self.norm_layers_1[i](x + y)
83
+
84
+ y = self.ffn_layers[i](x, x_mask)
85
+ y = self.drop(y)
86
+ x = self.norm_layers_2[i](x + y)
87
+ x = x * x_mask
88
+ return x
89
+
90
+
91
+ class Decoder(nn.Module):
92
+ def __init__(
93
+ self,
94
+ hidden_channels,
95
+ filter_channels,
96
+ n_heads,
97
+ n_layers,
98
+ kernel_size=1,
99
+ p_dropout=0.0,
100
+ proximal_bias=False,
101
+ proximal_init=True,
102
+ **kwargs
103
+ ):
104
+ super().__init__()
105
+ self.hidden_channels = hidden_channels
106
+ self.filter_channels = filter_channels
107
+ self.n_heads = n_heads
108
+ self.n_layers = n_layers
109
+ self.kernel_size = kernel_size
110
+ self.p_dropout = p_dropout
111
+ self.proximal_bias = proximal_bias
112
+ self.proximal_init = proximal_init
113
+
114
+ self.drop = nn.Dropout(p_dropout)
115
+ self.self_attn_layers = nn.ModuleList()
116
+ self.norm_layers_0 = nn.ModuleList()
117
+ self.encdec_attn_layers = nn.ModuleList()
118
+ self.norm_layers_1 = nn.ModuleList()
119
+ self.ffn_layers = nn.ModuleList()
120
+ self.norm_layers_2 = nn.ModuleList()
121
+ for i in range(self.n_layers):
122
+ self.self_attn_layers.append(
123
+ MultiHeadAttention(
124
+ hidden_channels,
125
+ hidden_channels,
126
+ n_heads,
127
+ p_dropout=p_dropout,
128
+ proximal_bias=proximal_bias,
129
+ proximal_init=proximal_init,
130
+ )
131
+ )
132
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
133
+ self.encdec_attn_layers.append(
134
+ MultiHeadAttention(
135
+ hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
136
+ )
137
+ )
138
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
139
+ self.ffn_layers.append(
140
+ FFN(
141
+ hidden_channels,
142
+ hidden_channels,
143
+ filter_channels,
144
+ kernel_size,
145
+ p_dropout=p_dropout,
146
+ causal=True,
147
+ )
148
+ )
149
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
150
+
151
+ def forward(self, x, x_mask, h, h_mask):
152
+ """
153
+ x: decoder input
154
+ h: encoder output
155
+ """
156
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
157
+ device=x.device, dtype=x.dtype
158
+ )
159
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
160
+ x = x * x_mask
161
+ for i in range(self.n_layers):
162
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
163
+ y = self.drop(y)
164
+ x = self.norm_layers_0[i](x + y)
165
+
166
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
167
+ y = self.drop(y)
168
+ x = self.norm_layers_1[i](x + y)
169
+
170
+ y = self.ffn_layers[i](x, x_mask)
171
+ y = self.drop(y)
172
+ x = self.norm_layers_2[i](x + y)
173
+ x = x * x_mask
174
+ return x
175
+
176
+
177
+ class MultiHeadAttention(nn.Module):
178
+ def __init__(
179
+ self,
180
+ channels,
181
+ out_channels,
182
+ n_heads,
183
+ p_dropout=0.0,
184
+ window_size=None,
185
+ heads_share=True,
186
+ block_length=None,
187
+ proximal_bias=False,
188
+ proximal_init=False,
189
+ ):
190
+ super().__init__()
191
+ assert channels % n_heads == 0
192
+
193
+ self.channels = channels
194
+ self.out_channels = out_channels
195
+ self.n_heads = n_heads
196
+ self.p_dropout = p_dropout
197
+ self.window_size = window_size
198
+ self.heads_share = heads_share
199
+ self.block_length = block_length
200
+ self.proximal_bias = proximal_bias
201
+ self.proximal_init = proximal_init
202
+ self.attn = None
203
+
204
+ self.k_channels = channels // n_heads
205
+ self.conv_q = nn.Conv1d(channels, channels, 1)
206
+ self.conv_k = nn.Conv1d(channels, channels, 1)
207
+ self.conv_v = nn.Conv1d(channels, channels, 1)
208
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
209
+ self.drop = nn.Dropout(p_dropout)
210
+
211
+ if window_size is not None:
212
+ n_heads_rel = 1 if heads_share else n_heads
213
+ rel_stddev = self.k_channels**-0.5
214
+ self.emb_rel_k = nn.Parameter(
215
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
216
+ * rel_stddev
217
+ )
218
+ self.emb_rel_v = nn.Parameter(
219
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
220
+ * rel_stddev
221
+ )
222
+
223
+ nn.init.xavier_uniform_(self.conv_q.weight)
224
+ nn.init.xavier_uniform_(self.conv_k.weight)
225
+ nn.init.xavier_uniform_(self.conv_v.weight)
226
+ if proximal_init:
227
+ with torch.no_grad():
228
+ self.conv_k.weight.copy_(self.conv_q.weight)
229
+ self.conv_k.bias.copy_(self.conv_q.bias)
230
+
231
+ def forward(self, x, c, attn_mask=None):
232
+ q = self.conv_q(x)
233
+ k = self.conv_k(c)
234
+ v = self.conv_v(c)
235
+
236
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
237
+
238
+ x = self.conv_o(x)
239
+ return x
240
+
241
+ def attention(self, query, key, value, mask=None):
242
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
243
+ b, d, t_s, t_t = (*key.size(), query.size(2))
244
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
245
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
246
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
247
+
248
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
249
+ if self.window_size is not None:
250
+ assert (
251
+ t_s == t_t
252
+ ), "Relative attention is only available for self-attention."
253
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
254
+ rel_logits = self._matmul_with_relative_keys(
255
+ query / math.sqrt(self.k_channels), key_relative_embeddings
256
+ )
257
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
258
+ scores = scores + scores_local
259
+ if self.proximal_bias:
260
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
261
+ scores = scores + self._attention_bias_proximal(t_s).to(
262
+ device=scores.device, dtype=scores.dtype
263
+ )
264
+ if mask is not None:
265
+ scores = scores.masked_fill(mask == 0, -1e4)
266
+ if self.block_length is not None:
267
+ assert (
268
+ t_s == t_t
269
+ ), "Local attention is only available for self-attention."
270
+ block_mask = (
271
+ torch.ones_like(scores)
272
+ .triu(-self.block_length)
273
+ .tril(self.block_length)
274
+ )
275
+ scores = scores.masked_fill(block_mask == 0, -1e4)
276
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
277
+ p_attn = self.drop(p_attn)
278
+ output = torch.matmul(p_attn, value)
279
+ if self.window_size is not None:
280
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
281
+ value_relative_embeddings = self._get_relative_embeddings(
282
+ self.emb_rel_v, t_s
283
+ )
284
+ output = output + self._matmul_with_relative_values(
285
+ relative_weights, value_relative_embeddings
286
+ )
287
+ output = (
288
+ output.transpose(2, 3).contiguous().view(b, d, t_t)
289
+ ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
290
+ return output, p_attn
291
+
292
+ def _matmul_with_relative_values(self, x, y):
293
+ """
294
+ x: [b, h, l, m]
295
+ y: [h or 1, m, d]
296
+ ret: [b, h, l, d]
297
+ """
298
+ ret = torch.matmul(x, y.unsqueeze(0))
299
+ return ret
300
+
301
+ def _matmul_with_relative_keys(self, x, y):
302
+ """
303
+ x: [b, h, l, d]
304
+ y: [h or 1, m, d]
305
+ ret: [b, h, l, m]
306
+ """
307
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
308
+ return ret
309
+
310
+ def _get_relative_embeddings(self, relative_embeddings, length):
311
+ max_relative_position = 2 * self.window_size + 1
312
+ # Pad first before slice to avoid using cond ops.
313
+ pad_length = max(length - (self.window_size + 1), 0)
314
+ slice_start_position = max((self.window_size + 1) - length, 0)
315
+ slice_end_position = slice_start_position + 2 * length - 1
316
+ if pad_length > 0:
317
+ padded_relative_embeddings = F.pad(
318
+ relative_embeddings,
319
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
320
+ )
321
+ else:
322
+ padded_relative_embeddings = relative_embeddings
323
+ used_relative_embeddings = padded_relative_embeddings[
324
+ :, slice_start_position:slice_end_position
325
+ ]
326
+ return used_relative_embeddings
327
+
328
+ def _relative_position_to_absolute_position(self, x):
329
+ """
330
+ x: [b, h, l, 2*l-1]
331
+ ret: [b, h, l, l]
332
+ """
333
+ batch, heads, length, _ = x.size()
334
+ # Concat columns of pad to shift from relative to absolute indexing.
335
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
336
+
337
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
338
+ x_flat = x.view([batch, heads, length * 2 * length])
339
+ x_flat = F.pad(
340
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
341
+ )
342
+
343
+ # Reshape and slice out the padded elements.
344
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
345
+ :, :, :length, length - 1 :
346
+ ]
347
+ return x_final
348
+
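+ # Shape sketch (illustrative): for sequence length L, the pad-and-reshape
+ # trick converts [b, h, L, 2L-1] relative-position logits into [b, h, L, L]
+ # absolute scores without gather ops, e.g. L=3: [b, h, 3, 5] -> [b, h, 3, 3].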
349
+ def _absolute_position_to_relative_position(self, x):
350
+ """
351
+ x: [b, h, l, l]
352
+ ret: [b, h, l, 2*l-1]
353
+ """
354
+ batch, heads, length, _ = x.size()
355
+ # pad along the last (column) dimension
356
+ x = F.pad(
357
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
358
+ )
359
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
360
+ # add 0's in the beginning that will skew the elements after reshape
361
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
362
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
363
+ return x_final
364
+
365
+ def _attention_bias_proximal(self, length):
366
+ """Bias for self-attention to encourage attention to close positions.
367
+ Args:
368
+ length: an integer scalar.
369
+ Returns:
370
+ a Tensor with shape [1, 1, length, length]
371
+ """
372
+ r = torch.arange(length, dtype=torch.float32)
373
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
374
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
375
+
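+ # Example (illustrative): for length=3 the bias is -log1p(|i - j|), i.e. 0 on
+ # the diagonal and about -0.69 and -1.10 at distances 1 and 2, softly pushing
+ # attention toward nearby positions.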
376
+
377
+ class FFN(nn.Module):
378
+ def __init__(
379
+ self,
380
+ in_channels,
381
+ out_channels,
382
+ filter_channels,
383
+ kernel_size,
384
+ p_dropout=0.0,
385
+ activation=None,
386
+ causal=False,
387
+ ):
388
+ super().__init__()
389
+ self.in_channels = in_channels
390
+ self.out_channels = out_channels
391
+ self.filter_channels = filter_channels
392
+ self.kernel_size = kernel_size
393
+ self.p_dropout = p_dropout
394
+ self.activation = activation
395
+ self.causal = causal
396
+
397
+ if causal:
398
+ self.padding = self._causal_padding
399
+ else:
400
+ self.padding = self._same_padding
401
+
402
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
403
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
404
+ self.drop = nn.Dropout(p_dropout)
405
+
406
+ def forward(self, x, x_mask):
407
+ x = self.conv_1(self.padding(x * x_mask))
408
+ if self.activation == "gelu":
409
+ x = x * torch.sigmoid(1.702 * x)
410
+ else:
411
+ x = torch.relu(x)
412
+ x = self.drop(x)
413
+ x = self.conv_2(self.padding(x * x_mask))
414
+ return x * x_mask
415
+
416
+ def _causal_padding(self, x):
417
+ if self.kernel_size == 1:
418
+ return x
419
+ pad_l = self.kernel_size - 1
420
+ pad_r = 0
421
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
422
+ x = F.pad(x, commons.convert_pad_shape(padding))
423
+ return x
424
+
425
+ def _same_padding(self, x):
426
+ if self.kernel_size == 1:
427
+ return x
428
+ pad_l = (self.kernel_size - 1) // 2
429
+ pad_r = self.kernel_size // 2
430
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
431
+ x = F.pad(x, commons.convert_pad_shape(padding))
432
+ return x
433
+
434
+
435
+ import torch.nn as nn
436
+ from torch.nn.utils import remove_weight_norm, weight_norm
437
+
438
+
439
+ class Depthwise_Separable_Conv1D(nn.Module):
440
+ def __init__(
441
+ self,
442
+ in_channels,
443
+ out_channels,
444
+ kernel_size,
445
+ stride=1,
446
+ padding=0,
447
+ dilation=1,
448
+ bias=True,
449
+ padding_mode="zeros", # TODO: refine this type
450
+ device=None,
451
+ dtype=None,
452
+ ):
453
+ super().__init__()
454
+ self.depth_conv = nn.Conv1d(
455
+ in_channels=in_channels,
456
+ out_channels=in_channels,
457
+ kernel_size=kernel_size,
458
+ groups=in_channels,
459
+ stride=stride,
460
+ padding=padding,
461
+ dilation=dilation,
462
+ bias=bias,
463
+ padding_mode=padding_mode,
464
+ device=device,
465
+ dtype=dtype,
466
+ )
467
+ self.point_conv = nn.Conv1d(
468
+ in_channels=in_channels,
469
+ out_channels=out_channels,
470
+ kernel_size=1,
471
+ bias=bias,
472
+ device=device,
473
+ dtype=dtype,
474
+ )
475
+
476
+ def forward(self, input):
477
+ return self.point_conv(self.depth_conv(input))
478
+
479
+ def weight_norm(self):
480
+ self.depth_conv = weight_norm(self.depth_conv, name="weight")
481
+ self.point_conv = weight_norm(self.point_conv, name="weight")
482
+
483
+ def remove_weight_norm(self):
484
+ self.depth_conv = remove_weight_norm(self.depth_conv, name="weight")
485
+ self.point_conv = remove_weight_norm(self.point_conv, name="weight")
486
+
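+ # Note (illustrative): the depthwise + pointwise split cuts parameters from
+ # k * C_in * C_out for a dense Conv1d to k * C_in + C_in * C_out; e.g. k=5,
+ # C_in = C_out = 192 gives roughly 38k weights instead of about 184k.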
487
+
488
+ class Depthwise_Separable_TransposeConv1D(nn.Module):
489
+ def __init__(
490
+ self,
491
+ in_channels,
492
+ out_channels,
493
+ kernel_size,
494
+ stride=1,
495
+ padding=0,
496
+ output_padding=0,
497
+ bias=True,
498
+ dilation=1,
499
+ padding_mode="zeros", # TODO: refine this type
500
+ device=None,
501
+ dtype=None,
502
+ ):
503
+ super().__init__()
504
+ self.depth_conv = nn.ConvTranspose1d(
505
+ in_channels=in_channels,
506
+ out_channels=in_channels,
507
+ kernel_size=kernel_size,
508
+ groups=in_channels,
509
+ stride=stride,
510
+ output_padding=output_padding,
511
+ padding=padding,
512
+ dilation=dilation,
513
+ bias=bias,
514
+ padding_mode=padding_mode,
515
+ device=device,
516
+ dtype=dtype,
517
+ )
518
+ self.point_conv = nn.Conv1d(
519
+ in_channels=in_channels,
520
+ out_channels=out_channels,
521
+ kernel_size=1,
522
+ bias=bias,
523
+ device=device,
524
+ dtype=dtype,
525
+ )
526
+
527
+ def forward(self, input):
528
+ return self.point_conv(self.depth_conv(input))
529
+
530
+ def weight_norm(self):
531
+ self.depth_conv = weight_norm(self.depth_conv, name="weight")
532
+ self.point_conv = weight_norm(self.point_conv, name="weight")
533
+
534
+ def remove_weight_norm(self):
535
+ remove_weight_norm(self.depth_conv, name="weight")
536
+ remove_weight_norm(self.point_conv, name="weight")
537
+
538
+
539
+ def weight_norm_modules(module, name="weight", dim=0):
540
+ if isinstance(module, Depthwise_Separable_Conv1D) or isinstance(
541
+ module, Depthwise_Separable_TransposeConv1D
542
+ ):
543
+ module.weight_norm()
544
+ return module
545
+ else:
546
+ return weight_norm(module, name, dim)
547
+
548
+
549
+ def remove_weight_norm_modules(module, name="weight"):
550
+ if isinstance(module, Depthwise_Separable_Conv1D) or isinstance(
551
+ module, Depthwise_Separable_TransposeConv1D
552
+ ):
553
+ module.remove_weight_norm()
554
+ else:
555
+ remove_weight_norm(module, name)
556
+
557
+
558
+ class FFT(nn.Module):
559
+ def __init__(
560
+ self,
561
+ hidden_channels,
562
+ filter_channels,
563
+ n_heads,
564
+ n_layers=1,
565
+ kernel_size=1,
566
+ p_dropout=0.0,
567
+ proximal_bias=False,
568
+ proximal_init=True,
569
+ isflow=False,
570
+ **kwargs
571
+ ):
572
+ super().__init__()
573
+ self.hidden_channels = hidden_channels
574
+ self.filter_channels = filter_channels
575
+ self.n_heads = n_heads
576
+ self.n_layers = n_layers
577
+ self.kernel_size = kernel_size
578
+ self.p_dropout = p_dropout
579
+ self.proximal_bias = proximal_bias
580
+ self.proximal_init = proximal_init
581
+ if isflow:
582
+ cond_layer = torch.nn.Conv1d(
583
+ kwargs["gin_channels"], 2 * hidden_channels * n_layers, 1
584
+ )
585
+ self.cond_pre = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, 1)
586
+ self.cond_layer = weight_norm_modules(cond_layer, name="weight")
587
+ self.gin_channels = kwargs["gin_channels"]
588
+ self.drop = nn.Dropout(p_dropout)
589
+ self.self_attn_layers = nn.ModuleList()
590
+ self.norm_layers_0 = nn.ModuleList()
591
+ self.ffn_layers = nn.ModuleList()
592
+ self.norm_layers_1 = nn.ModuleList()
593
+ for i in range(self.n_layers):
594
+ self.self_attn_layers.append(
595
+ MultiHeadAttention(
596
+ hidden_channels,
597
+ hidden_channels,
598
+ n_heads,
599
+ p_dropout=p_dropout,
600
+ proximal_bias=proximal_bias,
601
+ proximal_init=proximal_init,
602
+ )
603
+ )
604
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
605
+ self.ffn_layers.append(
606
+ FFN(
607
+ hidden_channels,
608
+ hidden_channels,
609
+ filter_channels,
610
+ kernel_size,
611
+ p_dropout=p_dropout,
612
+ causal=True,
613
+ )
614
+ )
615
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
616
+
617
+ def forward(self, x, x_mask, g=None):
618
+ """
619
+ x: decoder input
620
+ h: encoder output
621
+ """
622
+ if g is not None:
623
+ g = self.cond_layer(g)
624
+
625
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
626
+ device=x.device, dtype=x.dtype
627
+ )
628
+ x = x * x_mask
629
+ for i in range(self.n_layers):
630
+ if g is not None:
631
+ x = self.cond_pre(x)
632
+ cond_offset = i * 2 * self.hidden_channels
633
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
634
+ x = commons.fused_add_tanh_sigmoid_multiply(
635
+ x, g_l, torch.IntTensor([self.hidden_channels])
636
+ )
637
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
638
+ y = self.drop(y)
639
+ x = self.norm_layers_0[i](x + y)
640
+
641
+ y = self.ffn_layers[i](x, x_mask)
642
+ y = self.drop(y)
643
+ x = self.norm_layers_1[i](x + y)
644
+ x = x * x_mask
645
+ return x
646
+
647
+
648
+ class TransformerCouplingLayer(nn.Module):
649
+ def __init__(
650
+ self,
651
+ channels,
652
+ hidden_channels,
653
+ kernel_size,
654
+ n_layers,
655
+ n_heads,
656
+ p_dropout=0,
657
+ filter_channels=0,
658
+ mean_only=False,
659
+ wn_sharing_parameter=None,
660
+ gin_channels=0,
661
+ ):
662
+ assert channels % 2 == 0, "channels should be divisible by 2"
663
+ super().__init__()
664
+ self.channels = channels
665
+ self.hidden_channels = hidden_channels
666
+ self.kernel_size = kernel_size
667
+ self.n_layers = n_layers
668
+ self.half_channels = channels // 2
669
+ self.mean_only = mean_only
670
+
671
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
672
+ self.enc = (
673
+ Encoder(
674
+ hidden_channels,
675
+ filter_channels,
676
+ n_heads,
677
+ n_layers,
678
+ kernel_size,
679
+ p_dropout,
680
+ isflow=True,
681
+ gin_channels=gin_channels,
682
+ )
683
+ if wn_sharing_parameter is None
684
+ else wn_sharing_parameter
685
+ )
686
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
687
+ self.post.weight.data.zero_()
688
+ self.post.bias.data.zero_()
689
+
690
+ def forward(self, x, x_mask, g=None, reverse=False):
691
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
692
+ h = self.pre(x0) * x_mask
693
+ h = self.enc(h, x_mask, g=g)
694
+ stats = self.post(h) * x_mask
695
+ if not self.mean_only:
696
+ m, logs = torch.split(stats, [self.half_channels] * 2, 1)
697
+ else:
698
+ m = stats
699
+ logs = torch.zeros_like(m)
700
+
701
+ if not reverse:
702
+ x1 = m + x1 * torch.exp(logs) * x_mask
703
+ x = torch.cat([x0, x1], 1)
704
+ logdet = torch.sum(logs, [1, 2])
705
+ return x, logdet
706
+ else:
707
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
708
+ x = torch.cat([x0, x1], 1)
709
+ return x
module/attentions_onnx.py ADDED
@@ -0,0 +1,406 @@
1
+ import logging  # used by the logging.debug call in Encoder.__init__ below
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from torch.nn import functional as F
5
+
6
+ from moyoyo_tts.module import commons
7
+
8
+ from typing import Optional
9
+
10
+ class LayerNorm(nn.Module):
11
+ def __init__(self, channels, eps=1e-5):
12
+ super().__init__()
13
+ self.channels = channels
14
+ self.eps = eps
15
+
16
+ self.gamma = nn.Parameter(torch.ones(channels))
17
+ self.beta = nn.Parameter(torch.zeros(channels))
18
+
19
+ def forward(self, x):
20
+ x = x.transpose(1, -1)
21
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
22
+ return x.transpose(1, -1)
23
+
24
+
25
+ @torch.jit.script
26
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
27
+ n_channels_int = n_channels[0]
28
+ in_act = input_a + input_b
29
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
30
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
31
+ acts = t_act * s_act
32
+ return acts
33
+
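+ # Note (illustrative): this is the WaveNet-style gated activation
+ # tanh(.) * sigmoid(.): the first n_channels rows of (input_a + input_b) feed
+ # the tanh branch and the remaining rows feed the sigmoid gate.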
34
+
35
+ class Encoder(nn.Module):
36
+ def __init__(
37
+ self,
38
+ hidden_channels,
39
+ filter_channels,
40
+ n_heads,
41
+ n_layers,
42
+ kernel_size=1,
43
+ p_dropout=0.0,
44
+ window_size=4,
45
+ isflow=True,
46
+ **kwargs
47
+ ):
48
+ super().__init__()
49
+ self.hidden_channels = hidden_channels
50
+ self.filter_channels = filter_channels
51
+ self.n_heads = n_heads
52
+ self.n_layers = n_layers
53
+ self.kernel_size = kernel_size
54
+ self.p_dropout = p_dropout
55
+ self.window_size = window_size
56
+ # if isflow:
57
+ # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
58
+ # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
59
+ # self.cond_layer = weight_norm(cond_layer, name='weight')
60
+ # self.gin_channels = 256
61
+ self.cond_layer_idx = self.n_layers
62
+ self.spk_emb_linear = nn.Linear(256, self.hidden_channels)
63
+ if "gin_channels" in kwargs:
64
+ self.gin_channels = kwargs["gin_channels"]
65
+ if self.gin_channels != 0:
66
+ self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
67
+ # vits2 says 3rd block, so idx is 2 by default
68
+ self.cond_layer_idx = (
69
+ kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2
70
+ )
71
+ logging.debug("gin_channels=%s, cond_layer_idx=%s", self.gin_channels, self.cond_layer_idx)
72
+ assert (
73
+ self.cond_layer_idx < self.n_layers
74
+ ), "cond_layer_idx should be less than n_layers"
75
+ self.drop = nn.Dropout(p_dropout)
76
+ self.attn_layers = nn.ModuleList()
77
+ self.norm_layers_1 = nn.ModuleList()
78
+ self.ffn_layers = nn.ModuleList()
79
+ self.norm_layers_2 = nn.ModuleList()
80
+ for i in range(self.n_layers):
81
+ self.attn_layers.append(
82
+ MultiHeadAttention(
83
+ hidden_channels,
84
+ hidden_channels,
85
+ n_heads,
86
+ p_dropout=p_dropout,
87
+ window_size=window_size,
88
+ )
89
+ )
90
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
91
+ self.ffn_layers.append(
92
+ FFN(
93
+ hidden_channels,
94
+ hidden_channels,
95
+ filter_channels,
96
+ kernel_size,
97
+ p_dropout=p_dropout,
98
+ )
99
+ )
100
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
101
+
102
+ # def forward(self, x, x_mask, g=None):
103
+ # attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
104
+ # x = x * x_mask
105
+ # for i in range(self.n_layers):
106
+ # if i == self.cond_layer_idx and g is not None:
107
+ # g = self.spk_emb_linear(g.transpose(1, 2))
108
+ # g = g.transpose(1, 2)
109
+ # x = x + g
110
+ # x = x * x_mask
111
+ # y = self.attn_layers[i](x, x, attn_mask)
112
+ # y = self.drop(y)
113
+ # x = self.norm_layers_1[i](x + y)
114
+
115
+ # y = self.ffn_layers[i](x, x_mask)
116
+ # y = self.drop(y)
117
+ # x = self.norm_layers_2[i](x + y)
118
+ # x = x * x_mask
119
+ # return x
120
+
121
+ def forward(self, x, x_mask):
122
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
123
+ x = x * x_mask
124
+ for attn_layer, norm_layer_1, ffn_layer, norm_layer_2 in zip(self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2):
125
+ y = attn_layer(x, x, attn_mask)
126
+ y = self.drop(y)
127
+ x = norm_layer_1(x + y)
128
+
129
+ y = ffn_layer(x, x_mask)
130
+ y = self.drop(y)
131
+ x = norm_layer_2(x + y)
132
+ x = x * x_mask
133
+ return x
134
+
135
+
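A minimal usage sketch for the Encoder above — it stacks n_layers of windowed relative-position self-attention plus a convolutional FFN, reapplying the mask after every block. All hyperparameters below are illustrative, not taken from any config in this repo, and the sketch assumes the module (including its commons dependency) is importable:

```python
import torch

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6)
x = torch.randn(1, 192, 50)          # [batch, hidden, time]
x_mask = torch.ones(1, 1, 50)        # 1 = valid frame
y = enc(x, x_mask)
print(y.shape)                       # torch.Size([1, 192, 50])
```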
136
+ class MultiHeadAttention(nn.Module):
137
+ def __init__(
138
+ self,
139
+ channels,
140
+ out_channels,
141
+ n_heads,
142
+ p_dropout=0.0,
143
+ window_size=None,
144
+ heads_share=True,
145
+ block_length=None,
146
+ proximal_bias=False,
147
+ proximal_init=False,
148
+ ):
149
+ super().__init__()
150
+ assert channels % n_heads == 0
151
+
152
+ self.channels = channels
153
+ self.out_channels = out_channels
154
+ self.n_heads = n_heads
155
+ self.p_dropout = p_dropout
156
+ self.window_size = window_size
157
+ self.heads_share = heads_share
158
+ self.block_length = block_length
159
+ self.proximal_bias = proximal_bias
160
+ self.proximal_init = proximal_init
161
+ self.attn = None
162
+
163
+ self.k_channels = channels // n_heads
164
+ self.conv_q = nn.Conv1d(channels, channels, 1)
165
+ self.conv_k = nn.Conv1d(channels, channels, 1)
166
+ self.conv_v = nn.Conv1d(channels, channels, 1)
167
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
168
+ self.drop = nn.Dropout(p_dropout)
169
+
170
+ if window_size is not None:
171
+ n_heads_rel = 1 if heads_share else n_heads
172
+ rel_stddev = self.k_channels**-0.5
173
+ self.emb_rel_k = nn.Parameter(
174
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
175
+ * rel_stddev
176
+ )
177
+ self.emb_rel_v = nn.Parameter(
178
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
179
+ * rel_stddev
180
+ )
181
+
182
+ nn.init.xavier_uniform_(self.conv_q.weight)
183
+ nn.init.xavier_uniform_(self.conv_k.weight)
184
+ nn.init.xavier_uniform_(self.conv_v.weight)
185
+ if proximal_init:
186
+ with torch.no_grad():
187
+ self.conv_k.weight.copy_(self.conv_q.weight)
188
+ self.conv_k.bias.copy_(self.conv_q.bias)
189
+
190
+ def forward(self, x, c, attn_mask:Optional[torch.Tensor]=None):
191
+ q = self.conv_q(x)
192
+ k = self.conv_k(c)
193
+ v = self.conv_v(c)
194
+
195
+ # x, self.attn = self.attention(q, k, v, mask=attn_mask)
196
+ x, _ = self.attention(q, k, v, mask=attn_mask)
197
+
198
+ x = self.conv_o(x)
199
+ return x
200
+
201
+ def attention(self, query, key, value, mask:Optional[torch.Tensor]=None):
202
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
203
+ b, d, t_s, _ = (*key.size(), query.size(2))
204
+ query = query.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
205
+ key = key.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
206
+ value = value.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
207
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
208
+
209
+ if self.window_size is not None:
210
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
211
+ rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
212
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
213
+ scores = scores + scores_local
214
+
215
+ if mask is not None:
216
+ scores = scores.masked_fill(mask == 0, -1e4)
217
+
218
+ p_attn = F.softmax(scores, dim=-1)
219
+ p_attn = self.drop(p_attn)
220
+ output = torch.matmul(p_attn, value)
221
+
222
+ if self.window_size is not None:
223
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
224
+ value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
225
+ output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
226
+
227
+ output = output.transpose(2, 3).contiguous().view(b, d, -1)  # [b, n_h, t, d_k] -> [b, d, t]
228
+ return output, p_attn
229
+
230
+ def _matmul_with_relative_values(self, x, y):
231
+ """
232
+ x: [b, h, l, m]
233
+ y: [h or 1, m, d]
234
+ ret: [b, h, l, d]
235
+ """
236
+ ret = torch.matmul(x, y.unsqueeze(0))
237
+ return ret
238
+
239
+ def _matmul_with_relative_keys(self, x, y):
240
+ """
241
+ x: [b, h, l, d]
242
+ y: [h or 1, m, d]
243
+ ret: [b, h, l, m]
244
+ """
245
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
246
+ return ret
247
+
248
+ def _get_relative_embeddings(self, relative_embeddings, length):
249
+ max_relative_position = 2 * self.window_size + 1
250
+ # Pad first before slice to avoid using cond ops.
251
+ pad_l = torch.zeros(1, dtype=torch.int64) + length - (self.window_size + 1)
252
+ pad_s = torch.zeros(1, dtype=torch.int64) + (self.window_size + 1) - length
253
+ pad_length = torch.max(pad_l, other=torch.zeros(1, dtype=torch.int64))
254
+ slice_start_position = torch.max(pad_s, other=torch.zeros(1, dtype=torch.int64))
255
+
256
+ slice_end_position = slice_start_position + 2 * length - 1
257
+ padded_relative_embeddings = F.pad(
258
+ relative_embeddings,
259
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
260
+ )
261
+ used_relative_embeddings = padded_relative_embeddings[
262
+ :, slice_start_position:slice_end_position
263
+ ]
264
+ return used_relative_embeddings
265
+
266
+ def _relative_position_to_absolute_position(self, x):
267
+ """
268
+ x: [b, h, l, 2*l-1]
269
+ ret: [b, h, l, l]
270
+ """
271
+ batch, heads, length, _ = x.size()
272
+ # Concat columns of pad to shift from relative to absolute indexing.
273
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
274
+
275
+ # Concat extra elements so that the flat tensor reshapes to (len+1, 2*len-1).
276
+ x_flat = x.view([batch, heads, length * 2 * length])
277
+ x_flat = F.pad(
278
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
279
+ )
280
+
281
+ # Reshape and slice out the padded elements.
282
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
283
+ :, :, :length, length - 1 :
284
+ ]
285
+ return x_final
286
+
287
+ def _absolute_position_to_relative_position(self, x):
288
+ """
289
+ x: [b, h, l, l]
290
+ ret: [b, h, l, 2*l-1]
291
+ """
292
+ batch, heads, length, _ = x.size()
293
+ # pad along columns
294
+ x = F.pad(
295
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
296
+ )
297
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
298
+ # add 0's in the beginning that will skew the elements after reshape
299
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
300
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
301
+ return x_final
302
+
303
+ def _attention_bias_proximal(self, length):
304
+ """Bias for self-attention to encourage attention to close positions.
305
+ Args:
306
+ length: an integer scalar.
307
+ Returns:
308
+ a Tensor with shape [1, 1, length, length]
309
+ """
310
+ r = torch.arange(length, dtype=torch.float32)
311
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
312
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
313
+
314
+
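_relative_position_to_absolute_position uses the standard "skewing" trick: pad one column, flatten, pad length-1 more entries, then reshape to [l+1, 2l-1] and slice — each row shifts by one so relative offset 0 lands on the diagonal. A standalone demonstration with a tiny tensor (independent of the class):

```python
import torch
import torch.nn.functional as F

b, h, l = 1, 1, 3
# each row holds relative logits for offsets -(l-1) .. (l-1), encoded as 0 .. 2l-2
x = torch.arange(2 * l - 1).float().expand(b, h, l, 2 * l - 1)
x = F.pad(x, (0, 1))                          # one extra column per row
x_flat = x.view(b, h, l * 2 * l)
x_flat = F.pad(x_flat, (0, l - 1))            # extra tail entries skew the rows
out = x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(out[0, 0])
# row i, column j now holds the logit for relative offset j - i
```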
315
+ class FFN(nn.Module):
316
+ def __init__(
317
+ self,
318
+ in_channels,
319
+ out_channels,
320
+ filter_channels,
321
+ kernel_size,
322
+ p_dropout=0.0,
323
+ activation="",
324
+ causal=False,
325
+ ):
326
+ super().__init__()
327
+ self.in_channels = in_channels
328
+ self.out_channels = out_channels
329
+ self.filter_channels = filter_channels
330
+ self.kernel_size = kernel_size
331
+ self.p_dropout = p_dropout
332
+ self.activation = activation
333
+ self.causal = causal
334
+
335
+ # Judging from the surrounding code, causal is always False here
336
+ # if causal:
337
+ # self.padding = self._causal_padding
338
+ # else:
339
+ # self.padding = self._same_padding
340
+
341
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
342
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
343
+ self.drop = nn.Dropout(p_dropout)
344
+
345
+ def forward(self, x, x_mask):
346
+ x = self.conv_1(self.padding(x * x_mask))
347
+ if self.activation == "gelu":
348
+ x = x * torch.sigmoid(1.702 * x)
349
+ else:
350
+ x = torch.relu(x)
351
+ x = self.drop(x)
352
+ x = self.conv_2(self.padding(x * x_mask))
353
+ return x * x_mask
354
+
355
+ def padding(self, x):
356
+ return self._same_padding(x)
357
+
358
+ def _causal_padding(self, x):
359
+ if self.kernel_size == 1:
360
+ return x
361
+ pad_l = self.kernel_size - 1
362
+ pad_r = 0
363
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
364
+ x = F.pad(x, commons.convert_pad_shape(padding))
365
+ return x
366
+
367
+ def _same_padding(self, x):
368
+ if self.kernel_size == 1:
369
+ return x
370
+ pad_l = (self.kernel_size - 1) // 2
371
+ pad_r = self.kernel_size // 2
372
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
373
+ x = F.pad(x, commons.convert_pad_shape(padding))
374
+ return x
375
+
376
+
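FFN keeps the time dimension fixed by padding explicitly before each Conv1d instead of relying on the conv's own padding; the constructor's method-assignment dispatch was commented out (TorchScript/ONNX export cannot handle assigning a bound method to an attribute), so padding() always routes to _same_padding. A sketch with illustrative sizes, assuming the class above is in scope:

```python
import torch

ffn = FFN(in_channels=192, out_channels=192, filter_channels=768, kernel_size=5)
x = torch.randn(1, 192, 40)
x_mask = torch.ones(1, 1, 40)
y = ffn(x, x_mask)
print(y.shape)   # torch.Size([1, 192, 40]) — length preserved by _same_padding
```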
377
+ class MRTE(nn.Module):
378
+ def __init__(
379
+ self,
380
+ content_enc_channels=192,
381
+ hidden_size=512,
382
+ out_channels=192,
383
+ kernel_size=5,
384
+ n_heads=4,
385
+ ge_layer=2,
386
+ ):
387
+ super().__init__()
388
+ self.cross_attention = MultiHeadAttention(hidden_size, hidden_size, n_heads)
389
+ self.c_pre = nn.Conv1d(content_enc_channels, hidden_size, 1)
390
+ self.text_pre = nn.Conv1d(content_enc_channels, hidden_size, 1)
391
+ self.c_post = nn.Conv1d(hidden_size, out_channels, 1)
392
+
393
+ def forward(self, ssl_enc, ssl_mask, text, text_mask, ge):
394
+ attn_mask = text_mask.unsqueeze(2) * ssl_mask.unsqueeze(-1)
395
+
396
+ ssl_enc = self.c_pre(ssl_enc * ssl_mask)
397
+ text_enc = self.text_pre(text * text_mask)
398
+ x = (
399
+ self.cross_attention(
400
+ ssl_enc * ssl_mask, text_enc * text_mask, attn_mask
401
+ )
402
+ + ssl_enc
403
+ + ge
404
+ )
405
+ x = self.c_post(x * ssl_mask)
406
+ return x
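MRTE fuses SSL content features with text features through a single cross-attention block, adds the attended text back onto the content stream together with the global speaker embedding ge, and projects down to out_channels. A dummy-tensor sketch (all sizes illustrative, matching the defaults above; assumes the module is importable):

```python
import torch

mrte = MRTE()                          # defaults: 192 -> 512 -> 192, 4 heads
ssl_enc = torch.randn(1, 192, 100)     # content features [b, c, t_ssl]
ssl_mask = torch.ones(1, 1, 100)
text = torch.randn(1, 192, 30)         # text features [b, c, t_text]
text_mask = torch.ones(1, 1, 30)
ge = torch.randn(1, 512, 1)            # global speaker embedding
out = mrte(ssl_enc, ssl_mask, text, text_mask, ge)
print(out.shape)                       # torch.Size([1, 192, 100])
```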