primepake committed on
Commit 32d5b2b · 1 Parent(s): 24941fa

add flow matching

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. speech/.gitignore +52 -0
  2. speech/.gitmodules +3 -0
  3. speech/asset/dingding.png +3 -0
  4. speech/cosyvoice/__init__.py +0 -0
  5. speech/cosyvoice/bin/export_jit.py +103 -0
  6. speech/cosyvoice/bin/export_onnx.py +120 -0
  7. speech/cosyvoice/cli/__init__.py +0 -0
  8. speech/cosyvoice/cli/cosyvoice.py +194 -0
  9. speech/cosyvoice/cli/frontend.py +215 -0
  10. speech/cosyvoice/cli/model.py +386 -0
  11. speech/cosyvoice/dataset/__init__.py +0 -0
  12. speech/cosyvoice/dataset/dataset.py +151 -0
  13. speech/cosyvoice/dataset/processor.py +434 -0
  14. speech/cosyvoice/flow/decoder.py +494 -0
  15. speech/cosyvoice/flow/flow.py +281 -0
  16. speech/cosyvoice/flow/flow_matching.py +227 -0
  17. speech/cosyvoice/flow/length_regulator.py +70 -0
  18. speech/cosyvoice/hifigan/discriminator.py +230 -0
  19. speech/cosyvoice/hifigan/f0_predictor.py +58 -0
  20. speech/cosyvoice/hifigan/generator.py +582 -0
  21. speech/cosyvoice/hifigan/hifigan.py +67 -0
  22. speech/cosyvoice/llm/llm.py +610 -0
  23. speech/cosyvoice/tokenizer/assets/multilingual_zh_ja_yue_char_del.tiktoken +0 -0
  24. speech/cosyvoice/tokenizer/tokenizer.py +279 -0
  25. speech/cosyvoice/transformer/__init__.py +0 -0
  26. speech/cosyvoice/transformer/activation.py +84 -0
  27. speech/cosyvoice/transformer/attention.py +330 -0
  28. speech/cosyvoice/transformer/convolution.py +145 -0
  29. speech/cosyvoice/transformer/decoder.py +396 -0
  30. speech/cosyvoice/transformer/decoder_layer.py +132 -0
  31. speech/cosyvoice/transformer/embedding.py +302 -0
  32. speech/cosyvoice/transformer/encoder.py +474 -0
  33. speech/cosyvoice/transformer/encoder_layer.py +236 -0
  34. speech/cosyvoice/transformer/label_smoothing_loss.py +96 -0
  35. speech/cosyvoice/transformer/positionwise_feed_forward.py +115 -0
  36. speech/cosyvoice/transformer/subsampling.py +383 -0
  37. speech/cosyvoice/transformer/upsample_encoder.py +320 -0
  38. speech/cosyvoice/utils/__init__.py +0 -0
  39. speech/cosyvoice/utils/class_utils.py +83 -0
  40. speech/cosyvoice/utils/common.py +186 -0
  41. speech/cosyvoice/utils/executor.py +176 -0
  42. speech/cosyvoice/utils/file_utils.py +129 -0
  43. speech/cosyvoice/utils/frontend_utils.py +136 -0
  44. speech/cosyvoice/utils/losses.py +57 -0
  45. speech/cosyvoice/utils/mask.py +265 -0
  46. speech/cosyvoice/utils/scheduler.py +738 -0
  47. speech/cosyvoice/utils/train_utils.py +367 -0
  48. speech/examples/magicdata-read/cosyvoice/conf +1 -0
  49. speech/examples/magicdata-read/cosyvoice/cosyvoice +1 -0
  50. speech/examples/magicdata-read/cosyvoice/local/prepare_data.py +52 -0
speech/.gitignore ADDED
@@ -0,0 +1,52 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # Visual Studio Code files
+ .vscode
+ .vs
+
+ # PyCharm files
+ .idea
+
+ # Eclipse Project settings
+ *.*project
+ .settings
+
+ # Sublime Text settings
+ *.sublime-workspace
+ *.sublime-project
+
+ # Editor temporaries
+ *.swn
+ *.swo
+ *.swp
+ *.swm
+ *~
+
+ # IPython notebook checkpoints
+ .ipynb_checkpoints
+
+ # macOS dir files
+ .DS_Store
+
+ exp
+ data
+ raw_wav
+ tensorboard
+ **/*build*
+
+ # Clangd files
+ .cache
+ compile_commands.json
+
+ # train/inference files
+ *.wav
+ *.m4a
+ *.aac
+ *.pt
+ pretrained_models/*
+ *_pb2_grpc.py
+ *_pb2.py
+ *.tar
speech/.gitmodules ADDED
@@ -0,0 +1,3 @@
+ [submodule "third_party/Matcha-TTS"]
+     path = third_party/Matcha-TTS
+     url = https://github.com/shivammehta25/Matcha-TTS.git
speech/asset/dingding.png ADDED

Git LFS Details

  • SHA256: ff82909abd313b24ab6c6bf1cf5ce09014068474a35a4d8d3b8084c8cf0e9503
  • Pointer size: 130 Bytes
  • Size of remote file: 96.4 kB
speech/cosyvoice/__init__.py ADDED
File without changes
speech/cosyvoice/bin/export_jit.py ADDED
@@ -0,0 +1,103 @@
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from __future__ import print_function
+
+ import argparse
+ import logging
+ logging.getLogger('matplotlib').setLevel(logging.WARNING)
+ import os
+ import sys
+ import torch
+ ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append('{}/../..'.format(ROOT_DIR))
+ sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR))
+ from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
+ from cosyvoice.utils.file_utils import logging
+
+
+ def get_args():
+     parser = argparse.ArgumentParser(description='export your model for deployment')
+     parser.add_argument('--model_dir',
+                         type=str,
+                         default='pretrained_models/CosyVoice-300M',
+                         help='local path')
+     args = parser.parse_args()
+     print(args)
+     return args
+
+
+ def get_optimized_script(model, preserved_attrs=[]):
+     script = torch.jit.script(model)
+     if preserved_attrs != []:
+         script = torch.jit.freeze(script, preserved_attrs=preserved_attrs)
+     else:
+         script = torch.jit.freeze(script)
+     script = torch.jit.optimize_for_inference(script)
+     return script
+
+
+ def main():
+     args = get_args()
+     logging.basicConfig(level=logging.DEBUG,
+                         format='%(asctime)s %(levelname)s %(message)s')
+
+     torch._C._jit_set_fusion_strategy([('STATIC', 1)])
+     torch._C._jit_set_profiling_mode(False)
+     torch._C._jit_set_profiling_executor(False)
+
+     try:
+         model = CosyVoice(args.model_dir)
+     except Exception:
+         try:
+             model = CosyVoice2(args.model_dir)
+         except Exception:
+             raise TypeError('no valid model_type!')
+
+     if not isinstance(model, CosyVoice2):
+         # 1. export llm text_encoder
+         llm_text_encoder = model.model.llm.text_encoder
+         script = get_optimized_script(llm_text_encoder)
+         script.save('{}/llm.text_encoder.fp32.zip'.format(args.model_dir))
+         script = get_optimized_script(llm_text_encoder.half())
+         script.save('{}/llm.text_encoder.fp16.zip'.format(args.model_dir))
+         logging.info('successfully export llm_text_encoder')
+
+         # 2. export llm llm
+         llm_llm = model.model.llm.llm
+         script = get_optimized_script(llm_llm, ['forward_chunk'])
+         script.save('{}/llm.llm.fp32.zip'.format(args.model_dir))
+         script = get_optimized_script(llm_llm.half(), ['forward_chunk'])
+         script.save('{}/llm.llm.fp16.zip'.format(args.model_dir))
+         logging.info('successfully export llm_llm')
+
+         # 3. export flow encoder
+         flow_encoder = model.model.flow.encoder
+         script = get_optimized_script(flow_encoder)
+         script.save('{}/flow.encoder.fp32.zip'.format(args.model_dir))
+         script = get_optimized_script(flow_encoder.half())
+         script.save('{}/flow.encoder.fp16.zip'.format(args.model_dir))
+         logging.info('successfully export flow_encoder')
+     else:
+         # 3. export flow encoder
+         flow_encoder = model.model.flow.encoder
+         script = get_optimized_script(flow_encoder)
+         script.save('{}/flow.encoder.fp32.zip'.format(args.model_dir))
+         script = get_optimized_script(flow_encoder.half())
+         script.save('{}/flow.encoder.fp16.zip'.format(args.model_dir))
+         logging.info('successfully export flow_encoder')
+
+
+ if __name__ == '__main__':
+     main()
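A quick way to sanity-check the exported archives is to load one back with torch.jit.load. A minimal sketch, assuming the default model_dir used above (the path is illustrative):

# Minimal sketch (assumption): reload one of the TorchScript archives saved above.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Path mirrors the save pattern in main(); adjust model_dir to your setup.
flow_encoder = torch.jit.load('pretrained_models/CosyVoice-300M/flow.encoder.fp32.zip',
                              map_location=device)
print(type(flow_encoder))  # a frozen torch.jit.ScriptModule, ready for forward calls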
speech/cosyvoice/bin/export_onnx.py ADDED
@@ -0,0 +1,120 @@
+ # Copyright (c) 2024 Antgroup Inc (authors: Zhoubofan, hexisyztem@icloud.com)
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from __future__ import print_function
+
+ import argparse
+ import logging
+ logging.getLogger('matplotlib').setLevel(logging.WARNING)
+ import os
+ import sys
+ import onnxruntime
+ import random
+ import torch
+ from tqdm import tqdm
+ ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append('{}/../..'.format(ROOT_DIR))
+ sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR))
+ from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
+ from cosyvoice.utils.file_utils import logging
+
+
+ def get_dummy_input(batch_size, seq_len, out_channels, device):
+     x = torch.rand((batch_size, out_channels, seq_len), dtype=torch.float32, device=device)
+     mask = torch.ones((batch_size, 1, seq_len), dtype=torch.float32, device=device)
+     mu = torch.rand((batch_size, out_channels, seq_len), dtype=torch.float32, device=device)
+     t = torch.rand((batch_size), dtype=torch.float32, device=device)
+     spks = torch.rand((batch_size, out_channels), dtype=torch.float32, device=device)
+     cond = torch.rand((batch_size, out_channels, seq_len), dtype=torch.float32, device=device)
+     return x, mask, mu, t, spks, cond
+
+
+ def get_args():
+     parser = argparse.ArgumentParser(description='export your model for deployment')
+     parser.add_argument('--model_dir',
+                         type=str,
+                         default='pretrained_models/CosyVoice-300M',
+                         help='local path')
+     args = parser.parse_args()
+     print(args)
+     return args
+
+
+ @torch.no_grad()
+ def main():
+     args = get_args()
+     logging.basicConfig(level=logging.DEBUG,
+                         format='%(asctime)s %(levelname)s %(message)s')
+
+     try:
+         model = CosyVoice(args.model_dir)
+     except Exception:
+         try:
+             model = CosyVoice2(args.model_dir)
+         except Exception:
+             raise TypeError('no valid model_type!')
+
+     # 1. export flow decoder estimator
+     estimator = model.model.flow.decoder.estimator
+     estimator.eval()
+
+     device = model.model.device
+     batch_size, seq_len = 2, 256
+     out_channels = model.model.flow.decoder.estimator.out_channels
+     x, mask, mu, t, spks, cond = get_dummy_input(batch_size, seq_len, out_channels, device)
+     torch.onnx.export(
+         estimator,
+         (x, mask, mu, t, spks, cond),
+         '{}/flow.decoder.estimator.fp32.onnx'.format(args.model_dir),
+         export_params=True,
+         opset_version=18,
+         do_constant_folding=True,
+         input_names=['x', 'mask', 'mu', 't', 'spks', 'cond'],
+         output_names=['estimator_out'],
+         dynamic_axes={
+             'x': {2: 'seq_len'},
+             'mask': {2: 'seq_len'},
+             'mu': {2: 'seq_len'},
+             'cond': {2: 'seq_len'},
+             'estimator_out': {2: 'seq_len'},
+         }
+     )
+
+     # 2. test computation consistency
+     option = onnxruntime.SessionOptions()
+     option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+     option.intra_op_num_threads = 1
+     providers = ['CUDAExecutionProvider' if torch.cuda.is_available() else 'CPUExecutionProvider']
+     estimator_onnx = onnxruntime.InferenceSession('{}/flow.decoder.estimator.fp32.onnx'.format(args.model_dir),
+                                                   sess_options=option, providers=providers)
+
+     for _ in tqdm(range(10)):
+         x, mask, mu, t, spks, cond = get_dummy_input(batch_size, random.randint(16, 512), out_channels, device)
+         output_pytorch = estimator(x, mask, mu, t, spks, cond)
+         ort_inputs = {
+             'x': x.cpu().numpy(),
+             'mask': mask.cpu().numpy(),
+             'mu': mu.cpu().numpy(),
+             't': t.cpu().numpy(),
+             'spks': spks.cpu().numpy(),
+             'cond': cond.cpu().numpy()
+         }
+         output_onnx = estimator_onnx.run(None, ort_inputs)[0]
+         torch.testing.assert_allclose(output_pytorch, torch.from_numpy(output_onnx).to(device), rtol=1e-2, atol=1e-4)
+     logging.info('successfully export estimator')
+
+
+ if __name__ == "__main__":
+     main()
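The exported estimator can then be exercised with onnxruntime alone. A minimal sketch mirroring the input names and layouts used in the export above; the 80-channel width is an assumption for illustration, in practice read out_channels from the model:

# Minimal sketch (assumption): drive the exported flow decoder estimator standalone.
import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession('pretrained_models/CosyVoice-300M/flow.decoder.estimator.fp32.onnx',
                                    providers=['CPUExecutionProvider'])
batch, channels, seq_len = 2, 80, 256  # channels=80 is an assumed mel width
ort_inputs = {
    'x': np.random.rand(batch, channels, seq_len).astype(np.float32),
    'mask': np.ones((batch, 1, seq_len), dtype=np.float32),
    'mu': np.random.rand(batch, channels, seq_len).astype(np.float32),
    't': np.random.rand(batch).astype(np.float32),
    'spks': np.random.rand(batch, channels).astype(np.float32),
    'cond': np.random.rand(batch, channels, seq_len).astype(np.float32),
}
out = sess.run(None, ort_inputs)[0]
print(out.shape)  # expected (batch, channels, seq_len), seq_len being a dynamic axis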
speech/cosyvoice/cli/__init__.py ADDED
File without changes
speech/cosyvoice/cli/cosyvoice.py ADDED
@@ -0,0 +1,194 @@
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+ import time
+ from typing import Generator
+ from tqdm import tqdm
+ from hyperpyyaml import load_hyperpyyaml
+ from modelscope import snapshot_download
+ import torch
+ from cosyvoice.cli.frontend import CosyVoiceFrontEnd
+ from cosyvoice.cli.model import CosyVoiceModel, CosyVoice2Model
+ from cosyvoice.utils.file_utils import logging
+ from cosyvoice.utils.class_utils import get_model_type
+
+
+ class CosyVoice:
+
+     def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False, trt_concurrent=1):
+         self.instruct = True if '-Instruct' in model_dir else False
+         self.model_dir = model_dir
+         self.fp16 = fp16
+         if not os.path.exists(model_dir):
+             model_dir = snapshot_download(model_dir)
+         hyper_yaml_path = '{}/cosyvoice.yaml'.format(model_dir)
+         if not os.path.exists(hyper_yaml_path):
+             raise ValueError('{} not found!'.format(hyper_yaml_path))
+         with open(hyper_yaml_path, 'r') as f:
+             configs = load_hyperpyyaml(f)
+         assert get_model_type(configs) != CosyVoice2Model, 'do not use {} for CosyVoice initialization!'.format(model_dir)
+         self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
+                                           configs['feat_extractor'],
+                                           '{}/campplus.onnx'.format(model_dir),
+                                           '{}/speech_tokenizer_v1.onnx'.format(model_dir),
+                                           '{}/spk2info.pt'.format(model_dir),
+                                           configs['allowed_special'])
+         self.sample_rate = configs['sample_rate']
+         if torch.cuda.is_available() is False and (load_jit is True or load_trt is True or fp16 is True):
+             load_jit, load_trt, fp16 = False, False, False
+             logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
+         self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'], fp16)
+         self.model.load('{}/llm.pt'.format(model_dir),
+                         '{}/flow.pt'.format(model_dir),
+                         '{}/hift.pt'.format(model_dir))
+         if load_jit:
+             self.model.load_jit('{}/llm.text_encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                 '{}/llm.llm.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                 '{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
+         if load_trt:
+             self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                 '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
+                                 trt_concurrent,
+                                 self.fp16)
+         del configs
+
+     def list_available_spks(self):
+         spks = list(self.frontend.spk2info.keys())
+         return spks
+
+     def add_zero_shot_spk(self, prompt_text, prompt_speech_16k, zero_shot_spk_id):
+         assert zero_shot_spk_id != '', 'do not use empty zero_shot_spk_id'
+         model_input = self.frontend.frontend_zero_shot('', prompt_text, prompt_speech_16k, self.sample_rate, '')
+         del model_input['text']
+         del model_input['text_len']
+         self.frontend.spk2info[zero_shot_spk_id] = model_input
+         return True
+
+     def save_spkinfo(self):
+         torch.save(self.frontend.spk2info, '{}/spk2info.pt'.format(self.model_dir))
+
+     def inference_sft(self, tts_text, spk_id, stream=False, speed=1.0, text_frontend=True):
+         for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
+             model_input = self.frontend.frontend_sft(i, spk_id)
+             start_time = time.time()
+             logging.info('synthesis text {}'.format(i))
+             for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
+                 speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
+                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+                 yield model_output
+                 start_time = time.time()
+
+     def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, zero_shot_spk_id='', stream=False, speed=1.0, text_frontend=True):
+         prompt_text = self.frontend.text_normalize(prompt_text, split=False, text_frontend=text_frontend)
+         for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
+             if (not isinstance(i, Generator)) and len(i) < 0.5 * len(prompt_text):
+                 logging.warning('synthesis text {} too short than prompt text {}, this may lead to bad performance'.format(i, prompt_text))
+             model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k, self.sample_rate, zero_shot_spk_id)
+             start_time = time.time()
+             logging.info('synthesis text {}'.format(i))
+             for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
+                 speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
+                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+                 yield model_output
+                 start_time = time.time()
+
+     def inference_cross_lingual(self, tts_text, prompt_speech_16k, zero_shot_spk_id='', stream=False, speed=1.0, text_frontend=True):
+         for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
+             model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k, self.sample_rate, zero_shot_spk_id)
+             start_time = time.time()
+             logging.info('synthesis text {}'.format(i))
+             for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
+                 speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
+                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+                 yield model_output
+                 start_time = time.time()
+
+     def inference_instruct(self, tts_text, spk_id, instruct_text, stream=False, speed=1.0, text_frontend=True):
+         assert isinstance(self.model, CosyVoiceModel), 'inference_instruct is only implemented for CosyVoice!'
+         if self.instruct is False:
+             raise ValueError('{} do not support instruct inference'.format(self.model_dir))
+         instruct_text = self.frontend.text_normalize(instruct_text, split=False, text_frontend=text_frontend)
+         for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
+             model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
+             start_time = time.time()
+             logging.info('synthesis text {}'.format(i))
+             for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
+                 speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
+                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+                 yield model_output
+                 start_time = time.time()
+
+     def inference_vc(self, source_speech_16k, prompt_speech_16k, stream=False, speed=1.0):
+         model_input = self.frontend.frontend_vc(source_speech_16k, prompt_speech_16k, self.sample_rate)
+         start_time = time.time()
+         for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
+             speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
+             logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+             yield model_output
+             start_time = time.time()
+
+
+ class CosyVoice2(CosyVoice):
+
+     def __init__(self, model_dir, load_jit=False, load_trt=False, load_vllm=False, fp16=False, trt_concurrent=1):
+         self.instruct = True if '-Instruct' in model_dir else False
+         self.model_dir = model_dir
+         self.fp16 = fp16
+         if not os.path.exists(model_dir):
+             model_dir = snapshot_download(model_dir)
+         hyper_yaml_path = '{}/cosyvoice2.yaml'.format(model_dir)
+         if not os.path.exists(hyper_yaml_path):
+             raise ValueError('{} not found!'.format(hyper_yaml_path))
+         with open(hyper_yaml_path, 'r') as f:
+             configs = load_hyperpyyaml(f, overrides={'qwen_pretrain_path': os.path.join(model_dir, 'CosyVoice-BlankEN')})
+         assert get_model_type(configs) == CosyVoice2Model, 'do not use {} for CosyVoice2 initialization!'.format(model_dir)
+         self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
+                                           configs['feat_extractor'],
+                                           '{}/campplus.onnx'.format(model_dir),
+                                           '{}/speech_tokenizer_v2.onnx'.format(model_dir),
+                                           '{}/spk2info.pt'.format(model_dir),
+                                           configs['allowed_special'])
+         self.sample_rate = configs['sample_rate']
+         if torch.cuda.is_available() is False and (load_jit is True or load_trt is True or fp16 is True):
+             load_jit, load_trt, fp16 = False, False, False
+             logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
+         self.model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'], fp16)
+         self.model.load('{}/llm.pt'.format(model_dir),
+                         '{}/flow.pt'.format(model_dir),
+                         '{}/hift.pt'.format(model_dir))
+         if load_vllm:
+             self.model.load_vllm('{}/vllm'.format(model_dir))
+         if load_jit:
+             self.model.load_jit('{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
+         if load_trt:
+             self.model.load_trt('{}/flow.decoder.estimator.{}.mygpu.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                 '{}/flow.decoder.estimator.fp32.onnx'.format(model_dir),
+                                 trt_concurrent,
+                                 self.fp16)
+         del configs
+
+     def inference_instruct(self, *args, **kwargs):
+         raise NotImplementedError('inference_instruct is not implemented for CosyVoice2!')
+
+     def inference_instruct2(self, tts_text, instruct_text, prompt_speech_16k, zero_shot_spk_id='', stream=False, speed=1.0, text_frontend=True):
+         assert isinstance(self.model, CosyVoice2Model), 'inference_instruct2 is only implemented for CosyVoice2!'
+         for i in tqdm(self.frontend.text_normalize(tts_text, split=True, text_frontend=text_frontend)):
+             model_input = self.frontend.frontend_instruct2(i, instruct_text, prompt_speech_16k, self.sample_rate, zero_shot_spk_id)
+             start_time = time.time()
+             logging.info('synthesis text {}'.format(i))
+             for model_output in self.model.tts(**model_input, stream=stream, speed=speed):
+                 speech_len = model_output['tts_speech'].shape[1] / self.sample_rate
+                 logging.info('yield speech len {}, rtf {}'.format(speech_len, (time.time() - start_time) / speech_len))
+                 yield model_output
+                 start_time = time.time()
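For orientation, a minimal usage sketch of the API defined above. The model directory and prompt file are hypothetical; loading falls back to modelscope snapshot_download as in __init__:

# Minimal usage sketch (assumption): zero-shot synthesis with the CosyVoice class above.
import sys
import torchaudio
sys.path.append('third_party/Matcha-TTS')
from cosyvoice.cli.cosyvoice import CosyVoice

cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M')  # downloaded if not present locally
prompt_speech_16k, sr = torchaudio.load('prompt.wav')      # hypothetical 16 kHz mono prompt
assert sr == 16000
for idx, out in enumerate(cosyvoice.inference_zero_shot('Hello, this is a test.',
                                                        'This is the prompt transcript.',
                                                        prompt_speech_16k,
                                                        stream=False)):
    torchaudio.save('zero_shot_{}.wav'.format(idx), out['tts_speech'], cosyvoice.sample_rate)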
speech/cosyvoice/cli/frontend.py ADDED
@@ -0,0 +1,215 @@
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from functools import partial
+ from typing import Generator
+ import json
+ import onnxruntime
+ import torch
+ import numpy as np
+ import whisper
+ from typing import Callable
+ import torchaudio.compliance.kaldi as kaldi
+ import torchaudio
+ import os
+ import re
+ import inflect
+ try:
+     import ttsfrd
+     use_ttsfrd = True
+ except ImportError:
+     print("failed to import ttsfrd, use wetext instead")
+     from wetext import Normalizer as ZhNormalizer
+     from wetext import Normalizer as EnNormalizer
+     use_ttsfrd = False
+ from cosyvoice.utils.file_utils import logging
+ from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph, is_only_punctuation
+
+
+ class CosyVoiceFrontEnd:
+
+     def __init__(self,
+                  get_tokenizer: Callable,
+                  feat_extractor: Callable,
+                  campplus_model: str,
+                  speech_tokenizer_model: str,
+                  spk2info: str = '',
+                  allowed_special: str = 'all'):
+         self.tokenizer = get_tokenizer()
+         self.feat_extractor = feat_extractor
+         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+         option = onnxruntime.SessionOptions()
+         option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+         option.intra_op_num_threads = 1
+         self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
+         self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option,
+                                                                      providers=["CUDAExecutionProvider" if torch.cuda.is_available() else
+                                                                                 "CPUExecutionProvider"])
+         if os.path.exists(spk2info):
+             self.spk2info = torch.load(spk2info, map_location=self.device)
+         else:
+             self.spk2info = {}
+         self.allowed_special = allowed_special
+         self.use_ttsfrd = use_ttsfrd
+         if self.use_ttsfrd:
+             self.frd = ttsfrd.TtsFrontendEngine()
+             ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+             assert self.frd.initialize('{}/../../pretrained_models/CosyVoice-ttsfrd/resource'.format(ROOT_DIR)) is True, \
+                 'failed to initialize ttsfrd resource'
+             self.frd.set_lang_type('pinyinvg')
+         else:
+             self.zh_tn_model = ZhNormalizer(remove_erhua=False)
+             self.en_tn_model = EnNormalizer()
+             self.inflect_parser = inflect.engine()
+
+     def _extract_text_token(self, text):
+         if isinstance(text, Generator):
+             logging.info('get tts_text generator, will return _extract_text_token_generator!')
+             # NOTE add a dummy text_token_len for compatibility
+             return self._extract_text_token_generator(text), torch.tensor([0], dtype=torch.int32).to(self.device)
+         else:
+             text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
+             text_token = torch.tensor([text_token], dtype=torch.int32).to(self.device)
+             text_token_len = torch.tensor([text_token.shape[1]], dtype=torch.int32).to(self.device)
+             return text_token, text_token_len
+
+     def _extract_text_token_generator(self, text_generator):
+         for text in text_generator:
+             text_token, _ = self._extract_text_token(text)
+             for i in range(text_token.shape[1]):
+                 yield text_token[:, i: i + 1]
+
+     def _extract_speech_token(self, speech):
+         assert speech.shape[1] / 16000 <= 30, 'do not support extract speech token for audio longer than 30s'
+         feat = whisper.log_mel_spectrogram(speech, n_mels=128)
+         speech_token = self.speech_tokenizer_session.run(None,
+                                                          {self.speech_tokenizer_session.get_inputs()[0].name:
+                                                           feat.detach().cpu().numpy(),
+                                                           self.speech_tokenizer_session.get_inputs()[1].name:
+                                                           np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
+         speech_token = torch.tensor([speech_token], dtype=torch.int32).to(self.device)
+         speech_token_len = torch.tensor([speech_token.shape[1]], dtype=torch.int32).to(self.device)
+         return speech_token, speech_token_len
+
+     def _extract_spk_embedding(self, speech):
+         feat = kaldi.fbank(speech,
+                            num_mel_bins=80,
+                            dither=0,
+                            sample_frequency=16000)
+         feat = feat - feat.mean(dim=0, keepdim=True)
+         embedding = self.campplus_session.run(None,
+                                               {self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
+         embedding = torch.tensor([embedding]).to(self.device)
+         return embedding
+
+     def _extract_speech_feat(self, speech):
+         speech_feat = self.feat_extractor(speech).squeeze(dim=0).transpose(0, 1).to(self.device)
+         speech_feat = speech_feat.unsqueeze(dim=0)
+         speech_feat_len = torch.tensor([speech_feat.shape[1]], dtype=torch.int32).to(self.device)
+         return speech_feat, speech_feat_len
+
+     def text_normalize(self, text, split=True, text_frontend=True):
+         if isinstance(text, Generator):
+             logging.info('get tts_text generator, will skip text_normalize!')
+             return [text]
+         if text_frontend is False or text == '':
+             return [text] if split is True else text
+         text = text.strip()
+         if self.use_ttsfrd:
+             texts = [i["text"] for i in json.loads(self.frd.do_voicegen_frd(text))["sentences"]]
+             text = ''.join(texts)
+         else:
+             if contains_chinese(text):
+                 text = self.zh_tn_model.normalize(text)
+                 text = text.replace("\n", "")
+                 text = replace_blank(text)
+                 text = replace_corner_mark(text)
+                 text = text.replace(".", "。")
+                 text = text.replace(" - ", ",")
+                 text = remove_bracket(text)
+                 text = re.sub(r'[,,、]+$', '。', text)
+                 texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "zh", token_max_n=80,
+                                              token_min_n=60, merge_len=20, comma_split=False))
+             else:
+                 text = self.en_tn_model.normalize(text)
+                 text = spell_out_number(text, self.inflect_parser)
+                 texts = list(split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
+                                              token_min_n=60, merge_len=20, comma_split=False))
+         texts = [i for i in texts if not is_only_punctuation(i)]
+         return texts if split is True else text
+
+     def frontend_sft(self, tts_text, spk_id):
+         tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
+         embedding = self.spk2info[spk_id]['embedding']
+         model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, 'llm_embedding': embedding, 'flow_embedding': embedding}
+         return model_input
+
+     def frontend_zero_shot(self, tts_text, prompt_text, prompt_speech_16k, resample_rate, zero_shot_spk_id):
+         tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
+         if zero_shot_spk_id == '':
+             prompt_text_token, prompt_text_token_len = self._extract_text_token(prompt_text)
+             prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=resample_rate)(prompt_speech_16k)
+             speech_feat, speech_feat_len = self._extract_speech_feat(prompt_speech_resample)
+             speech_token, speech_token_len = self._extract_speech_token(prompt_speech_16k)
+             if resample_rate == 24000:
+                 # cosyvoice2, force speech_feat % speech_token = 2
+                 token_len = min(int(speech_feat.shape[1] / 2), speech_token.shape[1])
+                 speech_feat, speech_feat_len[:] = speech_feat[:, :2 * token_len], 2 * token_len
+                 speech_token, speech_token_len[:] = speech_token[:, :token_len], token_len
+             embedding = self._extract_spk_embedding(prompt_speech_16k)
+             model_input = {'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len,
+                            'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
+                            'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
+                            'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
+                            'llm_embedding': embedding, 'flow_embedding': embedding}
+         else:
+             model_input = self.spk2info[zero_shot_spk_id]
+         model_input['text'] = tts_text_token
+         model_input['text_len'] = tts_text_token_len
+         return model_input
+
+     def frontend_cross_lingual(self, tts_text, prompt_speech_16k, resample_rate, zero_shot_spk_id):
+         model_input = self.frontend_zero_shot(tts_text, '', prompt_speech_16k, resample_rate, zero_shot_spk_id)
+         # in cross lingual mode, we remove prompt in llm
+         del model_input['prompt_text']
+         del model_input['prompt_text_len']
+         del model_input['llm_prompt_speech_token']
+         del model_input['llm_prompt_speech_token_len']
+         return model_input
+
+     def frontend_instruct(self, tts_text, spk_id, instruct_text):
+         model_input = self.frontend_sft(tts_text, spk_id)
+         # in instruct mode, we remove spk_embedding in llm due to information leakage
+         del model_input['llm_embedding']
+         instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
+         model_input['prompt_text'] = instruct_text_token
+         model_input['prompt_text_len'] = instruct_text_token_len
+         return model_input
+
+     def frontend_instruct2(self, tts_text, instruct_text, prompt_speech_16k, resample_rate, zero_shot_spk_id):
+         model_input = self.frontend_zero_shot(tts_text, instruct_text + '<|endofprompt|>', prompt_speech_16k, resample_rate, zero_shot_spk_id)
+         del model_input['llm_prompt_speech_token']
+         del model_input['llm_prompt_speech_token_len']
+         return model_input
+
+     def frontend_vc(self, source_speech_16k, prompt_speech_16k, resample_rate):
+         prompt_speech_token, prompt_speech_token_len = self._extract_speech_token(prompt_speech_16k)
+         prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=resample_rate)(prompt_speech_16k)
+         prompt_speech_feat, prompt_speech_feat_len = self._extract_speech_feat(prompt_speech_resample)
+         embedding = self._extract_spk_embedding(prompt_speech_16k)
+         source_speech_token, source_speech_token_len = self._extract_speech_token(source_speech_16k)
+         model_input = {'source_speech_token': source_speech_token, 'source_speech_token_len': source_speech_token_len,
+                        'flow_prompt_speech_token': prompt_speech_token, 'flow_prompt_speech_token_len': prompt_speech_token_len,
+                        'prompt_speech_feat': prompt_speech_feat, 'prompt_speech_feat_len': prompt_speech_feat_len,
+                        'flow_embedding': embedding}
+         return model_input
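One detail worth illustrating is the 2:1 mel-frame-to-token alignment that frontend_zero_shot enforces when resample_rate == 24000 (the CosyVoice2 path). A standalone sketch with dummy tensors; the shapes are illustrative assumptions:

# Minimal sketch (assumption): the feat/token 2:1 truncation applied for 24 kHz models.
import torch

speech_feat = torch.rand(1, 101, 80)            # (batch, mel_frames, mel_bins), odd frame count
speech_token = torch.randint(0, 4096, (1, 51))  # (batch, tokens)
token_len = min(int(speech_feat.shape[1] / 2), speech_token.shape[1])  # -> 50
speech_feat = speech_feat[:, :2 * token_len]    # keep 100 frames
speech_token = speech_token[:, :token_len]      # keep 50 tokens
assert speech_feat.shape[1] == 2 * speech_token.shape[1]  # mel frames are exactly 2x tokens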
speech/cosyvoice/cli/model.py ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ # 2025 Alibaba Inc (authors: Xiang Lyu, Bofan Zhou)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import os
16
+ from typing import Generator
17
+ import torch
18
+ import numpy as np
19
+ import threading
20
+ import time
21
+ from torch.nn import functional as F
22
+ from contextlib import nullcontext
23
+ import uuid
24
+ from cosyvoice.utils.common import fade_in_out
25
+ from cosyvoice.utils.file_utils import convert_onnx_to_trt, export_cosyvoice2_vllm
26
+ from cosyvoice.utils.common import TrtContextWrapper
27
+
28
+
29
+ class CosyVoiceModel:
30
+
31
+ def __init__(self,
32
+ llm: torch.nn.Module,
33
+ flow: torch.nn.Module,
34
+ hift: torch.nn.Module,
35
+ fp16: bool = False):
36
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
37
+ self.llm = llm
38
+ self.flow = flow
39
+ self.hift = hift
40
+ self.fp16 = fp16
41
+ if self.fp16 is True:
42
+ self.llm.half()
43
+ self.flow.half()
44
+ self.token_min_hop_len = 2 * self.flow.input_frame_rate
45
+ self.token_max_hop_len = 4 * self.flow.input_frame_rate
46
+ self.token_overlap_len = 20
47
+ # mel fade in out
48
+ self.mel_overlap_len = int(self.token_overlap_len / self.flow.input_frame_rate * 22050 / 256)
49
+ self.mel_window = np.hamming(2 * self.mel_overlap_len)
50
+ # hift cache
51
+ self.mel_cache_len = 20
52
+ self.source_cache_len = int(self.mel_cache_len * 256)
53
+ # speech fade in out
54
+ self.speech_window = np.hamming(2 * self.source_cache_len)
55
+ # rtf and decoding related
56
+ self.stream_scale_factor = 1
57
+ assert self.stream_scale_factor >= 1, 'stream_scale_factor should be greater than 1, change it according to your actual rtf'
58
+ self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
59
+ self.lock = threading.Lock()
60
+ # dict used to store session related variable
61
+ self.tts_speech_token_dict = {}
62
+ self.llm_end_dict = {}
63
+ self.mel_overlap_dict = {}
64
+ self.flow_cache_dict = {}
65
+ self.hift_cache_dict = {}
66
+
67
+ def load(self, llm_model, flow_model, hift_model):
68
+ self.llm.load_state_dict(torch.load(llm_model, map_location=self.device), strict=True)
69
+ self.llm.to(self.device).eval()
70
+ self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=True)
71
+ self.flow.to(self.device).eval()
72
+ # in case hift_model is a hifigan model
73
+ hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
74
+ self.hift.load_state_dict(hift_state_dict, strict=True)
75
+ self.hift.to(self.device).eval()
76
+
77
+ def load_jit(self, llm_text_encoder_model, llm_llm_model, flow_encoder_model):
78
+ llm_text_encoder = torch.jit.load(llm_text_encoder_model, map_location=self.device)
79
+ self.llm.text_encoder = llm_text_encoder
80
+ llm_llm = torch.jit.load(llm_llm_model, map_location=self.device)
81
+ self.llm.llm = llm_llm
82
+ flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
83
+ self.flow.encoder = flow_encoder
84
+
85
+ def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, trt_concurrent, fp16):
86
+ assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
87
+ if not os.path.exists(flow_decoder_estimator_model) or os.path.getsize(flow_decoder_estimator_model) == 0:
88
+ convert_onnx_to_trt(flow_decoder_estimator_model, self.get_trt_kwargs(), flow_decoder_onnx_model, fp16)
89
+ del self.flow.decoder.estimator
90
+ import tensorrt as trt
91
+ with open(flow_decoder_estimator_model, 'rb') as f:
92
+ estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
93
+ assert estimator_engine is not None, 'failed to load trt {}'.format(flow_decoder_estimator_model)
94
+ self.flow.decoder.estimator = TrtContextWrapper(estimator_engine, trt_concurrent=trt_concurrent, device=self.device)
95
+
96
+ def get_trt_kwargs(self):
97
+ min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2, 80, 4)]
98
+ opt_shape = [(2, 80, 500), (2, 1, 500), (2, 80, 500), (2, 80, 500)]
99
+ max_shape = [(2, 80, 3000), (2, 1, 3000), (2, 80, 3000), (2, 80, 3000)]
100
+ input_names = ["x", "mask", "mu", "cond"]
101
+ return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}
102
+
103
+ def llm_job(self, text, prompt_text, llm_prompt_speech_token, llm_embedding, uuid):
104
+ with self.llm_context, torch.cuda.amp.autocast(self.fp16 is True and hasattr(self.llm, 'vllm') is False):
105
+ if isinstance(text, Generator):
106
+ assert isinstance(self, CosyVoice2Model) and not hasattr(self.llm, 'vllm'), 'streaming input text is only implemented for CosyVoice2 and do not support vllm!'
107
+ for i in self.llm.inference_bistream(text=text,
108
+ prompt_text=prompt_text.to(self.device),
109
+ prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
110
+ prompt_speech_token=llm_prompt_speech_token.to(self.device),
111
+ prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
112
+ embedding=llm_embedding.to(self.device)):
113
+ self.tts_speech_token_dict[uuid].append(i)
114
+ else:
115
+ for i in self.llm.inference(text=text.to(self.device),
116
+ text_len=torch.tensor([text.shape[1]], dtype=torch.int32).to(self.device),
117
+ prompt_text=prompt_text.to(self.device),
118
+ prompt_text_len=torch.tensor([prompt_text.shape[1]], dtype=torch.int32).to(self.device),
119
+ prompt_speech_token=llm_prompt_speech_token.to(self.device),
120
+ prompt_speech_token_len=torch.tensor([llm_prompt_speech_token.shape[1]], dtype=torch.int32).to(self.device),
121
+ embedding=llm_embedding.to(self.device),
122
+ uuid=uuid):
123
+ self.tts_speech_token_dict[uuid].append(i)
124
+ self.llm_end_dict[uuid] = True
125
+
126
+ def vc_job(self, source_speech_token, uuid):
127
+ self.tts_speech_token_dict[uuid] = source_speech_token.flatten().tolist()
128
+ self.llm_end_dict[uuid] = True
129
+
130
+ def token2wav(self, token, prompt_token, prompt_feat, embedding, uuid, finalize=False, speed=1.0):
131
+ with torch.cuda.amp.autocast(self.fp16):
132
+ tts_mel, self.flow_cache_dict[uuid] = self.flow.inference(token=token.to(self.device),
133
+ token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
134
+ prompt_token=prompt_token.to(self.device),
135
+ prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
136
+ prompt_feat=prompt_feat.to(self.device),
137
+ prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
138
+ embedding=embedding.to(self.device),
139
+ flow_cache=self.flow_cache_dict[uuid])
140
+
141
+ # mel overlap fade in out
142
+ if self.mel_overlap_dict[uuid].shape[2] != 0:
143
+ tts_mel = fade_in_out(tts_mel, self.mel_overlap_dict[uuid], self.mel_window)
144
+ # append hift cache
145
+ if self.hift_cache_dict[uuid] is not None:
146
+ hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
147
+ tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
148
+ else:
149
+ hift_cache_source = torch.zeros(1, 1, 0)
150
+ # keep overlap mel and hift cache
151
+ if finalize is False:
152
+ self.mel_overlap_dict[uuid] = tts_mel[:, :, -self.mel_overlap_len:]
153
+ tts_mel = tts_mel[:, :, :-self.mel_overlap_len]
154
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
155
+ if self.hift_cache_dict[uuid] is not None:
156
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
157
+ self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
158
+ 'source': tts_source[:, :, -self.source_cache_len:],
159
+ 'speech': tts_speech[:, -self.source_cache_len:]}
160
+ tts_speech = tts_speech[:, :-self.source_cache_len]
161
+ else:
162
+ if speed != 1.0:
163
+ assert self.hift_cache_dict[uuid] is None, 'speed change only support non-stream inference mode'
164
+ tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
165
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
166
+ if self.hift_cache_dict[uuid] is not None:
167
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
168
+ return tts_speech
169
+
170
+ def tts(self, text=torch.zeros(1, 0, dtype=torch.int32), flow_embedding=torch.zeros(0, 192), llm_embedding=torch.zeros(0, 192),
171
+ prompt_text=torch.zeros(1, 0, dtype=torch.int32),
172
+ llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
173
+ flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
174
+ prompt_speech_feat=torch.zeros(1, 0, 80), source_speech_token=torch.zeros(1, 0, dtype=torch.int32), stream=False, speed=1.0, **kwargs):
175
+ # this_uuid is used to track variables related to this inference thread
176
+ this_uuid = str(uuid.uuid1())
177
+ with self.lock:
178
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
179
+ self.hift_cache_dict[this_uuid] = None
180
+ self.mel_overlap_dict[this_uuid] = torch.zeros(1, 80, 0)
181
+ self.flow_cache_dict[this_uuid] = torch.zeros(1, 80, 0, 2)
182
+ if source_speech_token.shape[1] == 0:
183
+ p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
184
+ else:
185
+ p = threading.Thread(target=self.vc_job, args=(source_speech_token, this_uuid))
186
+ p.start()
187
+ if stream is True:
188
+ token_hop_len = self.token_min_hop_len
189
+ while True:
190
+ time.sleep(0.1)
191
+ if len(self.tts_speech_token_dict[this_uuid]) >= token_hop_len + self.token_overlap_len:
192
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_hop_len + self.token_overlap_len]) \
193
+ .unsqueeze(dim=0)
194
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
195
+ prompt_token=flow_prompt_speech_token,
196
+ prompt_feat=prompt_speech_feat,
197
+ embedding=flow_embedding,
198
+ uuid=this_uuid,
199
+ finalize=False)
200
+ yield {'tts_speech': this_tts_speech.cpu()}
201
+ with self.lock:
202
+ self.tts_speech_token_dict[this_uuid] = self.tts_speech_token_dict[this_uuid][token_hop_len:]
203
+ # increase token_hop_len for better speech quality
204
+ token_hop_len = min(self.token_max_hop_len, int(token_hop_len * self.stream_scale_factor))
205
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) < token_hop_len + self.token_overlap_len:
206
+ break
207
+ p.join()
208
+ # deal with remain tokens, make sure inference remain token len equals token_hop_len when cache_speech is not None
209
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
210
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
211
+ prompt_token=flow_prompt_speech_token,
212
+ prompt_feat=prompt_speech_feat,
213
+ embedding=flow_embedding,
214
+ uuid=this_uuid,
215
+ finalize=True)
216
+ yield {'tts_speech': this_tts_speech.cpu()}
217
+ else:
218
+ # deal with all tokens
219
+ p.join()
220
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
221
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
222
+ prompt_token=flow_prompt_speech_token,
223
+ prompt_feat=prompt_speech_feat,
224
+ embedding=flow_embedding,
225
+ uuid=this_uuid,
226
+ finalize=True,
227
+ speed=speed)
228
+ yield {'tts_speech': this_tts_speech.cpu()}
229
+ with self.lock:
230
+ self.tts_speech_token_dict.pop(this_uuid)
231
+ self.llm_end_dict.pop(this_uuid)
232
+ self.mel_overlap_dict.pop(this_uuid)
233
+ self.hift_cache_dict.pop(this_uuid)
234
+ self.flow_cache_dict.pop(this_uuid)
235
+ if torch.cuda.is_available():
236
+ torch.cuda.empty_cache()
237
+ torch.cuda.current_stream().synchronize()
238
+
239
+
240
+ class CosyVoice2Model(CosyVoiceModel):
241
+
242
+ def __init__(self,
243
+ llm: torch.nn.Module,
244
+ flow: torch.nn.Module,
245
+ hift: torch.nn.Module,
246
+ fp16: bool = False):
247
+ self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
248
+ self.llm = llm
249
+ self.flow = flow
250
+ self.hift = hift
251
+ self.fp16 = fp16
252
+ if self.fp16 is True:
253
+ self.llm.half()
254
+ self.flow.half()
255
+ # NOTE must matching training static_chunk_size
256
+ self.token_hop_len = 25
257
+ # hift cache
258
+ self.mel_cache_len = 8
259
+ self.source_cache_len = int(self.mel_cache_len * 480)
260
+ # speech fade in out
261
+ self.speech_window = np.hamming(2 * self.source_cache_len)
262
+ # rtf and decoding related
263
+ self.llm_context = torch.cuda.stream(torch.cuda.Stream(self.device)) if torch.cuda.is_available() else nullcontext()
264
+ self.lock = threading.Lock()
265
+ # dict used to store session related variable
266
+ self.tts_speech_token_dict = {}
267
+ self.llm_end_dict = {}
268
+ self.hift_cache_dict = {}
269
+
270
+ def load_jit(self, flow_encoder_model):
271
+ flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
272
+ self.flow.encoder = flow_encoder
273
+
274
+ def load_vllm(self, model_dir):
275
+ export_cosyvoice2_vllm(self.llm, model_dir, self.device)
276
+ from vllm import EngineArgs, LLMEngine
277
+ engine_args = EngineArgs(model=model_dir,
278
+ skip_tokenizer_init=True,
279
+ enable_prompt_embeds=True,
280
+ gpu_memory_utilization=0.2)
281
+ self.llm.vllm = LLMEngine.from_engine_args(engine_args)
282
+ self.llm.lock = threading.Lock()
283
+ del self.llm.llm.model.model.layers
284
+
285
+ def token2wav(self, token, prompt_token, prompt_feat, embedding, token_offset, uuid, stream=False, finalize=False, speed=1.0):
286
+ with torch.cuda.amp.autocast(self.fp16):
287
+ tts_mel, _ = self.flow.inference(token=token.to(self.device),
288
+ token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
289
+ prompt_token=prompt_token.to(self.device),
290
+ prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
291
+ prompt_feat=prompt_feat.to(self.device),
292
+ prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
293
+ embedding=embedding.to(self.device),
294
+ streaming=stream,
295
+ finalize=finalize)
296
+ tts_mel = tts_mel[:, :, token_offset * self.flow.token_mel_ratio:]
297
+ # append hift cache
298
+ if self.hift_cache_dict[uuid] is not None:
299
+ hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
300
+ tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
301
+ else:
302
+ hift_cache_source = torch.zeros(1, 1, 0)
303
+ # keep overlap mel and hift cache
304
+ if finalize is False:
305
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
306
+ if self.hift_cache_dict[uuid] is not None:
307
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
308
+ self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
309
+ 'source': tts_source[:, :, -self.source_cache_len:],
310
+ 'speech': tts_speech[:, -self.source_cache_len:]}
311
+ tts_speech = tts_speech[:, :-self.source_cache_len]
312
+ else:
313
+ if speed != 1.0:
314
+ assert self.hift_cache_dict[uuid] is None, 'speed change only support non-stream inference mode'
315
+ tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
316
+ tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
317
+ if self.hift_cache_dict[uuid] is not None:
318
+ tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
319
+ return tts_speech
320
+
321
+ def tts(self, text=torch.zeros(1, 0, dtype=torch.int32), flow_embedding=torch.zeros(0, 192), llm_embedding=torch.zeros(0, 192),
322
+ prompt_text=torch.zeros(1, 0, dtype=torch.int32),
323
+ llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
324
+ flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32),
325
+ prompt_speech_feat=torch.zeros(1, 0, 80), source_speech_token=torch.zeros(1, 0, dtype=torch.int32), stream=False, speed=1.0, **kwargs):
326
+ # this_uuid is used to track variables related to this inference thread
327
+ this_uuid = str(uuid.uuid1())
328
+ with self.lock:
329
+ self.tts_speech_token_dict[this_uuid], self.llm_end_dict[this_uuid] = [], False
330
+ self.hift_cache_dict[this_uuid] = None
331
+ if source_speech_token.shape[1] == 0:
332
+ p = threading.Thread(target=self.llm_job, args=(text, prompt_text, llm_prompt_speech_token, llm_embedding, this_uuid))
333
+ else:
334
+ p = threading.Thread(target=self.vc_job, args=(source_speech_token, this_uuid))
335
+ p.start()
336
+ if stream is True:
337
+ token_offset = 0
338
+ prompt_token_pad = int(np.ceil(flow_prompt_speech_token.shape[1] / self.token_hop_len) * self.token_hop_len - flow_prompt_speech_token.shape[1])
339
+ while True:
340
+ time.sleep(0.1)
341
+ this_token_hop_len = self.token_hop_len + prompt_token_pad if token_offset == 0 else self.token_hop_len
342
+ if len(self.tts_speech_token_dict[this_uuid]) - token_offset >= this_token_hop_len + self.flow.pre_lookahead_len:
343
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid][:token_offset + this_token_hop_len + self.flow.pre_lookahead_len]).unsqueeze(dim=0)
344
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
345
+ prompt_token=flow_prompt_speech_token,
346
+ prompt_feat=prompt_speech_feat,
347
+ embedding=flow_embedding,
348
+ token_offset=token_offset,
349
+ uuid=this_uuid,
350
+ stream=stream,
351
+ finalize=False)
352
+ token_offset += this_token_hop_len
353
+ yield {'tts_speech': this_tts_speech.cpu()}
354
+ if self.llm_end_dict[this_uuid] is True and len(self.tts_speech_token_dict[this_uuid]) - token_offset < this_token_hop_len + self.flow.pre_lookahead_len:
355
+ break
356
+ p.join()
357
+ # deal with remain tokens, make sure inference remain token len equals token_hop_len when cache_speech is not None
358
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
359
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
360
+ prompt_token=flow_prompt_speech_token,
361
+ prompt_feat=prompt_speech_feat,
362
+ embedding=flow_embedding,
363
+ token_offset=token_offset,
364
+ uuid=this_uuid,
365
+ finalize=True)
366
+ yield {'tts_speech': this_tts_speech.cpu()}
367
+ else:
368
+ # deal with all tokens
369
+ p.join()
370
+ this_tts_speech_token = torch.tensor(self.tts_speech_token_dict[this_uuid]).unsqueeze(dim=0)
371
+ this_tts_speech = self.token2wav(token=this_tts_speech_token,
372
+ prompt_token=flow_prompt_speech_token,
373
+ prompt_feat=prompt_speech_feat,
374
+ embedding=flow_embedding,
375
+ token_offset=0,
376
+ uuid=this_uuid,
377
+ finalize=True,
378
+ speed=speed)
379
+ yield {'tts_speech': this_tts_speech.cpu()}
380
+ with self.lock:
381
+ self.tts_speech_token_dict.pop(this_uuid)
382
+ self.llm_end_dict.pop(this_uuid)
383
+ self.hift_cache_dict.pop(this_uuid)
384
+ if torch.cuda.is_available():
385
+ torch.cuda.empty_cache()
386
+ torch.cuda.current_stream().synchronize()
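The streaming branch above emits an audio chunk every `token_hop_len` speech tokens (plus the flow decoder's `pre_lookahead_len`), while the non-streaming branch waits for the LLM thread to finish and converts everything at once. A minimal consumption sketch, assuming an already constructed model object exposing this `tts()` method and pre-extracted text tokens and embeddings; all names, shapes and the output sample rate are illustrative assumptions, not project defaults:

import torch
import torchaudio

def synthesize_streaming(model, text_tokens, flow_embedding, llm_embedding, sample_rate=24000):
    # collect the (1, num_samples) chunks yielded by the generator as they become ready
    chunks = [out['tts_speech'] for out in model.tts(text=text_tokens,
                                                     flow_embedding=flow_embedding,
                                                     llm_embedding=llm_embedding,
                                                     stream=True)]
    speech = torch.concat(chunks, dim=1)
    torchaudio.save('output.wav', speech, sample_rate)  # sample rate depends on the HiFi-GAN config (assumed here)
    return speech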
speech/cosyvoice/dataset/__init__.py ADDED
File without changes
speech/cosyvoice/dataset/dataset.py ADDED
@@ -0,0 +1,151 @@
1
+ # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import random
17
+ import math
18
+ from functools import partial
19
+
20
+ import torch
21
+ import torch.distributed as dist
22
+ from torch.utils.data import IterableDataset
23
+ from cosyvoice.utils.file_utils import read_lists
24
+
25
+
26
+ class Processor(IterableDataset):
27
+
28
+ def __init__(self, source, f, *args, **kw):
29
+ assert callable(f)
30
+ self.source = source
31
+ self.f = f
32
+ self.args = args
33
+ self.kw = kw
34
+
35
+ def set_epoch(self, epoch):
36
+ self.source.set_epoch(epoch)
37
+
38
+ def __iter__(self):
39
+ """ Return an iterator over the source dataset processed by the
40
+ given processor.
41
+ """
42
+ assert self.source is not None
43
+ assert callable(self.f)
44
+ return self.f(iter(self.source), *self.args, **self.kw)
45
+
46
+ def apply(self, f):
47
+ assert callable(f)
48
+ return Processor(self, f, *self.args, **self.kw)
49
+
50
+
51
+ class DistributedSampler:
52
+
53
+ def __init__(self, shuffle=True, partition=True):
54
+ self.epoch = -1
55
+ self.update()
56
+ self.shuffle = shuffle
57
+ self.partition = partition
58
+
59
+ def update(self):
60
+ assert dist.is_available()
61
+ if dist.is_initialized():
62
+ self.rank = dist.get_rank()
63
+ self.world_size = dist.get_world_size()
64
+ else:
65
+ self.rank = 0
66
+ self.world_size = 1
67
+ worker_info = torch.utils.data.get_worker_info()
68
+ if worker_info is None:
69
+ self.worker_id = 0
70
+ self.num_workers = 1
71
+ else:
72
+ self.worker_id = worker_info.id
73
+ self.num_workers = worker_info.num_workers
74
+ return dict(rank=self.rank,
75
+ world_size=self.world_size,
76
+ worker_id=self.worker_id,
77
+ num_workers=self.num_workers)
78
+
79
+ def set_epoch(self, epoch):
80
+ self.epoch = epoch
81
+
82
+ def sample(self, data):
83
+ """ Sample data according to rank/world_size/num_workers
84
+
85
+ Args:
86
+ data(List): input data list
87
+
88
+ Returns:
89
+ List: data list after sample
90
+ """
91
+ data = list(range(len(data)))
92
+ # pad the data list so every rank and worker sees the same number of entries
93
+ if self.partition:
94
+ if self.shuffle:
95
+ random.Random(self.epoch).shuffle(data)
96
+ if len(data) < self.world_size:
97
+ data = data * math.ceil(self.world_size / len(data))
98
+ data = data[:self.world_size]
99
+ data = data[self.rank::self.world_size]
100
+ if len(data) < self.num_workers:
101
+ data = data * math.ceil(self.num_workers / len(data))
102
+ data = data[:self.num_workers]
103
+ data = data[self.worker_id::self.num_workers]
104
+ return data
105
+
106
+
107
+ class DataList(IterableDataset):
108
+
109
+ def __init__(self, lists, shuffle=True, partition=True):
110
+ self.lists = lists
111
+ self.sampler = DistributedSampler(shuffle, partition)
112
+
113
+ def set_epoch(self, epoch):
114
+ self.sampler.set_epoch(epoch)
115
+
116
+ def __iter__(self):
117
+ sampler_info = self.sampler.update()
118
+ indexes = self.sampler.sample(self.lists)
119
+ for index in indexes:
120
+ data = dict(src=self.lists[index])
121
+ data.update(sampler_info)
122
+ yield data
123
+
124
+
125
+ def Dataset(data_list_file,
126
+ data_pipeline,
127
+ mode='train',
128
+ gan=False,
129
+ dpo=False,
130
+ shuffle=True,
131
+ partition=True):
132
+ """ Construct dataset from arguments
133
+
134
+ We have two shuffle stages in the Dataset. The first is global
135
+ shuffle at shards tar/raw file level. The second is global shuffle
136
+ at training samples level.
137
+
138
+ Args:
139
+ data_list_file(str): file that lists the parquet shard paths
140
+ data_pipeline(List[Callable]): processor functions applied in order
141
+ partition(bool): whether to do data partition in terms of rank
142
+ """
143
+ lists = read_lists(data_list_file)
144
+ dataset = DataList(lists,
145
+ shuffle=shuffle,
146
+ partition=partition)
147
+ # map partial arg to padding func
148
+ data_pipeline[-1] = partial(data_pipeline[-1], gan=gan, dpo=dpo)
149
+ for func in data_pipeline:
150
+ dataset = Processor(dataset, func, mode=mode)
151
+ return dataset
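`Dataset()` above simply wraps each pipeline function in a `Processor`, so the list order defines the stage order and the last stage must be the padding collate. A sketch of wiring a pipeline, using a character-level stand-in tokenizer and a torchaudio mel extractor instead of the project's real ones; the file path, sample rate and frame budget are illustrative assumptions, and the parquet shards are assumed to carry the columns the processors expect (utt, text, audio_data, speech_token, utt/spk embeddings):

from functools import partial

import torchaudio
from torch.utils.data import DataLoader

from cosyvoice.dataset.dataset import Dataset
import cosyvoice.dataset.processor as processor


class _CharTokenizer:
    # stand-in for the project's BPE tokenizer, character level for illustration only
    def encode(self, text, allowed_special='all'):
        return [ord(c) for c in text]


feat_extractor = torchaudio.transforms.MelSpectrogram(
    sample_rate=24000, n_fft=1024, hop_length=256, n_mels=80)

data_pipeline = [
    processor.parquet_opener,
    partial(processor.tokenize, get_tokenizer=_CharTokenizer, allowed_special='all'),
    processor.filter,
    partial(processor.resample, resample_rate=24000),
    partial(processor.compute_fbank, feat_extractor=feat_extractor),
    partial(processor.parse_embedding, normalize=True),
    processor.shuffle,
    processor.sort,
    partial(processor.batch, batch_type='dynamic', max_frames_in_batch=2000),
    partial(processor.padding, use_spk_embedding=False),  # Dataset() re-binds gan/dpo on this last stage
]

train_dataset = Dataset('data/train.data.list', data_pipeline=data_pipeline,
                        mode='train', gan=False, dpo=False, shuffle=True, partition=True)
train_loader = DataLoader(train_dataset, batch_size=None, num_workers=2)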
speech/cosyvoice/dataset/processor.py ADDED
@@ -0,0 +1,434 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import logging
15
+ import random
16
+
17
+ import pyarrow.parquet as pq
18
+ from io import BytesIO
19
+ import torch
20
+ import torchaudio
21
+ from torch.nn.utils.rnn import pad_sequence
22
+ import torch.nn.functional as F
23
+ import pyworld as pw
24
+
25
+
26
+ AUDIO_FORMAT_SETS = {'flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'}
27
+
28
+
29
+ def parquet_opener(data, mode='train', tts_data={}):
30
+ """ Give url or local file, return file descriptor
31
+ Inplace operation.
32
+
33
+ Args:
34
+ data(Iterable[str]): url or local file list
35
+
36
+ Returns:
37
+ Iterable[{src, **parquet columns}]
38
+ """
39
+ for sample in data:
40
+ assert 'src' in sample
41
+ url = sample['src']
42
+ try:
43
+ for df in pq.ParquetFile(url).iter_batches(batch_size=64):
44
+ df = df.to_pandas()
45
+ for i in range(len(df)):
46
+ sample.update(dict(df.loc[i]))
47
+ if mode == 'train':
48
+ # NOTE do not return sample directly, must initialize a new dict
49
+ yield {**sample}
50
+ else:
51
+ for index, text in enumerate(tts_data[df.loc[i, 'utt']]):
52
+ yield {**sample, 'tts_index': index, 'tts_text': text}
53
+ except Exception as ex:
54
+ logging.warning('Failed to open {}, ex info {}'.format(url, ex))
55
+
56
+
57
+ def filter(data,
58
+ max_length=10240,
59
+ min_length=10,
60
+ token_max_length=200,
61
+ token_min_length=1,
62
+ min_output_input_ratio=0.0005,
63
+ max_output_input_ratio=1,
64
+ mode='train'):
65
+ """ Filter sample according to feature and label length
66
+ Inplace operation.
67
+
68
+ Args:
69
+ data: Iterable[{key, wav, label, sample_rate}]
70
+ max_length: drop utterance which is greater than max_length(10ms)
71
+ min_length: drop utterance which is less than min_length(10ms)
72
+ token_max_length: drop utterance which is greater than
73
+ token_max_length, especially when use char unit for
74
+ english modeling
75
+ token_min_length: drop utterance which is
76
+ less than token_max_length
77
+ min_output_input_ratio: minimal ratio of
78
+ token_length / feats_length(10ms)
79
+ max_output_input_ratio: maximum ratio of
80
+ token_length / feats_length(10ms)
81
+
82
+ Returns:
83
+ Iterable[{key, wav, label, sample_rate}]
84
+ """
85
+ for sample in data:
86
+ sample['speech'], sample['sample_rate'] = torchaudio.load(BytesIO(sample['audio_data']))
87
+ sample['speech'] = sample['speech'].mean(dim=0, keepdim=True)
88
+ del sample['audio_data']
89
+ # sample['speech'] is a torch.Tensor; length limits are measured in 10ms frames (100 per second)
90
+ num_frames = sample['speech'].size(1) / sample['sample_rate'] * 100
91
+ if num_frames < min_length:
92
+ continue
93
+ if num_frames > max_length:
94
+ continue
95
+ if len(sample['text_token']) < token_min_length:
96
+ continue
97
+ if len(sample['text_token']) > token_max_length:
98
+ continue
99
+ if len(sample['speech_token']) == 0:
100
+ continue
101
+ if 'reject_speech_token' in sample and len(sample['reject_speech_token']) == 0:
102
+ continue
103
+ if num_frames != 0:
104
+ if len(sample['text_token']) / num_frames < min_output_input_ratio:
105
+ continue
106
+ if len(sample['text_token']) / num_frames > max_output_input_ratio:
107
+ continue
108
+ yield sample
109
+
110
+
111
+ def resample(data, resample_rate=22050, min_sample_rate=16000, mode='train'):
112
+ """ Resample data.
113
+ Inplace operation.
114
+
115
+ Args:
116
+ data: Iterable[{key, wav, label, sample_rate}]
117
+ resample_rate: target resample rate
118
+
119
+ Returns:
120
+ Iterable[{key, wav, label, sample_rate}]
121
+ """
122
+ for sample in data:
123
+ assert 'sample_rate' in sample
124
+ assert 'speech' in sample
125
+ sample_rate = sample['sample_rate']
126
+ waveform = sample['speech']
127
+ if sample_rate != resample_rate:
128
+ if sample_rate < min_sample_rate:
129
+ continue
130
+ sample['sample_rate'] = resample_rate
131
+ sample['speech'] = torchaudio.transforms.Resample(
132
+ orig_freq=sample_rate, new_freq=resample_rate)(waveform)
133
+ max_val = sample['speech'].abs().max()
134
+ if max_val > 1:
135
+ sample['speech'] /= max_val
136
+ yield sample
137
+
138
+
139
+ def truncate(data, truncate_length=24576, mode='train'):
140
+ """ Truncate data.
141
+
142
+ Args:
143
+ data: Iterable[{key, wav, label, sample_rate}]
144
+ truncate_length: truncate length
145
+
146
+ Returns:
147
+ Iterable[{key, wav, label, sample_rate}]
148
+ """
149
+ for sample in data:
150
+ waveform = sample['speech']
151
+ if waveform.shape[1] > truncate_length:
152
+ start = random.randint(0, waveform.shape[1] - truncate_length)
153
+ waveform = waveform[:, start: start + truncate_length]
154
+ else:
155
+ waveform = torch.concat([waveform, torch.zeros(1, truncate_length - waveform.shape[1])], dim=1)
156
+ sample['speech'] = waveform
157
+ yield sample
158
+
159
+
160
+ def compute_fbank(data,
161
+ feat_extractor,
162
+ token_mel_ratio=0,
163
+ mode='train'):
164
+ """ Extract fbank
165
+
166
+ Args:
167
+ data: Iterable[{key, wav, label, sample_rate}]
168
+
169
+ Returns:
170
+ Iterable[{key, feat, label}]
171
+ """
172
+ for sample in data:
173
+ assert 'sample_rate' in sample
174
+ assert 'speech' in sample
175
+ assert 'utt' in sample
176
+ assert 'text_token' in sample
177
+ waveform = sample['speech']
178
+ feat = feat_extractor(waveform).squeeze(dim=0).transpose(0, 1)
179
+ if token_mel_ratio != 0:
180
+ # trim to align speech_token and speech_feat
181
+ token_len = int(min(feat.shape[0] / token_mel_ratio, sample["speech_token"].shape[0]))
182
+ feat = feat[:token_mel_ratio * token_len]
183
+ sample["speech_token"] = sample["speech_token"][:token_len]
184
+ sample['speech_feat'] = feat
185
+ yield sample
186
+
187
+
188
+ def compute_f0(data, sample_rate, hop_size, mode='train'):
189
+ """ Extract f0
190
+
191
+ Args:
192
+ data: Iterable[{key, wav, label, sample_rate}]
193
+
194
+ Returns:
195
+ Iterable[{key, feat, label}]
196
+ """
197
+ frame_period = hop_size * 1000 / sample_rate
198
+ for sample in data:
199
+ assert 'sample_rate' in sample
200
+ assert 'speech' in sample
201
+ assert 'utt' in sample
202
+ assert 'text_token' in sample
203
+ waveform = sample['speech']
204
+ _f0, t = pw.harvest(waveform.squeeze(dim=0).numpy().astype('double'), sample_rate, frame_period=frame_period)
205
+ if sum(_f0 != 0) < 5: # this happens when the algorithm fails
206
+ _f0, t = pw.dio(waveform.squeeze(dim=0).numpy().astype('double'), sample_rate, frame_period=frame_period) # if harvest fails, try dio
207
+ f0 = pw.stonemask(waveform.squeeze(dim=0).numpy().astype('double'), _f0, t, sample_rate)
208
+ f0 = F.interpolate(torch.from_numpy(f0).view(1, 1, -1), size=sample['speech_feat'].shape[0], mode='linear').view(-1)
209
+ sample['pitch_feat'] = f0
210
+ yield sample
211
+
212
+
213
+ def parse_embedding(data, normalize, mode='train'):
214
+ """ Parse utt_embedding/spk_embedding
215
+
216
+ Args:
217
+ data: Iterable[{key, wav, label, sample_rate}]
218
+
219
+ Returns:
220
+ Iterable[{key, feat, label}]
221
+ """
222
+ for sample in data:
223
+ sample['utt_embedding'] = torch.tensor(sample['utt_embedding'], dtype=torch.float32)
224
+ sample['spk_embedding'] = torch.tensor(sample['spk_embedding'], dtype=torch.float32)
225
+ if normalize:
226
+ sample['utt_embedding'] = F.normalize(sample['utt_embedding'], dim=0)
227
+ sample['spk_embedding'] = F.normalize(sample['spk_embedding'], dim=0)
228
+ yield sample
229
+
230
+
231
+ def tokenize(data, get_tokenizer, allowed_special, mode='train'):
232
+ """ Decode text to chars or BPE
233
+ Inplace operation
234
+
235
+ Args:
236
+ data: Iterable[{key, wav, txt, sample_rate}]
237
+
238
+ Returns:
239
+ Iterable[{key, wav, txt, tokens, label, sample_rate}]
240
+ """
241
+ tokenizer = get_tokenizer()
242
+ for sample in data:
243
+ assert 'text' in sample
244
+ sample['text_token'] = tokenizer.encode(sample['text'], allowed_special=allowed_special)
245
+ yield sample
246
+
247
+
248
+ def shuffle(data, shuffle_size=10000, mode='train'):
249
+ """ Local shuffle the data
250
+
251
+ Args:
252
+ data: Iterable[{key, feat, label}]
253
+ shuffle_size: buffer size for shuffle
254
+
255
+ Returns:
256
+ Iterable[{key, feat, label}]
257
+ """
258
+ buf = []
259
+ for sample in data:
260
+ buf.append(sample)
261
+ if len(buf) >= shuffle_size:
262
+ random.shuffle(buf)
263
+ for x in buf:
264
+ yield x
265
+ buf = []
266
+ # The samples left over
267
+ random.shuffle(buf)
268
+ for x in buf:
269
+ yield x
270
+
271
+
272
+ def sort(data, sort_size=500, mode='train'):
273
+ """ Sort the data by feature length.
274
+ Sort is used after shuffle and before batch, so we can group
275
+ utts with similar lengths into a batch, and `sort_size` should
276
+ be less than `shuffle_size`
277
+
278
+ Args:
279
+ data: Iterable[{key, feat, label}]
280
+ sort_size: buffer size for sort
281
+
282
+ Returns:
283
+ Iterable[{key, feat, label}]
284
+ """
285
+
286
+ buf = []
287
+ for sample in data:
288
+ buf.append(sample)
289
+ if len(buf) >= sort_size:
290
+ buf.sort(key=lambda x: x['speech_feat'].size(0))
291
+ for x in buf:
292
+ yield x
293
+ buf = []
294
+ # The samples left over
295
+ buf.sort(key=lambda x: x['speech_feat'].size(0))
296
+ for x in buf:
297
+ yield x
298
+
299
+
300
+ def static_batch(data, batch_size=16):
301
+ """ Static batch the data by `batch_size`
302
+
303
+ Args:
304
+ data: Iterable[{key, feat, label}]
305
+ batch_size: batch size
306
+
307
+ Returns:
308
+ Iterable[List[{key, feat, label}]]
309
+ """
310
+ buf = []
311
+ for sample in data:
312
+ buf.append(sample)
313
+ if len(buf) >= batch_size:
314
+ yield buf
315
+ buf = []
316
+ if len(buf) > 0:
317
+ yield buf
318
+
319
+
320
+ def dynamic_batch(data, max_frames_in_batch=12000, mode='train'):
321
+ """ Dynamic batch the data until the total frames in batch
322
+ reach `max_frames_in_batch`
323
+
324
+ Args:
325
+ data: Iterable[{key, feat, label}]
326
+ max_frames_in_batch: max_frames in one batch
327
+
328
+ Returns:
329
+ Iterable[List[{key, feat, label}]]
330
+ """
331
+ buf = []
332
+ longest_frames = 0
333
+ for sample in data:
334
+ assert 'speech_feat' in sample
335
+ assert isinstance(sample['speech_feat'], torch.Tensor)
336
+ new_sample_frames = sample['speech_feat'].size(0)
337
+ longest_frames = max(longest_frames, new_sample_frames)
338
+ frames_after_padding = longest_frames * (len(buf) + 1)
339
+ if frames_after_padding > max_frames_in_batch:
340
+ yield buf
341
+ buf = [sample]
342
+ longest_frames = new_sample_frames
343
+ else:
344
+ buf.append(sample)
345
+ if len(buf) > 0:
346
+ yield buf
347
+
348
+
349
+ def batch(data, batch_type='static', batch_size=16, max_frames_in_batch=12000, mode='train'):
350
+ """ Wrapper for static/dynamic batch
351
+ """
352
+ if batch_type == 'static':
353
+ return static_batch(data, batch_size)
354
+ elif batch_type == 'dynamic':
355
+ return dynamic_batch(data, max_frames_in_batch)
356
+ else:
357
+ logging.fatal('Unsupported batch type {}'.format(batch_type))
358
+
359
+
360
+ def padding(data, use_spk_embedding, mode='train', gan=False, dpo=False):
361
+ """ Padding the data into training data
362
+
363
+ Args:
364
+ data: Iterable[List[{key, feat, label}]]
365
+
366
+ Returns:
367
+ Iterable[Tuple(keys, feats, labels, feats lengths, label lengths)]
368
+ """
369
+ for sample in data:
370
+ assert isinstance(sample, list)
371
+ speech_feat_len = torch.tensor([x['speech_feat'].size(1) for x in sample],
372
+ dtype=torch.int32)
373
+ order = torch.argsort(speech_feat_len, descending=True)
374
+
375
+ utts = [sample[i]['utt'] for i in order]
376
+ speech = [sample[i]['speech'].squeeze(dim=0) for i in order]
377
+ speech_len = torch.tensor([i.size(0) for i in speech], dtype=torch.int32)
378
+ speech = pad_sequence(speech, batch_first=True, padding_value=0)
379
+ speech_token = [torch.tensor(sample[i]['speech_token']) for i in order]
380
+ speech_token_len = torch.tensor([i.size(0) for i in speech_token], dtype=torch.int32)
381
+ speech_token = pad_sequence(speech_token,
382
+ batch_first=True,
383
+ padding_value=0)
384
+ speech_feat = [sample[i]['speech_feat'] for i in order]
385
+ speech_feat_len = torch.tensor([i.size(0) for i in speech_feat], dtype=torch.int32)
386
+ speech_feat = pad_sequence(speech_feat,
387
+ batch_first=True,
388
+ padding_value=0)
389
+ text = [sample[i]['text'] for i in order]
390
+ text_token = [torch.tensor(sample[i]['text_token']) for i in order]
391
+ text_token_len = torch.tensor([i.size(0) for i in text_token], dtype=torch.int32)
392
+ text_token = pad_sequence(text_token, batch_first=True, padding_value=0)
393
+ utt_embedding = torch.stack([sample[i]['utt_embedding'] for i in order], dim=0)
394
+ spk_embedding = torch.stack([sample[i]['spk_embedding'] for i in order], dim=0)
395
+ batch = {
396
+ "utts": utts,
397
+ "speech": speech,
398
+ "speech_len": speech_len,
399
+ "speech_token": speech_token,
400
+ "speech_token_len": speech_token_len,
401
+ "speech_feat": speech_feat,
402
+ "speech_feat_len": speech_feat_len,
403
+ "text": text,
404
+ "text_token": text_token,
405
+ "text_token_len": text_token_len,
406
+ "utt_embedding": utt_embedding,
407
+ "spk_embedding": spk_embedding,
408
+ }
409
+ if gan is True:
410
+ # in gan train, we need pitch_feat
411
+ pitch_feat = [sample[i]['pitch_feat'] for i in order]
412
+ pitch_feat_len = torch.tensor([i.size(0) for i in pitch_feat], dtype=torch.int32)
413
+ pitch_feat = pad_sequence(pitch_feat,
414
+ batch_first=True,
415
+ padding_value=0)
416
+ batch["pitch_feat"] = pitch_feat
417
+ batch["pitch_feat_len"] = pitch_feat_len
418
+ else:
419
+ # only gan train needs speech, delete it to save memory
420
+ del batch["speech"]
421
+ del batch["speech_len"]
422
+ if dpo is True:
423
+ reject_speech_token = [torch.tensor(sample[i]['reject_speech_token']) for i in order]
424
+ reject_speech_token_len = torch.tensor([i.size(0) for i in reject_speech_token], dtype=torch.int32)
425
+ reject_speech_token = pad_sequence(reject_speech_token,
426
+ batch_first=True,
427
+ padding_value=0)
428
+ batch['reject_speech_token'] = reject_speech_token
429
+ batch['reject_speech_token_len'] = reject_speech_token_len
430
+ if use_spk_embedding is True:
431
+ batch["embedding"] = batch["spk_embedding"]
432
+ else:
433
+ batch["embedding"] = batch["utt_embedding"]
434
+ yield batch
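A toy check of the `dynamic_batch()` stage above makes its behaviour concrete: a batch is emitted as soon as padding every sample to the longest one in the buffer would exceed the frame budget. The lengths and budget here are arbitrary illustrations:

import torch
from cosyvoice.dataset.processor import dynamic_batch

samples = [{'speech_feat': torch.zeros(n, 80)} for n in (100, 120, 300, 80, 500)]
batches = list(dynamic_batch(iter(samples), max_frames_in_batch=600))
print([len(b) for b in batches])  # [2, 2, 1]
# 100 and 120 frames fit together under the 600-frame padded budget, 300 and 80 pad to exactly 600,
# and the 500-frame sample ends up in a batch of its own.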
speech/cosyvoice/flow/decoder.py ADDED
@@ -0,0 +1,494 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Tuple
15
+ import torch
16
+ import torch.nn as nn
17
+ import torch.nn.functional as F
18
+ from einops import pack, rearrange, repeat
19
+ from cosyvoice.utils.common import mask_to_bias
20
+ from cosyvoice.utils.mask import add_optional_chunk_mask
21
+ from matcha.models.components.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, TimestepEmbedding, Upsample1D
22
+ from matcha.models.components.transformer import BasicTransformerBlock
23
+
24
+
25
+ class Transpose(torch.nn.Module):
26
+ def __init__(self, dim0: int, dim1: int):
27
+ super().__init__()
28
+ self.dim0 = dim0
29
+ self.dim1 = dim1
30
+
31
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
32
+ x = torch.transpose(x, self.dim0, self.dim1)
33
+ return x
34
+
35
+
36
+ class CausalConv1d(torch.nn.Conv1d):
37
+ def __init__(
38
+ self,
39
+ in_channels: int,
40
+ out_channels: int,
41
+ kernel_size: int,
42
+ stride: int = 1,
43
+ dilation: int = 1,
44
+ groups: int = 1,
45
+ bias: bool = True,
46
+ padding_mode: str = 'zeros',
47
+ device=None,
48
+ dtype=None
49
+ ) -> None:
50
+ super(CausalConv1d, self).__init__(in_channels, out_channels,
51
+ kernel_size, stride,
52
+ padding=0, dilation=dilation,
53
+ groups=groups, bias=bias,
54
+ padding_mode=padding_mode,
55
+ device=device, dtype=dtype)
56
+ assert stride == 1
57
+ self.causal_padding = kernel_size - 1
58
+
59
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
60
+ x = F.pad(x, (self.causal_padding, 0), value=0.0)
61
+ x = super(CausalConv1d, self).forward(x)
62
+ return x
63
+
64
+
65
+ class CausalBlock1D(Block1D):
66
+ def __init__(self, dim: int, dim_out: int):
67
+ super(CausalBlock1D, self).__init__(dim, dim_out)
68
+ self.block = torch.nn.Sequential(
69
+ CausalConv1d(dim, dim_out, 3),
70
+ Transpose(1, 2),
71
+ nn.LayerNorm(dim_out),
72
+ Transpose(1, 2),
73
+ nn.Mish(),
74
+ )
75
+
76
+ def forward(self, x: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
77
+ output = self.block(x * mask)
78
+ return output * mask
79
+
80
+
81
+ class CausalResnetBlock1D(ResnetBlock1D):
82
+ def __init__(self, dim: int, dim_out: int, time_emb_dim: int, groups: int = 8):
83
+ super(CausalResnetBlock1D, self).__init__(dim, dim_out, time_emb_dim, groups)
84
+ self.block1 = CausalBlock1D(dim, dim_out)
85
+ self.block2 = CausalBlock1D(dim_out, dim_out)
86
+
87
+
88
+ class ConditionalDecoder(nn.Module):
89
+ def __init__(
90
+ self,
91
+ in_channels,
92
+ out_channels,
93
+ channels=(256, 256),
94
+ dropout=0.05,
95
+ attention_head_dim=64,
96
+ n_blocks=1,
97
+ num_mid_blocks=2,
98
+ num_heads=4,
99
+ act_fn="snake",
100
+ ):
101
+ """
102
+ This decoder requires an input with the same shape of the target. So, if your text content
103
+ is shorter or longer than the outputs, please re-sampling it before feeding to the decoder.
104
+ """
105
+ super().__init__()
106
+ channels = tuple(channels)
107
+ self.in_channels = in_channels
108
+ self.out_channels = out_channels
109
+
110
+ self.time_embeddings = SinusoidalPosEmb(in_channels)
111
+ time_embed_dim = channels[0] * 4
112
+ self.time_mlp = TimestepEmbedding(
113
+ in_channels=in_channels,
114
+ time_embed_dim=time_embed_dim,
115
+ act_fn="silu",
116
+ )
117
+ self.down_blocks = nn.ModuleList([])
118
+ self.mid_blocks = nn.ModuleList([])
119
+ self.up_blocks = nn.ModuleList([])
120
+
121
+ output_channel = in_channels
122
+ for i in range(len(channels)): # pylint: disable=consider-using-enumerate
123
+ input_channel = output_channel
124
+ output_channel = channels[i]
125
+ is_last = i == len(channels) - 1
126
+ resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
127
+ transformer_blocks = nn.ModuleList(
128
+ [
129
+ BasicTransformerBlock(
130
+ dim=output_channel,
131
+ num_attention_heads=num_heads,
132
+ attention_head_dim=attention_head_dim,
133
+ dropout=dropout,
134
+ activation_fn=act_fn,
135
+ )
136
+ for _ in range(n_blocks)
137
+ ]
138
+ )
139
+ downsample = (
140
+ Downsample1D(output_channel) if not is_last else nn.Conv1d(output_channel, output_channel, 3, padding=1)
141
+ )
142
+ self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))
143
+
144
+ for _ in range(num_mid_blocks):
145
+ input_channel = channels[-1]
146
+ out_channels = channels[-1]
147
+ resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
148
+
149
+ transformer_blocks = nn.ModuleList(
150
+ [
151
+ BasicTransformerBlock(
152
+ dim=output_channel,
153
+ num_attention_heads=num_heads,
154
+ attention_head_dim=attention_head_dim,
155
+ dropout=dropout,
156
+ activation_fn=act_fn,
157
+ )
158
+ for _ in range(n_blocks)
159
+ ]
160
+ )
161
+
162
+ self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))
163
+
164
+ channels = channels[::-1] + (channels[0],)
165
+ for i in range(len(channels) - 1):
166
+ input_channel = channels[i] * 2
167
+ output_channel = channels[i + 1]
168
+ is_last = i == len(channels) - 2
169
+ resnet = ResnetBlock1D(
170
+ dim=input_channel,
171
+ dim_out=output_channel,
172
+ time_emb_dim=time_embed_dim,
173
+ )
174
+ transformer_blocks = nn.ModuleList(
175
+ [
176
+ BasicTransformerBlock(
177
+ dim=output_channel,
178
+ num_attention_heads=num_heads,
179
+ attention_head_dim=attention_head_dim,
180
+ dropout=dropout,
181
+ activation_fn=act_fn,
182
+ )
183
+ for _ in range(n_blocks)
184
+ ]
185
+ )
186
+ upsample = (
187
+ Upsample1D(output_channel, use_conv_transpose=True)
188
+ if not is_last
189
+ else nn.Conv1d(output_channel, output_channel, 3, padding=1)
190
+ )
191
+ self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
192
+ self.final_block = Block1D(channels[-1], channels[-1])
193
+ self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
194
+ self.initialize_weights()
195
+
196
+ def initialize_weights(self):
197
+ for m in self.modules():
198
+ if isinstance(m, nn.Conv1d):
199
+ nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
200
+ if m.bias is not None:
201
+ nn.init.constant_(m.bias, 0)
202
+ elif isinstance(m, nn.GroupNorm):
203
+ nn.init.constant_(m.weight, 1)
204
+ nn.init.constant_(m.bias, 0)
205
+ elif isinstance(m, nn.Linear):
206
+ nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
207
+ if m.bias is not None:
208
+ nn.init.constant_(m.bias, 0)
209
+
210
+ def forward(self, x, mask, mu, t, spks=None, cond=None, streaming=False):
211
+ """Forward pass of the UNet1DConditional model.
212
+
213
+ Args:
214
+ x (torch.Tensor): shape (batch_size, in_channels, time)
215
+ mask (_type_): shape (batch_size, 1, time)
216
+ t (_type_): shape (batch_size)
217
+ spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None.
218
+ cond (_type_, optional): placeholder for future use. Defaults to None.
219
+
220
+ Raises:
221
+ ValueError: _description_
222
+ ValueError: _description_
223
+
224
+ Returns:
225
+ _type_: _description_
226
+ """
227
+
228
+ t = self.time_embeddings(t).to(t.dtype)
229
+ t = self.time_mlp(t)
230
+
231
+ x = pack([x, mu], "b * t")[0]
232
+
233
+ if spks is not None:
234
+ spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
235
+ x = pack([x, spks], "b * t")[0]
236
+ if cond is not None:
237
+ x = pack([x, cond], "b * t")[0]
238
+
239
+ hiddens = []
240
+ masks = [mask]
241
+ for resnet, transformer_blocks, downsample in self.down_blocks:
242
+ mask_down = masks[-1]
243
+ x = resnet(x, mask_down, t)
244
+ x = rearrange(x, "b c t -> b t c").contiguous()
245
+ attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1)
246
+ attn_mask = mask_to_bias(attn_mask, x.dtype)
247
+ for transformer_block in transformer_blocks:
248
+ x = transformer_block(
249
+ hidden_states=x,
250
+ attention_mask=attn_mask,
251
+ timestep=t,
252
+ )
253
+ x = rearrange(x, "b t c -> b c t").contiguous()
254
+ hiddens.append(x) # Save hidden states for skip connections
255
+ x = downsample(x * mask_down)
256
+ masks.append(mask_down[:, :, ::2])
257
+ masks = masks[:-1]
258
+ mask_mid = masks[-1]
259
+
260
+ for resnet, transformer_blocks in self.mid_blocks:
261
+ x = resnet(x, mask_mid, t)
262
+ x = rearrange(x, "b c t -> b t c").contiguous()
263
+ attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1)
264
+ attn_mask = mask_to_bias(attn_mask, x.dtype)
265
+ for transformer_block in transformer_blocks:
266
+ x = transformer_block(
267
+ hidden_states=x,
268
+ attention_mask=attn_mask,
269
+ timestep=t,
270
+ )
271
+ x = rearrange(x, "b t c -> b c t").contiguous()
272
+
273
+ for resnet, transformer_blocks, upsample in self.up_blocks:
274
+ mask_up = masks.pop()
275
+ skip = hiddens.pop()
276
+ x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0]
277
+ x = resnet(x, mask_up, t)
278
+ x = rearrange(x, "b c t -> b t c").contiguous()
279
+ attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1)
280
+ attn_mask = mask_to_bias(attn_mask, x.dtype)
281
+ for transformer_block in transformer_blocks:
282
+ x = transformer_block(
283
+ hidden_states=x,
284
+ attention_mask=attn_mask,
285
+ timestep=t,
286
+ )
287
+ x = rearrange(x, "b t c -> b c t").contiguous()
288
+ x = upsample(x * mask_up)
289
+ x = self.final_block(x, mask_up)
290
+ output = self.final_proj(x * mask_up)
291
+ return output * mask
292
+
293
+
294
+ class CausalConditionalDecoder(ConditionalDecoder):
295
+ def __init__(
296
+ self,
297
+ in_channels,
298
+ out_channels,
299
+ channels=(256, 256),
300
+ dropout=0.05,
301
+ attention_head_dim=64,
302
+ n_blocks=1,
303
+ num_mid_blocks=2,
304
+ num_heads=4,
305
+ act_fn="snake",
306
+ static_chunk_size=50,
307
+ num_decoding_left_chunks=2,
308
+ ):
309
+ """
310
+ This decoder requires an input with the same shape of the target. So, if your text content
311
+ is shorter or longer than the outputs, please re-sampling it before feeding to the decoder.
312
+ """
313
+ torch.nn.Module.__init__(self)
314
+ channels = tuple(channels)
315
+ self.in_channels = in_channels
316
+ self.out_channels = out_channels
317
+ self.time_embeddings = SinusoidalPosEmb(in_channels)
318
+ time_embed_dim = channels[0] * 4
319
+ self.time_mlp = TimestepEmbedding(
320
+ in_channels=in_channels,
321
+ time_embed_dim=time_embed_dim,
322
+ act_fn="silu",
323
+ )
324
+ self.static_chunk_size = static_chunk_size
325
+ self.num_decoding_left_chunks = num_decoding_left_chunks
326
+ self.down_blocks = nn.ModuleList([])
327
+ self.mid_blocks = nn.ModuleList([])
328
+ self.up_blocks = nn.ModuleList([])
329
+
330
+ output_channel = in_channels
331
+ for i in range(len(channels)): # pylint: disable=consider-using-enumerate
332
+ input_channel = output_channel
333
+ output_channel = channels[i]
334
+ is_last = i == len(channels) - 1
335
+ resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
336
+ transformer_blocks = nn.ModuleList(
337
+ [
338
+ BasicTransformerBlock(
339
+ dim=output_channel,
340
+ num_attention_heads=num_heads,
341
+ attention_head_dim=attention_head_dim,
342
+ dropout=dropout,
343
+ activation_fn=act_fn,
344
+ )
345
+ for _ in range(n_blocks)
346
+ ]
347
+ )
348
+ downsample = (
349
+ Downsample1D(output_channel) if not is_last else CausalConv1d(output_channel, output_channel, 3)
350
+ )
351
+ self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))
352
+
353
+ for _ in range(num_mid_blocks):
354
+ input_channel = channels[-1]
355
+ out_channels = channels[-1]
356
+ resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
357
+
358
+ transformer_blocks = nn.ModuleList(
359
+ [
360
+ BasicTransformerBlock(
361
+ dim=output_channel,
362
+ num_attention_heads=num_heads,
363
+ attention_head_dim=attention_head_dim,
364
+ dropout=dropout,
365
+ activation_fn=act_fn,
366
+ )
367
+ for _ in range(n_blocks)
368
+ ]
369
+ )
370
+
371
+ self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))
372
+
373
+ channels = channels[::-1] + (channels[0],)
374
+ for i in range(len(channels) - 1):
375
+ input_channel = channels[i] * 2
376
+ output_channel = channels[i + 1]
377
+ is_last = i == len(channels) - 2
378
+ resnet = CausalResnetBlock1D(
379
+ dim=input_channel,
380
+ dim_out=output_channel,
381
+ time_emb_dim=time_embed_dim,
382
+ )
383
+ transformer_blocks = nn.ModuleList(
384
+ [
385
+ BasicTransformerBlock(
386
+ dim=output_channel,
387
+ num_attention_heads=num_heads,
388
+ attention_head_dim=attention_head_dim,
389
+ dropout=dropout,
390
+ activation_fn=act_fn,
391
+ )
392
+ for _ in range(n_blocks)
393
+ ]
394
+ )
395
+ upsample = (
396
+ Upsample1D(output_channel, use_conv_transpose=True)
397
+ if not is_last
398
+ else CausalConv1d(output_channel, output_channel, 3)
399
+ )
400
+ self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
401
+ self.final_block = CausalBlock1D(channels[-1], channels[-1])
402
+ self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
403
+ self.initialize_weights()
404
+
405
+ def forward(self, x, mask, mu, t, spks=None, cond=None, streaming=False):
406
+ """Forward pass of the UNet1DConditional model.
407
+
408
+ Args:
409
+ x (torch.Tensor): shape (batch_size, in_channels, time)
410
+ mask (_type_): shape (batch_size, 1, time)
411
+ t (_type_): shape (batch_size)
412
+ spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None.
413
+ cond (_type_, optional): placeholder for future use. Defaults to None.
414
+
415
+ Raises:
416
+ ValueError: _description_
417
+ ValueError: _description_
418
+
419
+ Returns:
420
+ _type_: _description_
421
+ """
422
+ t = self.time_embeddings(t).to(t.dtype)
423
+ t = self.time_mlp(t)
424
+
425
+ x = pack([x, mu], "b * t")[0]
426
+
427
+ if spks is not None:
428
+ spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
429
+ x = pack([x, spks], "b * t")[0]
430
+ if cond is not None:
431
+ x = pack([x, cond], "b * t")[0]
432
+
433
+ hiddens = []
434
+ masks = [mask]
435
+ for resnet, transformer_blocks, downsample in self.down_blocks:
436
+ mask_down = masks[-1]
437
+ x = resnet(x, mask_down, t)
438
+ x = rearrange(x, "b c t -> b t c").contiguous()
439
+ if streaming is True:
440
+ attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, self.static_chunk_size, -1)
441
+ else:
442
+ attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1)
443
+ attn_mask = mask_to_bias(attn_mask, x.dtype)
444
+ for transformer_block in transformer_blocks:
445
+ x = transformer_block(
446
+ hidden_states=x,
447
+ attention_mask=attn_mask,
448
+ timestep=t,
449
+ )
450
+ x = rearrange(x, "b t c -> b c t").contiguous()
451
+ hiddens.append(x) # Save hidden states for skip connections
452
+ x = downsample(x * mask_down)
453
+ masks.append(mask_down[:, :, ::2])
454
+ masks = masks[:-1]
455
+ mask_mid = masks[-1]
456
+
457
+ for resnet, transformer_blocks in self.mid_blocks:
458
+ x = resnet(x, mask_mid, t)
459
+ x = rearrange(x, "b c t -> b t c").contiguous()
460
+ if streaming is True:
461
+ attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, self.static_chunk_size, -1)
462
+ else:
463
+ attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1)
464
+ attn_mask = mask_to_bias(attn_mask, x.dtype)
465
+ for transformer_block in transformer_blocks:
466
+ x = transformer_block(
467
+ hidden_states=x,
468
+ attention_mask=attn_mask,
469
+ timestep=t,
470
+ )
471
+ x = rearrange(x, "b t c -> b c t").contiguous()
472
+
473
+ for resnet, transformer_blocks, upsample in self.up_blocks:
474
+ mask_up = masks.pop()
475
+ skip = hiddens.pop()
476
+ x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0]
477
+ x = resnet(x, mask_up, t)
478
+ x = rearrange(x, "b c t -> b t c").contiguous()
479
+ if streaming is True:
480
+ attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, self.static_chunk_size, -1)
481
+ else:
482
+ attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1)
483
+ attn_mask = mask_to_bias(attn_mask, x.dtype)
484
+ for transformer_block in transformer_blocks:
485
+ x = transformer_block(
486
+ hidden_states=x,
487
+ attention_mask=attn_mask,
488
+ timestep=t,
489
+ )
490
+ x = rearrange(x, "b t c -> b c t").contiguous()
491
+ x = upsample(x * mask_up)
492
+ x = self.final_block(x, mask_up)
493
+ output = self.final_proj(x * mask_up)
494
+ return output * mask
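One property of `CausalConv1d` above worth making explicit: padding only on the left by `kernel_size - 1` keeps the output length equal to the input length while ensuring each output frame depends on the current and earlier input frames only. A small self-contained check (module path assumed to match the file above):

import torch
from cosyvoice.flow.decoder import CausalConv1d

conv = CausalConv1d(4, 4, kernel_size=3).eval()
x = torch.randn(1, 4, 20)
with torch.no_grad():
    y_full = conv(x)
    y_prefix = conv(x[:, :, :10])
# the first 10 output frames are identical whether or not the future frames are present
assert torch.allclose(y_full[:, :, :10], y_prefix, atol=1e-6)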
speech/cosyvoice/flow/flow.py ADDED
@@ -0,0 +1,281 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import logging
15
+ import random
16
+ from typing import Dict, Optional
17
+ import torch
18
+ import torch.nn as nn
19
+ from torch.nn import functional as F
20
+ from omegaconf import DictConfig
21
+ from cosyvoice.utils.mask import make_pad_mask
22
+
23
+
24
+ class MaskedDiffWithXvec(torch.nn.Module):
25
+ def __init__(self,
26
+ input_size: int = 512,
27
+ output_size: int = 80,
28
+ spk_embed_dim: int = 192,
29
+ output_type: str = "mel",
30
+ vocab_size: int = 4096,
31
+ input_frame_rate: int = 50,
32
+ only_mask_loss: bool = True,
33
+ encoder: torch.nn.Module = None,
34
+ length_regulator: torch.nn.Module = None,
35
+ decoder: torch.nn.Module = None,
36
+ decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
37
+ 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
38
+ 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
39
+ 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
40
+ 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
41
+ mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
42
+ 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
43
+ super().__init__()
44
+ self.input_size = input_size
45
+ self.output_size = output_size
46
+ self.decoder_conf = decoder_conf
47
+ self.mel_feat_conf = mel_feat_conf
48
+ self.vocab_size = vocab_size
49
+ self.output_type = output_type
50
+ self.input_frame_rate = input_frame_rate
51
+ logging.info(f"input frame rate={self.input_frame_rate}")
52
+ self.input_embedding = nn.Embedding(vocab_size, input_size)
53
+ self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
54
+ self.encoder = encoder
55
+ self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
56
+ self.decoder = decoder
57
+ self.length_regulator = length_regulator
58
+ self.only_mask_loss = only_mask_loss
59
+
60
+ def forward(
61
+ self,
62
+ batch: dict,
63
+ device: torch.device,
64
+ ) -> Dict[str, Optional[torch.Tensor]]:
65
+ token = batch['speech_token'].to(device)
66
+ token_len = batch['speech_token_len'].to(device)
67
+ feat = batch['speech_feat'].to(device)
68
+ feat_len = batch['speech_feat_len'].to(device)
69
+ embedding = batch['embedding'].to(device)
70
+
71
+ # xvec projection
72
+ embedding = F.normalize(embedding, dim=1)
73
+ embedding = self.spk_embed_affine_layer(embedding)
74
+
75
+ # concat text and prompt_text
76
+ mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)
77
+ token = self.input_embedding(torch.clamp(token, min=0)) * mask
78
+
79
+ # text encode
80
+ h, h_lengths = self.encoder(token, token_len)
81
+ h = self.encoder_proj(h)
82
+ h, h_lengths = self.length_regulator(h, feat_len)
83
+
84
+ # get conditions
85
+ conds = torch.zeros(feat.shape, device=token.device)
86
+ for i, j in enumerate(feat_len):
87
+ if random.random() < 0.5:
88
+ continue
89
+ index = random.randint(0, int(0.3 * j))
90
+ conds[i, :index] = feat[i, :index]
91
+ conds = conds.transpose(1, 2)
92
+
93
+ mask = (~make_pad_mask(feat_len)).to(h)
94
+ # NOTE this is unnecessary, feat/h already same shape
95
+ loss, _ = self.decoder.compute_loss(
96
+ feat.transpose(1, 2).contiguous(),
97
+ mask.unsqueeze(1),
98
+ h.transpose(1, 2).contiguous(),
99
+ embedding,
100
+ cond=conds
101
+ )
102
+ return {'loss': loss}
103
+
104
+ @torch.inference_mode()
105
+ def inference(self,
106
+ token,
107
+ token_len,
108
+ prompt_token,
109
+ prompt_token_len,
110
+ prompt_feat,
111
+ prompt_feat_len,
112
+ embedding,
113
+ flow_cache):
114
+ assert token.shape[0] == 1
115
+ # xvec projection
116
+ embedding = F.normalize(embedding, dim=1)
117
+ embedding = self.spk_embed_affine_layer(embedding)
118
+
119
+ # concat speech token and prompt speech token
120
+ token_len1, token_len2 = prompt_token.shape[1], token.shape[1]
121
+ token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
122
+ mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
123
+ token = self.input_embedding(torch.clamp(token, min=0)) * mask
124
+
125
+ # text encode
126
+ h, h_lengths = self.encoder(token, token_len)
127
+ h = self.encoder_proj(h)
128
+ mel_len1, mel_len2 = prompt_feat.shape[1], int(token_len2 / self.input_frame_rate * 22050 / 256)
129
+ h, h_lengths = self.length_regulator.inference(h[:, :token_len1], h[:, token_len1:], mel_len1, mel_len2, self.input_frame_rate)
130
+
131
+ # get conditions
132
+ conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
133
+ conds[:, :mel_len1] = prompt_feat
134
+ conds = conds.transpose(1, 2)
135
+
136
+ mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
137
+ feat, flow_cache = self.decoder(
138
+ mu=h.transpose(1, 2).contiguous(),
139
+ mask=mask.unsqueeze(1),
140
+ spks=embedding,
141
+ cond=conds,
142
+ n_timesteps=10,
143
+ prompt_len=mel_len1,
144
+ cache=flow_cache
145
+ )
146
+ feat = feat[:, :, mel_len1:]
147
+ assert feat.shape[2] == mel_len2
148
+ return feat.float(), flow_cache
149
+
150
+
151
+ class CausalMaskedDiffWithXvec(torch.nn.Module):
152
+ def __init__(self,
153
+ input_size: int = 512,
154
+ output_size: int = 80,
155
+ spk_embed_dim: int = 192,
156
+ output_type: str = "mel",
157
+ vocab_size: int = 4096,
158
+ input_frame_rate: int = 50,
159
+ only_mask_loss: bool = True,
160
+ token_mel_ratio: int = 2,
161
+ pre_lookahead_len: int = 3,
162
+ encoder: torch.nn.Module = None,
163
+ decoder: torch.nn.Module = None,
164
+ decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
165
+ 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
166
+ 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}),
167
+ 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64,
168
+ 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
169
+ mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
170
+ 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
171
+ super().__init__()
172
+ self.input_size = input_size
173
+ self.output_size = output_size
174
+ self.decoder_conf = decoder_conf
175
+ self.mel_feat_conf = mel_feat_conf
176
+ self.vocab_size = vocab_size
177
+ self.output_type = output_type
178
+ self.input_frame_rate = input_frame_rate
179
+ logging.info(f"input frame rate={self.input_frame_rate}")
180
+ self.input_embedding = nn.Embedding(vocab_size, input_size)
181
+ self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
182
+ self.encoder = encoder
183
+ self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
184
+ self.decoder = decoder
185
+ self.only_mask_loss = only_mask_loss
186
+ self.token_mel_ratio = token_mel_ratio
187
+ self.pre_lookahead_len = pre_lookahead_len
188
+
189
+ def forward(
190
+ self,
191
+ batch: dict,
192
+ device: torch.device,
193
+ ) -> Dict[str, Optional[torch.Tensor]]:
194
+ token = batch['speech_token'].to(device)
195
+ token_len = batch['speech_token_len'].to(device)
196
+ feat = batch['speech_feat'].to(device)
197
+ feat_len = batch['speech_feat_len'].to(device)
198
+ embedding = batch['embedding'].to(device)
199
+
200
+ # NOTE unified training, static_chunk_size > 0 or = 0
201
+ streaming = True if random.random() < 0.5 else False
202
+
203
+ # xvec projection
204
+ embedding = F.normalize(embedding, dim=1)
205
+ embedding = self.spk_embed_affine_layer(embedding)
206
+
207
+ # concat text and prompt_text
208
+ mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)
209
+ token = self.input_embedding(torch.clamp(token, min=0)) * mask
210
+
211
+ # text encode
212
+ h, h_lengths = self.encoder(token, token_len, streaming=streaming)
213
+ h = self.encoder_proj(h)
214
+
215
+ # get conditions
216
+ conds = torch.zeros(feat.shape, device=token.device)
217
+ for i, j in enumerate(feat_len):
218
+ if random.random() < 0.5:
219
+ continue
220
+ index = random.randint(0, int(0.3 * j))
221
+ conds[i, :index] = feat[i, :index]
222
+ conds = conds.transpose(1, 2)
223
+
224
+ mask = (~make_pad_mask(h_lengths.sum(dim=-1).squeeze(dim=1))).to(h)
225
+ loss, _ = self.decoder.compute_loss(
226
+ feat.transpose(1, 2).contiguous(),
227
+ mask.unsqueeze(1),
228
+ h.transpose(1, 2).contiguous(),
229
+ embedding,
230
+ cond=conds,
231
+ streaming=streaming,
232
+ )
233
+ return {'loss': loss}
234
+
235
+ @torch.inference_mode()
236
+ def inference(self,
237
+ token,
238
+ token_len,
239
+ prompt_token,
240
+ prompt_token_len,
241
+ prompt_feat,
242
+ prompt_feat_len,
243
+ embedding,
244
+ streaming,
245
+ finalize):
246
+ assert token.shape[0] == 1
247
+ # xvec projection
248
+ embedding = F.normalize(embedding, dim=1)
249
+ embedding = self.spk_embed_affine_layer(embedding)
250
+
251
+ # concat text and prompt_text
252
+ token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
253
+ mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
254
+ token = self.input_embedding(torch.clamp(token, min=0)) * mask
255
+
256
+ # text encode
257
+ if finalize is True:
258
+ h, h_lengths = self.encoder(token, token_len, streaming=streaming)
259
+ else:
260
+ token, context = token[:, :-self.pre_lookahead_len], token[:, -self.pre_lookahead_len:]
261
+ h, h_lengths = self.encoder(token, token_len, context=context, streaming=streaming)
262
+ mel_len1, mel_len2 = prompt_feat.shape[1], h.shape[1] - prompt_feat.shape[1]
263
+ h = self.encoder_proj(h)
264
+
265
+ # get conditions
266
+ conds = torch.zeros([1, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
267
+ conds[:, :mel_len1] = prompt_feat
268
+ conds = conds.transpose(1, 2)
269
+
270
+ mask = (~make_pad_mask(torch.tensor([mel_len1 + mel_len2]))).to(h)
271
+ feat, _ = self.decoder(
272
+ mu=h.transpose(1, 2).contiguous(),
273
+ mask=mask.unsqueeze(1),
274
+ spks=embedding,
275
+ cond=conds,
276
+ n_timesteps=10,
277
+ streaming=streaming
278
+ )
279
+ feat = feat[:, :, mel_len1:]
280
+ assert feat.shape[2] == mel_len2
281
+ return feat.float(), None
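In `MaskedDiffWithXvec.inference` above, the number of mel frames to generate is derived from the token count, the token frame rate, and the mel hop size (22050 Hz with hop 256 in the default `mel_feat_conf`). A small worked example of that conversion:

def estimate_mel_len(num_speech_tokens, input_frame_rate=50, sampling_rate=22050, hop_size=256):
    # mirrors mel_len2 = int(token_len2 / self.input_frame_rate * 22050 / 256) in the code above
    return int(num_speech_tokens / input_frame_rate * sampling_rate / hop_size)

print(estimate_mel_len(100))  # 100 tokens at 50 tokens/s cover 2 s of audio, i.e. 172 mel frames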
speech/cosyvoice/flow/flow_matching.py ADDED
@@ -0,0 +1,227 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ # 2025 Alibaba Inc (authors: Xiang Lyu, Bofan Zhou)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import torch
16
+ import torch.nn.functional as F
17
+ from matcha.models.components.flow_matching import BASECFM
18
+ from cosyvoice.utils.common import set_all_random_seed
19
+
20
+
21
+ class ConditionalCFM(BASECFM):
22
+ def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
23
+ super().__init__(
24
+ n_feats=in_channels,
25
+ cfm_params=cfm_params,
26
+ n_spks=n_spks,
27
+ spk_emb_dim=spk_emb_dim,
28
+ )
29
+ self.t_scheduler = cfm_params.t_scheduler
30
+ self.training_cfg_rate = cfm_params.training_cfg_rate
31
+ self.inference_cfg_rate = cfm_params.inference_cfg_rate
32
+ in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0)
33
+ # Just change the architecture of the estimator here
34
+ self.estimator = estimator
35
+
36
+ @torch.inference_mode()
37
+ def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, prompt_len=0, cache=torch.zeros(1, 80, 0, 2)):
38
+ """Forward diffusion
39
+
40
+ Args:
41
+ mu (torch.Tensor): output of encoder
42
+ shape: (batch_size, n_feats, mel_timesteps)
43
+ mask (torch.Tensor): output_mask
44
+ shape: (batch_size, 1, mel_timesteps)
45
+ n_timesteps (int): number of diffusion steps
46
+ temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
47
+ spks (torch.Tensor, optional): speaker ids. Defaults to None.
48
+ shape: (batch_size, spk_emb_dim)
49
+ cond: Not used but kept for future purposes
50
+
51
+ Returns:
52
+ sample: generated mel-spectrogram
53
+ shape: (batch_size, n_feats, mel_timesteps)
54
+ """
55
+
56
+ z = torch.randn_like(mu).to(mu.device).to(mu.dtype) * temperature
57
+ cache_size = cache.shape[2]
58
+ # fix prompt and overlap part mu and z
59
+ if cache_size != 0:
60
+ z[:, :, :cache_size] = cache[:, :, :, 0]
61
+ mu[:, :, :cache_size] = cache[:, :, :, 1]
62
+ z_cache = torch.concat([z[:, :, :prompt_len], z[:, :, -34:]], dim=2)
63
+ mu_cache = torch.concat([mu[:, :, :prompt_len], mu[:, :, -34:]], dim=2)
64
+ cache = torch.stack([z_cache, mu_cache], dim=-1)
65
+
66
+ t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
67
+ if self.t_scheduler == 'cosine':
68
+ t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
69
+ return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), cache
70
+
71
+ def solve_euler(self, x, t_span, mu, mask, spks, cond, streaming=False):
72
+ """
73
+ Fixed euler solver for ODEs.
74
+ Args:
75
+ x (torch.Tensor): random noise
76
+ t_span (torch.Tensor): n_timesteps interpolated
77
+ shape: (n_timesteps + 1,)
78
+ mu (torch.Tensor): output of encoder
79
+ shape: (batch_size, n_feats, mel_timesteps)
80
+ mask (torch.Tensor): output_mask
81
+ shape: (batch_size, 1, mel_timesteps)
82
+ spks (torch.Tensor, optional): speaker ids. Defaults to None.
83
+ shape: (batch_size, spk_emb_dim)
84
+ cond: Not used but kept for future purposes
85
+ """
86
+ t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
87
+ t = t.unsqueeze(dim=0)
88
+
89
+ # I am storing this because I can later plot it by putting a debugger here and saving it to a file
90
+ # Or in future might add like a return_all_steps flag
91
+ sol = []
92
+
93
+ # Do not use concat, it may cause memory format changed and trt infer with wrong results!
94
+ x_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
95
+ mask_in = torch.zeros([2, 1, x.size(2)], device=x.device, dtype=x.dtype)
96
+ mu_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
97
+ t_in = torch.zeros([2], device=x.device, dtype=x.dtype)
98
+ spks_in = torch.zeros([2, 80], device=x.device, dtype=x.dtype)
99
+ cond_in = torch.zeros([2, 80, x.size(2)], device=x.device, dtype=x.dtype)
100
+ for step in range(1, len(t_span)):
101
+ # Classifier-Free Guidance inference introduced in VoiceBox
102
+ x_in[:] = x
103
+ mask_in[:] = mask
104
+ mu_in[0] = mu
105
+ t_in[:] = t.unsqueeze(0)
106
+ spks_in[0] = spks
107
+ cond_in[0] = cond
108
+ dphi_dt = self.forward_estimator(
109
+ x_in, mask_in,
110
+ mu_in, t_in,
111
+ spks_in,
112
+ cond_in,
113
+ streaming
114
+ )
115
+ dphi_dt, cfg_dphi_dt = torch.split(dphi_dt, [x.size(0), x.size(0)], dim=0)
116
+ dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt - self.inference_cfg_rate * cfg_dphi_dt)
117
+ x = x + dt * dphi_dt
118
+ t = t + dt
119
+ sol.append(x)
120
+ if step < len(t_span) - 1:
121
+ dt = t_span[step + 1] - t
122
+
123
+ return sol[-1].float()
124
+
125
+ def forward_estimator(self, x, mask, mu, t, spks, cond, streaming=False):
126
+ if isinstance(self.estimator, torch.nn.Module):
127
+ return self.estimator(x, mask, mu, t, spks, cond, streaming=streaming)
128
+ else:
129
+ [estimator, stream], trt_engine = self.estimator.acquire_estimator()
130
+ # NOTE need to synchronize when switching stream
131
+ torch.cuda.current_stream().synchronize()
132
+ with stream:
133
+ estimator.set_input_shape('x', (2, 80, x.size(2)))
134
+ estimator.set_input_shape('mask', (2, 1, x.size(2)))
135
+ estimator.set_input_shape('mu', (2, 80, x.size(2)))
136
+ estimator.set_input_shape('t', (2,))
137
+ estimator.set_input_shape('spks', (2, 80))
138
+ estimator.set_input_shape('cond', (2, 80, x.size(2)))
139
+ data_ptrs = [x.contiguous().data_ptr(),
140
+ mask.contiguous().data_ptr(),
141
+ mu.contiguous().data_ptr(),
142
+ t.contiguous().data_ptr(),
143
+ spks.contiguous().data_ptr(),
144
+ cond.contiguous().data_ptr(),
145
+ x.data_ptr()]
146
+ for i, j in enumerate(data_ptrs):
147
+ estimator.set_tensor_address(trt_engine.get_tensor_name(i), j)
148
+ # run trt engine
149
+ assert estimator.execute_async_v3(torch.cuda.current_stream().cuda_stream) is True
150
+ torch.cuda.current_stream().synchronize()
151
+ self.estimator.release_estimator(estimator, stream)
152
+ return x
153
+
154
+ def compute_loss(self, x1, mask, mu, spks=None, cond=None, streaming=False):
155
+ """Computes diffusion loss
156
+
157
+ Args:
158
+ x1 (torch.Tensor): Target
159
+ shape: (batch_size, n_feats, mel_timesteps)
160
+ mask (torch.Tensor): target mask
161
+ shape: (batch_size, 1, mel_timesteps)
162
+ mu (torch.Tensor): output of encoder
163
+ shape: (batch_size, n_feats, mel_timesteps)
164
+ spks (torch.Tensor, optional): speaker embedding. Defaults to None.
165
+ shape: (batch_size, spk_emb_dim)
166
+
167
+ Returns:
168
+ loss: conditional flow matching loss
169
+ y: conditional flow
170
+ shape: (batch_size, n_feats, mel_timesteps)
171
+ """
172
+ b, _, t = mu.shape
173
+
174
+ # random timestep
175
+ t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
176
+ if self.t_scheduler == 'cosine':
177
+ t = 1 - torch.cos(t * 0.5 * torch.pi)
178
+ # sample noise p(x_0)
179
+ z = torch.randn_like(x1)
180
+
181
+ y = (1 - (1 - self.sigma_min) * t) * z + t * x1
182
+ u = x1 - (1 - self.sigma_min) * z
183
+
184
+ # during training, we randomly drop condition to trade off mode coverage and sample fidelity
185
+ if self.training_cfg_rate > 0:
186
+ cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate
187
+ mu = mu * cfg_mask.view(-1, 1, 1)
188
+ spks = spks * cfg_mask.view(-1, 1)
189
+ cond = cond * cfg_mask.view(-1, 1, 1)
190
+
191
+ pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond, streaming=streaming)
192
+ loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1])
193
+ return loss, y
194
+
195
+
196
+ class CausalConditionalCFM(ConditionalCFM):
197
+ def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
198
+ super().__init__(in_channels, cfm_params, n_spks, spk_emb_dim, estimator)
199
+ set_all_random_seed(0)
200
+ self.rand_noise = torch.randn([1, 80, 50 * 300])
201
+
202
+ @torch.inference_mode()
203
+ def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, streaming=False):
204
+ """Forward diffusion
205
+
206
+ Args:
207
+ mu (torch.Tensor): output of encoder
208
+ shape: (batch_size, n_feats, mel_timesteps)
209
+ mask (torch.Tensor): output_mask
210
+ shape: (batch_size, 1, mel_timesteps)
211
+ n_timesteps (int): number of diffusion steps
212
+ temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
213
+ spks (torch.Tensor, optional): speaker ids. Defaults to None.
214
+ shape: (batch_size, spk_emb_dim)
215
+ cond: Not used but kept for future purposes
216
+
217
+ Returns:
218
+ sample: generated mel-spectrogram
219
+ shape: (batch_size, n_feats, mel_timesteps)
220
+ """
221
+
222
+ z = self.rand_noise[:, :, :mu.size(2)].to(mu.device).to(mu.dtype) * temperature
223
+ # fix prompt and overlap part mu and z
224
+ t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
225
+ if self.t_scheduler == 'cosine':
226
+ t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
227
+ return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond, streaming=streaming), None
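The two classes above implement optimal-transport conditional flow matching: compute_loss regresses the estimator onto the straight-line velocity u = x1 - (1 - sigma_min) * z evaluated at the interpolant y = (1 - (1 - sigma_min) * t) * z + t * x1, and forward/solve_euler integrates the learned velocity field from noise to a mel-spectrogram with a fixed-step Euler solver on a cosine-warped time grid (solve_euler additionally blends a conditional and an unconditional pass for classifier-free guidance). The standalone sketch below reproduces only that arithmetic with a dummy velocity field in place of the estimator network; the names ot_cfm_targets and euler_sample and the toy shapes are illustrative, not part of this repository.

import torch

def ot_cfm_targets(x1, sigma_min=1e-6):
    # draw a cosine-warped random timestep and a noise sample, then build the
    # interpolant y and the regression target u (same algebra as compute_loss above)
    b = x1.size(0)
    t = torch.rand(b, 1, 1, dtype=x1.dtype)
    t = 1 - torch.cos(t * 0.5 * torch.pi)
    z = torch.randn_like(x1)                        # x_0 ~ N(0, I)
    y = (1 - (1 - sigma_min) * t) * z + t * x1      # point on the conditional path
    u = x1 - (1 - sigma_min) * z                    # velocity the estimator should predict
    return t, y, u

def euler_sample(velocity_fn, z, n_timesteps=10):
    # fixed-step Euler integration from t=0 (noise) to t=1 (data) on a cosine-warped grid
    t_span = torch.linspace(0, 1, n_timesteps + 1)
    t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
    x = z
    for i in range(1, len(t_span)):
        dt = t_span[i] - t_span[i - 1]
        x = x + dt * velocity_fn(x, t_span[i - 1])
    return x

if __name__ == "__main__":
    x1 = torch.randn(2, 80, 100)                    # toy target mel (B, n_feats, T)
    t, y, u = ot_cfm_targets(x1)
    mel = euler_sample(lambda x, t: torch.zeros_like(x), torch.randn(2, 80, 100))
    print(y.shape, u.shape, mel.shape)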
speech/cosyvoice/flow/length_regulator.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Tuple
15
+ import torch.nn as nn
16
+ import torch
17
+ from torch.nn import functional as F
18
+ from cosyvoice.utils.mask import make_pad_mask
19
+
20
+
21
+ class InterpolateRegulator(nn.Module):
22
+ def __init__(
23
+ self,
24
+ channels: int,
25
+ sampling_ratios: Tuple,
26
+ out_channels: int = None,
27
+ groups: int = 1,
28
+ ):
29
+ super().__init__()
30
+ self.sampling_ratios = sampling_ratios
31
+ out_channels = out_channels or channels
32
+ model = nn.ModuleList([])
33
+ if len(sampling_ratios) > 0:
34
+ for _ in sampling_ratios:
35
+ module = nn.Conv1d(channels, channels, 3, 1, 1)
36
+ norm = nn.GroupNorm(groups, channels)
37
+ act = nn.Mish()
38
+ model.extend([module, norm, act])
39
+ model.append(
40
+ nn.Conv1d(channels, out_channels, 1, 1)
41
+ )
42
+ self.model = nn.Sequential(*model)
43
+
44
+ def forward(self, x, ylens=None):
45
+ # x in (B, T, D)
46
+ mask = (~make_pad_mask(ylens)).to(x).unsqueeze(-1)
47
+ x = F.interpolate(x.transpose(1, 2).contiguous(), size=ylens.max(), mode='linear')
48
+ out = self.model(x).transpose(1, 2).contiguous()
49
+ olens = ylens
50
+ return out * mask, olens
51
+
52
+ def inference(self, x1, x2, mel_len1, mel_len2, input_frame_rate=50):
53
+ # in inference mode, interpolate the prompt token and the token (head/mid/tail) separately, so we can get a clear separation point of the mel
54
+ # NOTE 20 corresponds to token_overlap_len in cosyvoice/cli/model.py
55
+ # x in (B, T, D)
56
+ if x2.shape[1] > 40:
57
+ x2_head = F.interpolate(x2[:, :20].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear')
58
+ x2_mid = F.interpolate(x2[:, 20:-20].transpose(1, 2).contiguous(), size=mel_len2 - int(20 / input_frame_rate * 22050 / 256) * 2,
59
+ mode='linear')
60
+ x2_tail = F.interpolate(x2[:, -20:].transpose(1, 2).contiguous(), size=int(20 / input_frame_rate * 22050 / 256), mode='linear')
61
+ x2 = torch.concat([x2_head, x2_mid, x2_tail], dim=2)
62
+ else:
63
+ x2 = F.interpolate(x2.transpose(1, 2).contiguous(), size=mel_len2, mode='linear')
64
+ if x1.shape[1] != 0:
65
+ x1 = F.interpolate(x1.transpose(1, 2).contiguous(), size=mel_len1, mode='linear')
66
+ x = torch.concat([x1, x2], dim=2)
67
+ else:
68
+ x = x2
69
+ out = self.model(x).transpose(1, 2).contiguous()
70
+ return out, mel_len1 + mel_len2
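InterpolateRegulator stretches a token-rate sequence to the mel frame rate with linear interpolation before the small Conv1d/GroupNorm/Mish stack refines it. A rough sketch of the frame-count arithmetic it relies on (50 Hz speech tokens versus 22050 / 256 ≈ 86 Hz mel frames) follows; token_len_to_mel_len and length_regulate are illustrative helper names, not functions from this repository.

import torch
import torch.nn.functional as F

def token_len_to_mel_len(token_len, input_frame_rate=50, sample_rate=22050, hop_size=256):
    # speech tokens arrive at input_frame_rate Hz, mel frames at sample_rate / hop_size Hz
    return int(token_len / input_frame_rate * sample_rate / hop_size)

def length_regulate(x, target_len):
    # x: (B, T_tokens, D) -> (B, target_len, D) via linear interpolation along time
    return F.interpolate(x.transpose(1, 2).contiguous(), size=target_len, mode='linear').transpose(1, 2)

if __name__ == "__main__":
    tokens = torch.randn(1, 100, 512)        # 2 s of tokens at 50 Hz
    mel_len = token_len_to_mel_len(100)      # ~172 mel frames
    print(mel_len, length_regulate(tokens, mel_len).shape)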
speech/cosyvoice/hifigan/discriminator.py ADDED
@@ -0,0 +1,230 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ try:
5
+ from torch.nn.utils.parametrizations import weight_norm, spectral_norm
6
+ except ImportError:
7
+ from torch.nn.utils import weight_norm, spectral_norm
8
+ from typing import List, Optional, Tuple
9
+ from einops import rearrange
10
+ from torchaudio.transforms import Spectrogram
11
+
12
+ LRELU_SLOPE = 0.1
13
+
14
+
15
+ class MultipleDiscriminator(nn.Module):
16
+ def __init__(
17
+ self, mpd: nn.Module, mrd: nn.Module
18
+ ):
19
+ super().__init__()
20
+ self.mpd = mpd
21
+ self.mrd = mrd
22
+
23
+ def forward(self, y: torch.Tensor, y_hat: torch.Tensor):
24
+ y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
25
+ this_y_d_rs, this_y_d_gs, this_fmap_rs, this_fmap_gs = self.mpd(y.unsqueeze(dim=1), y_hat.unsqueeze(dim=1))
26
+ y_d_rs += this_y_d_rs
27
+ y_d_gs += this_y_d_gs
28
+ fmap_rs += this_fmap_rs
29
+ fmap_gs += this_fmap_gs
30
+ this_y_d_rs, this_y_d_gs, this_fmap_rs, this_fmap_gs = self.mrd(y, y_hat)
31
+ y_d_rs += this_y_d_rs
32
+ y_d_gs += this_y_d_gs
33
+ fmap_rs += this_fmap_rs
34
+ fmap_gs += this_fmap_gs
35
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
36
+
37
+
38
+ class MultiResolutionDiscriminator(nn.Module):
39
+ def __init__(
40
+ self,
41
+ fft_sizes: Tuple[int, ...] = (2048, 1024, 512),
42
+ num_embeddings: Optional[int] = None,
43
+ ):
44
+ """
45
+ Multi-Resolution Discriminator module adapted from https://github.com/descriptinc/descript-audio-codec.
46
+ Additionally, it allows incorporating conditional information with a learned embeddings table.
47
+
48
+ Args:
49
+ fft_sizes (tuple[int]): Tuple of window lengths for FFT. Defaults to (2048, 1024, 512).
50
+ num_embeddings (int, optional): Number of embeddings. None means non-conditional discriminator.
51
+ Defaults to None.
52
+ """
53
+
54
+ super().__init__()
55
+ self.discriminators = nn.ModuleList(
56
+ [DiscriminatorR(window_length=w, num_embeddings=num_embeddings) for w in fft_sizes]
57
+ )
58
+
59
+ def forward(
60
+ self, y: torch.Tensor, y_hat: torch.Tensor, bandwidth_id: torch.Tensor = None
61
+ ) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[List[torch.Tensor]], List[List[torch.Tensor]]]:
62
+ y_d_rs = []
63
+ y_d_gs = []
64
+ fmap_rs = []
65
+ fmap_gs = []
66
+
67
+ for d in self.discriminators:
68
+ y_d_r, fmap_r = d(x=y, cond_embedding_id=bandwidth_id)
69
+ y_d_g, fmap_g = d(x=y_hat, cond_embedding_id=bandwidth_id)
70
+ y_d_rs.append(y_d_r)
71
+ fmap_rs.append(fmap_r)
72
+ y_d_gs.append(y_d_g)
73
+ fmap_gs.append(fmap_g)
74
+
75
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
76
+
77
+
78
+ class DiscriminatorR(nn.Module):
79
+ def __init__(
80
+ self,
81
+ window_length: int,
82
+ num_embeddings: Optional[int] = None,
83
+ channels: int = 32,
84
+ hop_factor: float = 0.25,
85
+ bands: Tuple[Tuple[float, float], ...] = ((0.0, 0.1), (0.1, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)),
86
+ ):
87
+ super().__init__()
88
+ self.window_length = window_length
89
+ self.hop_factor = hop_factor
90
+ self.spec_fn = Spectrogram(
91
+ n_fft=window_length, hop_length=int(window_length * hop_factor), win_length=window_length, power=None
92
+ )
93
+ n_fft = window_length // 2 + 1
94
+ bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands]
95
+ self.bands = bands
96
+ convs = lambda: nn.ModuleList(
97
+ [
98
+ weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))),
99
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
100
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
101
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
102
+ weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))),
103
+ ]
104
+ )
105
+ self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))])
106
+
107
+ if num_embeddings is not None:
108
+ self.emb = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=channels)
109
+ torch.nn.init.zeros_(self.emb.weight)
110
+
111
+ self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1)))
112
+
113
+ def spectrogram(self, x):
114
+ # Remove DC offset
115
+ x = x - x.mean(dim=-1, keepdims=True)
116
+ # Peak normalize the volume of input audio
117
+ x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)
118
+ x = self.spec_fn(x)
119
+ x = torch.view_as_real(x)
120
+ x = rearrange(x, "b f t c -> b c t f")
121
+ # Split into bands
122
+ x_bands = [x[..., b[0]: b[1]] for b in self.bands]
123
+ return x_bands
124
+
125
+ def forward(self, x: torch.Tensor, cond_embedding_id: torch.Tensor = None):
126
+ x_bands = self.spectrogram(x)
127
+ fmap = []
128
+ x = []
129
+ for band, stack in zip(x_bands, self.band_convs):
130
+ for i, layer in enumerate(stack):
131
+ band = layer(band)
132
+ band = torch.nn.functional.leaky_relu(band, 0.1)
133
+ if i > 0:
134
+ fmap.append(band)
135
+ x.append(band)
136
+ x = torch.cat(x, dim=-1)
137
+ if cond_embedding_id is not None:
138
+ emb = self.emb(cond_embedding_id)
139
+ h = (emb.view(1, -1, 1, 1) * x).sum(dim=1, keepdims=True)
140
+ else:
141
+ h = 0
142
+ x = self.conv_post(x)
143
+ fmap.append(x)
144
+ x += h
145
+
146
+ return x, fmap
147
+
148
+
149
+ class MultiResSpecDiscriminator(torch.nn.Module):
150
+
151
+ def __init__(self,
152
+ fft_sizes=[1024, 2048, 512],
153
+ hop_sizes=[120, 240, 50],
154
+ win_lengths=[600, 1200, 240],
155
+ window="hann_window"):
156
+
157
+ super(MultiResSpecDiscriminator, self).__init__()
158
+ self.discriminators = nn.ModuleList([
159
+ SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),
160
+ SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),
161
+ SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)])
162
+
163
+ def forward(self, y, y_hat):
164
+ y_d_rs = []
165
+ y_d_gs = []
166
+ fmap_rs = []
167
+ fmap_gs = []
168
+ for _, d in enumerate(self.discriminators):
169
+ y_d_r, fmap_r = d(y)
170
+ y_d_g, fmap_g = d(y_hat)
171
+ y_d_rs.append(y_d_r)
172
+ fmap_rs.append(fmap_r)
173
+ y_d_gs.append(y_d_g)
174
+ fmap_gs.append(fmap_g)
175
+
176
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
177
+
178
+
179
+ def stft(x, fft_size, hop_size, win_length, window):
180
+ """Perform STFT and convert to magnitude spectrogram.
181
+ Args:
182
+ x (Tensor): Input signal tensor (B, T).
183
+ fft_size (int): FFT size.
184
+ hop_size (int): Hop size.
185
+ win_length (int): Window length.
186
+ window (str): Window function type.
187
+ Returns:
188
+ Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
189
+ """
190
+ x_stft = torch.stft(x, fft_size, hop_size, win_length, window, return_complex=True)
191
+
192
+ # NOTE(kan-bayashi): clamp is needed to avoid nan or inf
193
+ return torch.abs(x_stft).transpose(2, 1)
194
+
195
+
196
+ class SpecDiscriminator(nn.Module):
197
+ """STFT-magnitude sub-discriminator used by MultiResSpecDiscriminator."""
198
+
199
+ def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", use_spectral_norm=False):
200
+ super(SpecDiscriminator, self).__init__()
201
+ norm_f = weight_norm if use_spectral_norm is False else spectral_norm
202
+ self.fft_size = fft_size
203
+ self.shift_size = shift_size
204
+ self.win_length = win_length
205
+ self.window = getattr(torch, window)(win_length)
206
+ self.discriminators = nn.ModuleList([
207
+ norm_f(nn.Conv2d(1, 32, kernel_size=(3, 9), padding=(1, 4))),
208
+ norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
209
+ norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
210
+ norm_f(nn.Conv2d(32, 32, kernel_size=(3, 9), stride=(1, 2), padding=(1, 4))),
211
+ norm_f(nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))),
212
+ ])
213
+
214
+ self.out = norm_f(nn.Conv2d(32, 1, 3, 1, 1))
215
+
216
+ def forward(self, y):
217
+
218
+ fmap = []
219
+ y = y.squeeze(1)
220
+ y = stft(y, self.fft_size, self.shift_size, self.win_length, self.window.to(y.device))
221
+ y = y.unsqueeze(1)
222
+ for _, d in enumerate(self.discriminators):
223
+ y = d(y)
224
+ y = F.leaky_relu(y, LRELU_SLOPE)
225
+ fmap.append(y)
226
+
227
+ y = self.out(y)
228
+ fmap.append(y)
229
+
230
+ return torch.flatten(y, 1, -1), fmap
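All of the discriminators above share the same interface: they take a real and a generated waveform and return per-discriminator logits plus feature maps for the feature-matching loss. A minimal smoke test of MultiResolutionDiscriminator, assuming the package layout added by this commit (cosyvoice.hifigan.discriminator) is importable, might look like this:

import torch
from cosyvoice.hifigan.discriminator import MultiResolutionDiscriminator

if __name__ == "__main__":
    mrd = MultiResolutionDiscriminator()               # one DiscriminatorR per FFT size
    y = torch.randn(2, 22050)                          # 1 s of "real" audio at 22.05 kHz
    y_hat = torch.randn(2, 22050)                      # 1 s of generated audio
    y_d_rs, y_d_gs, fmap_rs, fmap_gs = mrd(y, y_hat)
    print(len(y_d_rs), y_d_rs[0].shape, len(fmap_rs[0]))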
speech/cosyvoice/hifigan/f0_predictor.py ADDED
@@ -0,0 +1,58 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+ import torch.nn as nn
16
+ try:
17
+ from torch.nn.utils.parametrizations import weight_norm
18
+ except ImportError:
19
+ from torch.nn.utils import weight_norm
20
+
21
+
22
+ class ConvRNNF0Predictor(nn.Module):
23
+ def __init__(self,
24
+ num_class: int = 1,
25
+ in_channels: int = 80,
26
+ cond_channels: int = 512
27
+ ):
28
+ super().__init__()
29
+
30
+ self.num_class = num_class
31
+ self.condnet = nn.Sequential(
32
+ weight_norm(
33
+ nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1)
34
+ ),
35
+ nn.ELU(),
36
+ weight_norm(
37
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
38
+ ),
39
+ nn.ELU(),
40
+ weight_norm(
41
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
42
+ ),
43
+ nn.ELU(),
44
+ weight_norm(
45
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
46
+ ),
47
+ nn.ELU(),
48
+ weight_norm(
49
+ nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
50
+ ),
51
+ nn.ELU(),
52
+ )
53
+ self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class)
54
+
55
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
56
+ x = self.condnet(x)
57
+ x = x.transpose(1, 2)
58
+ return torch.abs(self.classifier(x).squeeze(-1))
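ConvRNNF0Predictor maps an 80-band mel-spectrogram to a non-negative frame-level F0 track (the final torch.abs keeps predictions positive). A minimal usage sketch, assuming the module path introduced by this commit:

import torch
from cosyvoice.hifigan.f0_predictor import ConvRNNF0Predictor

if __name__ == "__main__":
    predictor = ConvRNNF0Predictor(in_channels=80, cond_channels=512)
    mel = torch.randn(2, 80, 200)      # (B, n_mels, frames)
    f0 = predictor(mel)                # (B, frames), one F0 value per mel frame
    print(f0.shape, bool((f0 >= 0).all()))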
speech/cosyvoice/hifigan/generator.py ADDED
@@ -0,0 +1,582 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """HIFI-GAN"""
16
+
17
+ from typing import Dict, Optional, List
18
+ import numpy as np
19
+ from scipy.signal import get_window
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+ from torch.nn import Conv1d
24
+ from torch.nn import ConvTranspose1d
25
+ from torch.nn.utils import remove_weight_norm
26
+ try:
27
+ from torch.nn.utils.parametrizations import weight_norm
28
+ except ImportError:
29
+ from torch.nn.utils import weight_norm
30
+ from torch.distributions.uniform import Uniform
31
+
32
+ from cosyvoice.transformer.activation import Snake
33
+ from cosyvoice.utils.common import get_padding
34
+ from cosyvoice.utils.common import init_weights
35
+
36
+
37
+ """hifigan based generator implementation.
38
+
39
+ This code is modified from https://github.com/jik876/hifi-gan
40
+ , https://github.com/kan-bayashi/ParallelWaveGAN and
41
+ https://github.com/NVIDIA/BigVGAN
42
+
43
+ """
44
+
45
+
46
+ class ResBlock(torch.nn.Module):
47
+ """Residual block module in HiFiGAN/BigVGAN."""
48
+ def __init__(
49
+ self,
50
+ channels: int = 512,
51
+ kernel_size: int = 3,
52
+ dilations: List[int] = [1, 3, 5],
53
+ ):
54
+ super(ResBlock, self).__init__()
55
+ self.convs1 = nn.ModuleList()
56
+ self.convs2 = nn.ModuleList()
57
+
58
+ for dilation in dilations:
59
+ self.convs1.append(
60
+ weight_norm(
61
+ Conv1d(
62
+ channels,
63
+ channels,
64
+ kernel_size,
65
+ 1,
66
+ dilation=dilation,
67
+ padding=get_padding(kernel_size, dilation)
68
+ )
69
+ )
70
+ )
71
+ self.convs2.append(
72
+ weight_norm(
73
+ Conv1d(
74
+ channels,
75
+ channels,
76
+ kernel_size,
77
+ 1,
78
+ dilation=1,
79
+ padding=get_padding(kernel_size, 1)
80
+ )
81
+ )
82
+ )
83
+ self.convs1.apply(init_weights)
84
+ self.convs2.apply(init_weights)
85
+ self.activations1 = nn.ModuleList([
86
+ Snake(channels, alpha_logscale=False)
87
+ for _ in range(len(self.convs1))
88
+ ])
89
+ self.activations2 = nn.ModuleList([
90
+ Snake(channels, alpha_logscale=False)
91
+ for _ in range(len(self.convs2))
92
+ ])
93
+
94
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
95
+ for idx in range(len(self.convs1)):
96
+ xt = self.activations1[idx](x)
97
+ xt = self.convs1[idx](xt)
98
+ xt = self.activations2[idx](xt)
99
+ xt = self.convs2[idx](xt)
100
+ x = xt + x
101
+ return x
102
+
103
+ def remove_weight_norm(self):
104
+ for idx in range(len(self.convs1)):
105
+ remove_weight_norm(self.convs1[idx])
106
+ remove_weight_norm(self.convs2[idx])
107
+
108
+
109
+ class SineGen(torch.nn.Module):
110
+ """ Definition of sine generator
111
+ SineGen(samp_rate, harmonic_num = 0,
112
+ sine_amp = 0.1, noise_std = 0.003,
113
+ voiced_threshold = 0,
114
+ flag_for_pulse=False)
115
+ samp_rate: sampling rate in Hz
116
+ harmonic_num: number of harmonic overtones (default 0)
117
+ sine_amp: amplitude of sine-waveform (default 0.1)
118
+ noise_std: std of Gaussian noise (default 0.003)
119
+ voiced_threshold: F0 threshold for U/V classification (default 0)
120
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
121
+ Note: when flag_for_pulse is True, the first time step of a voiced
122
+ segment is always sin(np.pi) or cos(0)
123
+ """
124
+
125
+ def __init__(self, samp_rate, harmonic_num=0,
126
+ sine_amp=0.1, noise_std=0.003,
127
+ voiced_threshold=0):
128
+ super(SineGen, self).__init__()
129
+ self.sine_amp = sine_amp
130
+ self.noise_std = noise_std
131
+ self.harmonic_num = harmonic_num
132
+ self.sampling_rate = samp_rate
133
+ self.voiced_threshold = voiced_threshold
134
+
135
+ def _f02uv(self, f0):
136
+ # generate uv signal
137
+ uv = (f0 > self.voiced_threshold).type(torch.float32)
138
+ return uv
139
+
140
+ @torch.no_grad()
141
+ def forward(self, f0):
142
+ """
143
+ :param f0: [B, 1, sample_len], Hz
144
+ :return: [B, 1, sample_len]
145
+ """
146
+
147
+ F_mat = torch.zeros((f0.size(0), self.harmonic_num + 1, f0.size(-1))).to(f0.device)
148
+ for i in range(self.harmonic_num + 1):
149
+ F_mat[:, i: i + 1, :] = f0 * (i + 1) / self.sampling_rate
150
+
151
+ theta_mat = 2 * np.pi * (torch.cumsum(F_mat, dim=-1) % 1)
152
+ u_dist = Uniform(low=-np.pi, high=np.pi)
153
+ phase_vec = u_dist.sample(sample_shape=(f0.size(0), self.harmonic_num + 1, 1)).to(F_mat.device)
154
+ phase_vec[:, 0, :] = 0
155
+
156
+ # generate sine waveforms
157
+ sine_waves = self.sine_amp * torch.sin(theta_mat + phase_vec)
158
+
159
+ # generate uv signal
160
+ uv = self._f02uv(f0)
161
+
162
+ # noise: for unvoiced should be similar to sine_amp
163
+ # std = self.sine_amp/3 -> max value ~ self.sine_amp
164
+ # . for voiced regions is self.noise_std
165
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
166
+ noise = noise_amp * torch.randn_like(sine_waves)
167
+
168
+ # first: set the unvoiced part to 0 by uv
169
+ # then: additive noise
170
+ sine_waves = sine_waves * uv + noise
171
+ return sine_waves, uv, noise
172
+
173
+
174
+ class SourceModuleHnNSF(torch.nn.Module):
175
+ """ SourceModule for hn-nsf
176
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
177
+ add_noise_std=0.003, voiced_threshod=0)
178
+ sampling_rate: sampling_rate in Hz
179
+ harmonic_num: number of harmonic above F0 (default: 0)
180
+ sine_amp: amplitude of sine source signal (default: 0.1)
181
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
182
+ note that amplitude of noise in unvoiced is decided
183
+ by sine_amp
184
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
185
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
186
+ F0_sampled (batchsize, length, 1)
187
+ Sine_source (batchsize, length, 1)
188
+ noise_source (batchsize, length 1)
189
+ uv (batchsize, length, 1)
190
+ """
191
+
192
+ def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
193
+ add_noise_std=0.003, voiced_threshod=0):
194
+ super(SourceModuleHnNSF, self).__init__()
195
+
196
+ self.sine_amp = sine_amp
197
+ self.noise_std = add_noise_std
198
+
199
+ # to produce sine waveforms
200
+ self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
201
+ sine_amp, add_noise_std, voiced_threshod)
202
+
203
+ # to merge source harmonics into a single excitation
204
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
205
+ self.l_tanh = torch.nn.Tanh()
206
+
207
+ def forward(self, x):
208
+ """
209
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
210
+ F0_sampled (batchsize, length, 1)
211
+ Sine_source (batchsize, length, 1)
212
+ noise_source (batchsize, length 1)
213
+ """
214
+ # source for harmonic branch
215
+ with torch.no_grad():
216
+ sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2))
217
+ sine_wavs = sine_wavs.transpose(1, 2)
218
+ uv = uv.transpose(1, 2)
219
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
220
+
221
+ # source for noise branch, in the same shape as uv
222
+ noise = torch.randn_like(uv) * self.sine_amp / 3
223
+ return sine_merge, noise, uv
224
+
225
+
226
+ class SineGen2(torch.nn.Module):
227
+ """ Definition of sine generator
228
+ SineGen(samp_rate, harmonic_num = 0,
229
+ sine_amp = 0.1, noise_std = 0.003,
230
+ voiced_threshold = 0,
231
+ flag_for_pulse=False)
232
+ samp_rate: sampling rate in Hz
233
+ harmonic_num: number of harmonic overtones (default 0)
234
+ sine_amp: amplitude of sine-waveform (default 0.1)
235
+ noise_std: std of Gaussian noise (default 0.003)
236
+ voiced_threshold: F0 threshold for U/V classification (default 0)
237
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
238
+ Note: when flag_for_pulse is True, the first time step of a voiced
239
+ segment is always sin(np.pi) or cos(0)
240
+ """
241
+
242
+ def __init__(self, samp_rate, upsample_scale, harmonic_num=0,
243
+ sine_amp=0.1, noise_std=0.003,
244
+ voiced_threshold=0,
245
+ flag_for_pulse=False):
246
+ super(SineGen2, self).__init__()
247
+ self.sine_amp = sine_amp
248
+ self.noise_std = noise_std
249
+ self.harmonic_num = harmonic_num
250
+ self.dim = self.harmonic_num + 1
251
+ self.sampling_rate = samp_rate
252
+ self.voiced_threshold = voiced_threshold
253
+ self.flag_for_pulse = flag_for_pulse
254
+ self.upsample_scale = upsample_scale
255
+
256
+ def _f02uv(self, f0):
257
+ # generate uv signal
258
+ uv = (f0 > self.voiced_threshold).type(torch.float32)
259
+ return uv
260
+
261
+ def _f02sine(self, f0_values):
262
+ """ f0_values: (batchsize, length, dim)
263
+ where dim indicates fundamental tone and overtones
264
+ """
265
+ # convert to F0 in rad. The integer part n can be ignored
266
+ # because 2 * np.pi * n doesn't affect phase
267
+ rad_values = (f0_values / self.sampling_rate) % 1
268
+
269
+ # initial phase noise (no noise for fundamental component)
270
+ rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], device=f0_values.device)
271
+ rand_ini[:, 0] = 0
272
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
273
+
274
+ # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
275
+ if not self.flag_for_pulse:
276
+ rad_values = torch.nn.functional.interpolate(rad_values.transpose(1, 2),
277
+ scale_factor=1 / self.upsample_scale,
278
+ mode="linear").transpose(1, 2)
279
+
280
+ phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi
281
+ phase = torch.nn.functional.interpolate(phase.transpose(1, 2) * self.upsample_scale,
282
+ scale_factor=self.upsample_scale, mode="linear").transpose(1, 2)
283
+ sines = torch.sin(phase)
284
+ else:
285
+ # If necessary, make sure that the first time step of every
286
+ # voiced segments is sin(pi) or cos(0)
287
+ # This is used for pulse-train generation
288
+
289
+ # identify the last time step in unvoiced segments
290
+ uv = self._f02uv(f0_values)
291
+ uv_1 = torch.roll(uv, shifts=-1, dims=1)
292
+ uv_1[:, -1, :] = 1
293
+ u_loc = (uv < 1) * (uv_1 > 0)
294
+
295
+ # get the instantaneous phase
296
+ tmp_cumsum = torch.cumsum(rad_values, dim=1)
297
+ # different batch needs to be processed differently
298
+ for idx in range(f0_values.shape[0]):
299
+ temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
300
+ temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
301
+ # stores the accumulation of i.phase within
302
+ # each voiced segments
303
+ tmp_cumsum[idx, :, :] = 0
304
+ tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
305
+
306
+ # rad_values - tmp_cumsum: remove the accumulation of i.phase
307
+ # within the previous voiced segment.
308
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
309
+
310
+ # get the sines
311
+ sines = torch.cos(i_phase * 2 * np.pi)
312
+ return sines
313
+
314
+ def forward(self, f0):
315
+ """ sine_tensor, uv = forward(f0)
316
+ input F0: tensor(batchsize=1, length, dim=1)
317
+ f0 for unvoiced steps should be 0
318
+ output sine_tensor: tensor(batchsize=1, length, dim)
319
+ output uv: tensor(batchsize=1, length, 1)
320
+ """
321
+ # fundamental component
322
+ fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))
323
+
324
+ # generate sine waveforms
325
+ sine_waves = self._f02sine(fn) * self.sine_amp
326
+
327
+ # generate uv signal
328
+ uv = self._f02uv(f0)
329
+
330
+ # noise: for unvoiced should be similar to sine_amp
331
+ # std = self.sine_amp/3 -> max value ~ self.sine_amp
332
+ # . for voiced regions is self.noise_std
333
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
334
+ noise = noise_amp * torch.randn_like(sine_waves)
335
+
336
+ # first: set the unvoiced part to 0 by uv
337
+ # then: additive noise
338
+ sine_waves = sine_waves * uv + noise
339
+ return sine_waves, uv, noise
340
+
341
+
342
+ class SourceModuleHnNSF2(torch.nn.Module):
343
+ """ SourceModule for hn-nsf
344
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
345
+ add_noise_std=0.003, voiced_threshod=0)
346
+ sampling_rate: sampling_rate in Hz
347
+ harmonic_num: number of harmonic above F0 (default: 0)
348
+ sine_amp: amplitude of sine source signal (default: 0.1)
349
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
350
+ note that amplitude of noise in unvoiced is decided
351
+ by sine_amp
352
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
353
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
354
+ F0_sampled (batchsize, length, 1)
355
+ Sine_source (batchsize, length, 1)
356
+ noise_source (batchsize, length 1)
357
+ uv (batchsize, length, 1)
358
+ """
359
+
360
+ def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
361
+ add_noise_std=0.003, voiced_threshod=0):
362
+ super(SourceModuleHnNSF2, self).__init__()
363
+
364
+ self.sine_amp = sine_amp
365
+ self.noise_std = add_noise_std
366
+
367
+ # to produce sine waveforms
368
+ self.l_sin_gen = SineGen2(sampling_rate, upsample_scale, harmonic_num,
369
+ sine_amp, add_noise_std, voiced_threshod)
370
+
371
+ # to merge source harmonics into a single excitation
372
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
373
+ self.l_tanh = torch.nn.Tanh()
374
+
375
+ def forward(self, x):
376
+ """
377
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
378
+ F0_sampled (batchsize, length, 1)
379
+ Sine_source (batchsize, length, 1)
380
+ noise_source (batchsize, length 1)
381
+ """
382
+ # source for harmonic branch
383
+ with torch.no_grad():
384
+ sine_wavs, uv, _ = self.l_sin_gen(x)
385
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
386
+
387
+ # source for noise branch, in the same shape as uv
388
+ noise = torch.randn_like(uv) * self.sine_amp / 3
389
+ return sine_merge, noise, uv
390
+
391
+
392
+ class HiFTGenerator(nn.Module):
393
+ """
394
+ HiFTNet Generator: Neural Source Filter + ISTFTNet
395
+ https://arxiv.org/abs/2309.09493
396
+ """
397
+ def __init__(
398
+ self,
399
+ in_channels: int = 80,
400
+ base_channels: int = 512,
401
+ nb_harmonics: int = 8,
402
+ sampling_rate: int = 22050,
403
+ nsf_alpha: float = 0.1,
404
+ nsf_sigma: float = 0.003,
405
+ nsf_voiced_threshold: float = 10,
406
+ upsample_rates: List[int] = [8, 8],
407
+ upsample_kernel_sizes: List[int] = [16, 16],
408
+ istft_params: Dict[str, int] = {"n_fft": 16, "hop_len": 4},
409
+ resblock_kernel_sizes: List[int] = [3, 7, 11],
410
+ resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
411
+ source_resblock_kernel_sizes: List[int] = [7, 11],
412
+ source_resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5]],
413
+ lrelu_slope: float = 0.1,
414
+ audio_limit: float = 0.99,
415
+ f0_predictor: torch.nn.Module = None,
416
+ ):
417
+ super(HiFTGenerator, self).__init__()
418
+
419
+ self.out_channels = 1
420
+ self.nb_harmonics = nb_harmonics
421
+ self.sampling_rate = sampling_rate
422
+ self.istft_params = istft_params
423
+ self.lrelu_slope = lrelu_slope
424
+ self.audio_limit = audio_limit
425
+
426
+ self.num_kernels = len(resblock_kernel_sizes)
427
+ self.num_upsamples = len(upsample_rates)
428
+ # NOTE in CosyVoice2, we use the original SourceModuleHnNSF implementation
429
+ this_SourceModuleHnNSF = SourceModuleHnNSF if self.sampling_rate == 22050 else SourceModuleHnNSF2
430
+ self.m_source = this_SourceModuleHnNSF(
431
+ sampling_rate=sampling_rate,
432
+ upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"],
433
+ harmonic_num=nb_harmonics,
434
+ sine_amp=nsf_alpha,
435
+ add_noise_std=nsf_sigma,
436
+ voiced_threshod=nsf_voiced_threshold)
437
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"])
438
+
439
+ self.conv_pre = weight_norm(
440
+ Conv1d(in_channels, base_channels, 7, 1, padding=3)
441
+ )
442
+
443
+ # Up
444
+ self.ups = nn.ModuleList()
445
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
446
+ self.ups.append(
447
+ weight_norm(
448
+ ConvTranspose1d(
449
+ base_channels // (2**i),
450
+ base_channels // (2**(i + 1)),
451
+ k,
452
+ u,
453
+ padding=(k - u) // 2,
454
+ )
455
+ )
456
+ )
457
+
458
+ # Down
459
+ self.source_downs = nn.ModuleList()
460
+ self.source_resblocks = nn.ModuleList()
461
+ downsample_rates = [1] + upsample_rates[::-1][:-1]
462
+ downsample_cum_rates = np.cumprod(downsample_rates)
463
+ for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes, source_resblock_dilation_sizes)):
464
+ if u == 1:
465
+ self.source_downs.append(
466
+ Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1)
467
+ )
468
+ else:
469
+ self.source_downs.append(
470
+ Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2))
471
+ )
472
+
473
+ self.source_resblocks.append(
474
+ ResBlock(base_channels // (2 ** (i + 1)), k, d)
475
+ )
476
+
477
+ self.resblocks = nn.ModuleList()
478
+ for i in range(len(self.ups)):
479
+ ch = base_channels // (2**(i + 1))
480
+ for _, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
481
+ self.resblocks.append(ResBlock(ch, k, d))
482
+
483
+ self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3))
484
+ self.ups.apply(init_weights)
485
+ self.conv_post.apply(init_weights)
486
+ self.reflection_pad = nn.ReflectionPad1d((1, 0))
487
+ self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32))
488
+ self.f0_predictor = f0_predictor
489
+
490
+ def remove_weight_norm(self):
491
+ print('Removing weight norm...')
492
+ for l in self.ups:
493
+ remove_weight_norm(l)
494
+ for l in self.resblocks:
495
+ l.remove_weight_norm()
496
+ remove_weight_norm(self.conv_pre)
497
+ remove_weight_norm(self.conv_post)
498
+ self.m_source.remove_weight_norm()
499
+ for l in self.source_downs:
500
+ remove_weight_norm(l)
501
+ for l in self.source_resblocks:
502
+ l.remove_weight_norm()
503
+
504
+ def _stft(self, x):
505
+ spec = torch.stft(
506
+ x,
507
+ self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device),
508
+ return_complex=True)
509
+ spec = torch.view_as_real(spec) # [B, F, TT, 2]
510
+ return spec[..., 0], spec[..., 1]
511
+
512
+ def _istft(self, magnitude, phase):
513
+ magnitude = torch.clip(magnitude, max=1e2)
514
+ real = magnitude * torch.cos(phase)
515
+ img = magnitude * torch.sin(phase)
516
+ inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"],
517
+ self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device))
518
+ return inverse_transform
519
+
520
+ def decode(self, x: torch.Tensor, s: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
521
+ s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
522
+ s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1)
523
+
524
+ x = self.conv_pre(x)
525
+ for i in range(self.num_upsamples):
526
+ x = F.leaky_relu(x, self.lrelu_slope)
527
+ x = self.ups[i](x)
528
+
529
+ if i == self.num_upsamples - 1:
530
+ x = self.reflection_pad(x)
531
+
532
+ # fusion
533
+ si = self.source_downs[i](s_stft)
534
+ si = self.source_resblocks[i](si)
535
+ x = x + si
536
+
537
+ xs = None
538
+ for j in range(self.num_kernels):
539
+ if xs is None:
540
+ xs = self.resblocks[i * self.num_kernels + j](x)
541
+ else:
542
+ xs += self.resblocks[i * self.num_kernels + j](x)
543
+ x = xs / self.num_kernels
544
+
545
+ x = F.leaky_relu(x)
546
+ x = self.conv_post(x)
547
+ magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :])
548
+ phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :]) # actually, sin is redundant
549
+
550
+ x = self._istft(magnitude, phase)
551
+ x = torch.clamp(x, -self.audio_limit, self.audio_limit)
552
+ return x
553
+
554
+ def forward(
555
+ self,
556
+ batch: dict,
557
+ device: torch.device,
558
+ ) -> Dict[str, Optional[torch.Tensor]]:
559
+ speech_feat = batch['speech_feat'].transpose(1, 2).to(device)
560
+ # mel->f0
561
+ f0 = self.f0_predictor(speech_feat)
562
+ # f0->source
563
+ s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
564
+ s, _, _ = self.m_source(s)
565
+ s = s.transpose(1, 2)
566
+ # mel+source->speech
567
+ generated_speech = self.decode(x=speech_feat, s=s)
568
+ return generated_speech, f0
569
+
570
+ @torch.inference_mode()
571
+ def inference(self, speech_feat: torch.Tensor, cache_source: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
572
+ # mel->f0
573
+ f0 = self.f0_predictor(speech_feat)
574
+ # f0->source
575
+ s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
576
+ s, _, _ = self.m_source(s)
577
+ s = s.transpose(1, 2)
578
+ # use cache_source to avoid glitch
579
+ if cache_source.shape[2] != 0:
580
+ s[:, :, :cache_source.shape[2]] = cache_source
581
+ generated_speech = self.decode(x=speech_feat, s=s)
582
+ return generated_speech, s
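HiFTGenerator.inference turns a mel-spectrogram into a waveform and also returns the NSF source signal, which the caller can feed back through cache_source on the next chunk so that streaming synthesis keeps the excitation continuous across chunk boundaries. A hedged usage sketch with random inputs (a randomly initialized f0_predictor, so the audio is noise, but the shapes are representative of the 22.05 kHz configuration with hop 8 * 8 * 4 = 256):

import torch
from cosyvoice.hifigan.f0_predictor import ConvRNNF0Predictor
from cosyvoice.hifigan.generator import HiFTGenerator

if __name__ == "__main__":
    hift = HiFTGenerator(f0_predictor=ConvRNNF0Predictor())
    mel = torch.randn(1, 80, 100)                        # (B, n_mels, frames)
    speech, source = hift.inference(speech_feat=mel)     # ~100 * 256 = 25600 samples
    print(speech.shape, source.shape)
    # streaming: reuse the previous chunk's source to avoid a glitch at the seam
    speech_again, _ = hift.inference(speech_feat=mel, cache_source=source)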
speech/cosyvoice/hifigan/hifigan.py ADDED
@@ -0,0 +1,67 @@
1
+ from typing import Dict, Optional
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from matcha.hifigan.models import feature_loss, generator_loss, discriminator_loss
6
+ from cosyvoice.utils.losses import tpr_loss, mel_loss
7
+
8
+
9
+ class HiFiGan(nn.Module):
10
+ def __init__(self, generator, discriminator, mel_spec_transform,
11
+ multi_mel_spectral_recon_loss_weight=45, feat_match_loss_weight=2.0,
12
+ tpr_loss_weight=1.0, tpr_loss_tau=0.04):
13
+ super(HiFiGan, self).__init__()
14
+ self.generator = generator
15
+ self.discriminator = discriminator
16
+ self.mel_spec_transform = mel_spec_transform
17
+ self.multi_mel_spectral_recon_loss_weight = multi_mel_spectral_recon_loss_weight
18
+ self.feat_match_loss_weight = feat_match_loss_weight
19
+ self.tpr_loss_weight = tpr_loss_weight
20
+ self.tpr_loss_tau = tpr_loss_tau
21
+
22
+ def forward(
23
+ self,
24
+ batch: dict,
25
+ device: torch.device,
26
+ ) -> Dict[str, Optional[torch.Tensor]]:
27
+ if batch['turn'] == 'generator':
28
+ return self.forward_generator(batch, device)
29
+ else:
30
+ return self.forward_discriminator(batch, device)
31
+
32
+ def forward_generator(self, batch, device):
33
+ real_speech = batch['speech'].to(device)
34
+ pitch_feat = batch['pitch_feat'].to(device)
35
+ # 1. calculate generator outputs
36
+ generated_speech, generated_f0 = self.generator(batch, device)
37
+ # 2. calculate discriminator outputs
38
+ y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech)
39
+ # 3. calculate generator losses, feature loss, mel loss, tpr losses [Optional]
40
+ loss_gen, _ = generator_loss(y_d_gs)
41
+ loss_fm = feature_loss(fmap_rs, fmap_gs)
42
+ loss_mel = mel_loss(real_speech, generated_speech, self.mel_spec_transform)
43
+ if self.tpr_loss_weight != 0:
44
+ loss_tpr = tpr_loss(y_d_gs, y_d_rs, self.tpr_loss_tau)
45
+ else:
46
+ loss_tpr = torch.zeros(1).to(device)
47
+ loss_f0 = F.l1_loss(generated_f0, pitch_feat)
48
+ loss = loss_gen + self.feat_match_loss_weight * loss_fm + \
49
+ self.multi_mel_spectral_recon_loss_weight * loss_mel + \
50
+ self.tpr_loss_weight * loss_tpr + loss_f0
51
+ return {'loss': loss, 'loss_gen': loss_gen, 'loss_fm': loss_fm, 'loss_mel': loss_mel, 'loss_tpr': loss_tpr, 'loss_f0': loss_f0}
52
+
53
+ def forward_discriminator(self, batch, device):
54
+ real_speech = batch['speech'].to(device)
55
+ # 1. calculate generator outputs
56
+ with torch.no_grad():
57
+ generated_speech, generated_f0 = self.generator(batch, device)
58
+ # 2. calculate discriminator outputs
59
+ y_d_rs, y_d_gs, fmap_rs, fmap_gs = self.discriminator(real_speech, generated_speech.detach())
60
+ # 3. calculate discriminator losses, tpr losses [Optional]
61
+ loss_disc, _, _ = discriminator_loss(y_d_rs, y_d_gs)
62
+ if self.tpr_loss_weight != 0:
63
+ loss_tpr = tpr_loss(y_d_rs, y_d_gs, self.tpr_loss_tau)
64
+ else:
65
+ loss_tpr = torch.zeros(1).to(device)
66
+ loss = loss_disc + self.tpr_loss_weight * loss_tpr
67
+ return {'loss': loss, 'loss_disc': loss_disc, 'loss_tpr': loss_tpr}
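The wrapper alternates between a generator turn and a discriminator turn based on batch['turn'], and the generator objective is a weighted sum of the adversarial, feature-matching, mel-reconstruction, TPR, and F0 losses. The tiny sketch below only reproduces that weighting with the constructor defaults shown above (45 for mel reconstruction, 2.0 for feature matching, 1.0 for TPR); generator_objective is an illustrative name, not an API of this repository.

import torch

def generator_objective(loss_gen, loss_fm, loss_mel, loss_tpr, loss_f0,
                        feat_match_weight=2.0, mel_weight=45.0, tpr_weight=1.0):
    # same combination as HiFiGan.forward_generator above
    return loss_gen + feat_match_weight * loss_fm + mel_weight * loss_mel \
        + tpr_weight * loss_tpr + loss_f0

if __name__ == "__main__":
    parts = [torch.tensor(0.1) for _ in range(5)]
    total = generator_objective(*parts)
    print(round(total.item(), 4))   # 0.1 + 2*0.1 + 45*0.1 + 1*0.1 + 0.1 = 5.0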
speech/cosyvoice/llm/llm.py ADDED
@@ -0,0 +1,610 @@
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ # 2025 Alibaba Inc (authors: Xiang Lyu, Yabin Li, Qihua)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import queue
16
+ import random
17
+ import time
18
+ import threading
19
+ from typing import Dict, Optional, Callable, List, Generator
20
+ import torch
21
+ from torch import nn
22
+ import torch.nn.functional as F
23
+ from transformers import Qwen2ForCausalLM
24
+ from torch.nn.utils.rnn import pad_sequence, unpad_sequence
25
+ from cosyvoice.utils.common import IGNORE_ID
26
+ from cosyvoice.transformer.label_smoothing_loss import LabelSmoothingLoss
27
+ from cosyvoice.utils.common import th_accuracy
28
+ from cosyvoice.utils.file_utils import logging
29
+ from cosyvoice.utils.mask import make_pad_mask
30
+
31
+
32
+ class TransformerLM(torch.nn.Module):
33
+ def __init__(
34
+ self,
35
+ text_encoder_input_size: int,
36
+ llm_input_size: int,
37
+ llm_output_size: int,
38
+ text_token_size: int,
39
+ speech_token_size: int,
40
+ text_encoder: torch.nn.Module,
41
+ llm: torch.nn.Module,
42
+ sampling: Callable,
43
+ length_normalized_loss: bool = True,
44
+ lsm_weight: float = 0.0,
45
+ spk_embed_dim: int = 192,
46
+ ):
47
+ super().__init__()
48
+ self.llm_input_size = llm_input_size
49
+ self.speech_token_size = speech_token_size
50
+ # 1. build text token inputs related modules
51
+ self.text_embedding = torch.nn.Embedding(text_token_size, text_encoder_input_size)
52
+ self.text_encoder = text_encoder
53
+ self.text_encoder_affine_layer = nn.Linear(
54
+ self.text_encoder.output_size(),
55
+ llm_input_size
56
+ )
57
+
58
+ # 2. build speech token language model related modules
59
+ self.sos_eos = 0
60
+ self.task_id = 1
61
+ self.llm_embedding = torch.nn.Embedding(2, llm_input_size)
62
+ self.llm = llm
63
+ self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 1)
64
+ self.criterion_ce = LabelSmoothingLoss(
65
+ size=speech_token_size + 1,
66
+ padding_idx=IGNORE_ID,
67
+ smoothing=lsm_weight,
68
+ normalize_length=length_normalized_loss,
69
+ )
70
+
71
+ # 3. [Optional] build speech token related modules
72
+ self.speech_embedding = torch.nn.Embedding(speech_token_size, llm_input_size)
73
+ self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, llm_input_size)
74
+
75
+ # 4. sampling method
76
+ self.sampling = sampling
77
+
78
+ def encode(
79
+ self,
80
+ text: torch.Tensor,
81
+ text_lengths: torch.Tensor,
82
+ ):
83
+ encoder_out, encoder_mask = self.text_encoder(text, text_lengths, decoding_chunk_size=1, num_decoding_left_chunks=-1)
84
+ encoder_out_lens = encoder_mask.squeeze(1).sum(1)
85
+ encoder_out = self.text_encoder_affine_layer(encoder_out)
86
+ return encoder_out, encoder_out_lens
87
+
88
+ def pad_unpad_sequence(self, sos_eos_emb, embedding, text_token, text_token_len, task_id_emb, speech_token, speech_token_len):
89
+ text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True)
90
+ speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True)
91
+ lm_input = [torch.concat([sos_eos_emb.squeeze(dim=0), embedding[i], text_token[i], task_id_emb.squeeze(dim=0), speech_token[i]], dim=0)
92
+ for i in range(len(text_token))]
93
+ lm_input_len = torch.tensor([i.size(0) for i in lm_input], dtype=torch.int32)
94
+ lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID)
95
+ return lm_input, lm_input_len
96
+
97
+ def forward(
98
+ self,
99
+ batch: dict,
100
+ device: torch.device,
101
+ ) -> Dict[str, Optional[torch.Tensor]]:
102
+ """
103
+ Args:
104
+ text: (B, L, D)
105
+ text_lengths: (B,)
106
+ audio: (B, T, N) or (B, T)
107
+ audio_lengths: (B,)
108
+ """
109
+ text_token = batch['text_token'].to(device)
110
+ text_token_len = batch['text_token_len'].to(device)
111
+ speech_token = batch['speech_token'].to(device)
112
+ speech_token_len = batch['speech_token_len'].to(device)
113
+ embedding = batch['embedding'].to(device)
114
+
115
+ # 1. prepare llm_target
116
+ lm_target = [torch.tensor([IGNORE_ID] * (2 + text_token_len[i]) + speech_token[i, :speech_token_len[i]].tolist() +
117
+ [self.speech_token_size]) for i in range(text_token.size(0))]
118
+ lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID).to(device)
119
+
120
+ # 1. encode text_token
121
+ text_token = self.text_embedding(text_token)
122
+ text_token, text_token_len = self.encode(text_token, text_token_len)
123
+
124
+ # 2. embedding projection
125
+ embedding = F.normalize(embedding, dim=1)
126
+ embedding = self.spk_embed_affine_layer(embedding)
127
+ embedding = embedding.unsqueeze(1)
128
+
129
+ # 3. eos and task_id
130
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
131
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
132
+
133
+ # 4. encode speech_token
134
+ speech_token = self.speech_embedding(speech_token)
135
+
136
+ # 5. unpad and pad
137
+ lm_input, lm_input_len = self.pad_unpad_sequence(sos_eos_emb, embedding, text_token, text_token_len,
138
+ task_id_emb, speech_token, speech_token_len)
139
+
140
+ # 6. run lm forward
141
+ lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device))
142
+ logits = self.llm_decoder(lm_output)
143
+ loss = self.criterion_ce(logits, lm_target)
144
+ acc = th_accuracy(logits.view(-1, self.speech_token_size + 1), lm_target, ignore_label=IGNORE_ID)
145
+ return {'loss': loss, 'acc': acc}
146
+
147
+ def sampling_ids(
148
+ self,
149
+ weighted_scores: torch.Tensor,
150
+ decoded_tokens: List,
151
+ sampling: int,
152
+ ignore_eos: bool = True,
153
+ ):
154
+ num_trials, max_trials = 0, 100
155
+ while True:
156
+ top_ids = self.sampling(weighted_scores, decoded_tokens, sampling)
157
+ if (not ignore_eos) or (self.speech_token_size not in top_ids):
158
+ break
159
+ num_trials += 1
160
+ if num_trials > max_trials:
161
+ raise RuntimeError('sampling reaches max_trials {} and still get eos when ignore_eos is True, check your input!'.format(max_trials))
162
+ return top_ids
163
+
164
+ @torch.inference_mode()
165
+ def inference(
166
+ self,
167
+ text: torch.Tensor,
168
+ text_len: torch.Tensor,
169
+ prompt_text: torch.Tensor,
170
+ prompt_text_len: torch.Tensor,
171
+ prompt_speech_token: torch.Tensor,
172
+ prompt_speech_token_len: torch.Tensor,
173
+ embedding: torch.Tensor,
174
+ sampling: int = 25,
175
+ max_token_text_ratio: float = 20,
176
+ min_token_text_ratio: float = 2,
177
+ uuid: str = '',
178
+ ) -> Generator[torch.Tensor, None, None]:
179
+ device = text.device
180
+ text = torch.concat([prompt_text, text], dim=1)
181
+ text_len += prompt_text_len
182
+ text = self.text_embedding(text)
183
+
184
+ # 1. encode text
185
+ text, text_len = self.encode(text, text_len)
186
+
187
+ # 2. encode embedding
188
+ if embedding.shape[0] != 0:
189
+ embedding = F.normalize(embedding, dim=1)
190
+ embedding = self.spk_embed_affine_layer(embedding)
191
+ embedding = embedding.unsqueeze(dim=1)
192
+ else:
193
+ embedding = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device).to(text.dtype)
194
+
195
+ # 3. concat llm_input
196
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
197
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
198
+ if prompt_speech_token_len != 0:
199
+ prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
200
+ else:
201
+ prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
202
+ lm_input = torch.concat([sos_eos_emb, embedding, text, task_id_emb, prompt_speech_token_emb], dim=1)
203
+
204
+ # 4. cal min/max_length
205
+ min_len = int((text_len - prompt_text_len) * min_token_text_ratio)
206
+ max_len = int((text_len - prompt_text_len) * max_token_text_ratio)
207
+
208
+ # 5. step by step decode
209
+ out_tokens = []
210
+ offset = 0
211
+ att_cache, cnn_cache = torch.zeros((0, 0, 0, 0), device=lm_input.device), torch.zeros((0, 0, 0, 0), device=lm_input.device)
212
+ for i in range(max_len):
213
+ y_pred, att_cache, cnn_cache = self.llm.forward_chunk(lm_input, offset=offset, required_cache_size=-1,
214
+ att_cache=att_cache, cnn_cache=cnn_cache,
215
+ att_mask=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]),
216
+ device=lm_input.device)).to(torch.bool))
217
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
218
+ # force continue decode first token
219
+ if i == 0:
220
+ logp[:, self.speech_token_size] = -float('inf')
221
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True if i < min_len else False).item()
222
+ if top_ids == self.speech_token_size:
223
+ break
224
+ # in stream mode, yield token one by one
225
+ yield top_ids
226
+ out_tokens.append(top_ids)
227
+ offset += lm_input.size(1)
228
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
229
+
230
+
231
+ class Qwen2Encoder(torch.nn.Module):
232
+ def __init__(self, pretrain_path):
233
+ super().__init__()
234
+ self.model = Qwen2ForCausalLM.from_pretrained(pretrain_path)
235
+
236
+ def forward(self, xs: torch.Tensor, xs_lens: torch.Tensor):
237
+ T = xs.size(1)
238
+ masks = ~make_pad_mask(xs_lens, T)
239
+ outs = self.model(
240
+ inputs_embeds=xs,
241
+ attention_mask=masks,
242
+ output_hidden_states=True,
243
+ return_dict=True,
244
+ )
245
+ return outs.hidden_states[-1], masks.unsqueeze(1)
246
+
247
+ def forward_one_step(self, xs, masks, cache=None):
248
+ input_masks = masks[:, -1, :]
249
+ outs = self.model(
250
+ inputs_embeds=xs,
251
+ attention_mask=input_masks,
252
+ output_hidden_states=True,
253
+ return_dict=True,
254
+ use_cache=True,
255
+ past_key_values=cache,
256
+ )
257
+ xs = outs.hidden_states[-1]
258
+ new_cache = outs.past_key_values
259
+ return xs, new_cache
260
+
261
+
262
+ class Qwen2LM(TransformerLM):
263
+ def __init__(
264
+ self,
265
+ llm_input_size: int,
266
+ llm_output_size: int,
267
+ speech_token_size: int,
268
+ llm: torch.nn.Module,
269
+ sampling: Callable,
270
+ length_normalized_loss: bool = True,
271
+ lsm_weight: float = 0.0,
272
+ mix_ratio: List[int] = [5, 15],
273
+ ):
274
+ torch.nn.Module.__init__(self)
275
+ self.llm_input_size = llm_input_size
276
+ self.llm_output_size = llm_output_size
277
+ self.speech_token_size = speech_token_size
278
+ # 2. build speech token language model related modules
279
+ self.sos_eos = 0
280
+ self.task_id = 1
281
+ self.fill_token = 2
282
+
283
+ self.llm_embedding = torch.nn.Embedding(2, llm_input_size)
284
+ self.llm = llm
285
+ self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 3)
286
+ self.criterion_ce = LabelSmoothingLoss(
287
+ size=speech_token_size + 3,
288
+ padding_idx=IGNORE_ID,
289
+ smoothing=lsm_weight,
290
+ normalize_length=length_normalized_loss,
291
+ )
292
+
293
+ # 3. [Optional] build speech token related modules
294
+ self.speech_embedding = torch.nn.Embedding(speech_token_size + 3, llm_input_size)
295
+
296
+ # 4. sampling method
297
+ self.sampling = sampling
298
+ self.mix_ratio = mix_ratio
299
+
300
+ # 5. vllm related
301
+ self.stop_token_ids = [speech_token_size + i for i in range(3)]
302
+ self.vllm_output_queue = {}
303
+
304
+ def prepare_lm_input_target(self, text_token, text_token_emb, text_token_len, speech_token, speech_token_emb, speech_token_len):
305
+ lm_target, lm_input = [], []
306
+ text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True)
307
+ speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True)
308
+ text_token_emb = unpad_sequence(text_token_emb, text_token_len.cpu(), batch_first=True)
309
+ speech_token_emb = unpad_sequence(speech_token_emb, speech_token_len.cpu(), batch_first=True)
310
+ for i in range(len(text_token)):
311
+ # bistream sequence
312
+ if random.random() < 0.5 and speech_token_len[i] / text_token_len[i] > self.mix_ratio[1] / self.mix_ratio[0]:
313
+ this_lm_target, this_lm_input = [], []
314
+ this_lm_target.append(IGNORE_ID)
315
+ this_lm_input.append(self.llm_embedding.weight[self.sos_eos].reshape(1, -1))
316
+ for j in range(((text_token_len[i] + 1) / self.mix_ratio[0]).ceil().int().item()):
317
+ this_text_token = text_token[i][j * self.mix_ratio[0]: (j + 1) * self.mix_ratio[0]].tolist()
318
+ this_speech_token = speech_token[i][j * self.mix_ratio[1]: (j + 1) * self.mix_ratio[1]].tolist()
319
+ if len(this_text_token) == self.mix_ratio[0]:
320
+ assert len(this_speech_token) == self.mix_ratio[1]
321
+ this_lm_target += [IGNORE_ID] * (self.mix_ratio[0] - 1)
322
+ this_lm_target += this_speech_token
323
+ this_lm_target.append(self.speech_token_size + 2)
324
+ this_lm_input.append(text_token_emb[i][j * self.mix_ratio[0]: (j + 1) * self.mix_ratio[0]])
325
+ this_lm_input.append(speech_token_emb[i][j * self.mix_ratio[1]: (j + 1) * self.mix_ratio[1]])
326
+ else:
327
+ this_lm_target += [-1] * len(this_text_token)
328
+ this_lm_target += speech_token[i][j * self.mix_ratio[1]:].tolist()
329
+ this_lm_target.append(self.speech_token_size)
330
+ this_lm_input.append(text_token_emb[i][j * self.mix_ratio[0]:])
331
+ this_lm_input.append(self.llm_embedding.weight[self.task_id].reshape(1, -1))
332
+ this_lm_input.append(speech_token_emb[i][j * self.mix_ratio[1]:])
333
+ this_lm_target, this_lm_input = torch.tensor(this_lm_target), torch.concat(this_lm_input, dim=0)
334
+ # unistream sequence
335
+ else:
336
+ this_lm_target = torch.tensor([IGNORE_ID] * (1 + text_token_len[i]) + speech_token[i].tolist() + [self.speech_token_size])
337
+ this_lm_input = torch.concat([self.llm_embedding.weight[self.sos_eos].reshape(1, -1), text_token_emb[i],
338
+ self.llm_embedding.weight[self.task_id].reshape(1, -1), speech_token_emb[i]], dim=0)
339
+ lm_target.append(this_lm_target)
340
+ lm_input.append(this_lm_input)
341
+ lm_input_len = torch.tensor([i.size(0) for i in lm_input], dtype=torch.int32)
342
+ lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID)
343
+ lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID)
344
+ return lm_target, lm_input, lm_input_len
345
+
346
+ def forward(
347
+ self,
348
+ batch: dict,
349
+ device: torch.device,
350
+ ) -> Dict[str, Optional[torch.Tensor]]:
351
+ """
352
+ Args:
353
+ text: (B, L, D)
354
+ text_lengths: (B,)
355
+ audio: (B, T, N) or (B, T)
356
+ audio_lengths: (B,)
357
+ """
358
+ text_token = batch['text_token'].to(device)
359
+ text_token_len = batch['text_token_len'].to(device)
360
+ speech_token = batch['speech_token'].to(device)
361
+ speech_token_len = batch['speech_token_len'].to(device)
362
+
363
+ # 1. encode text_token
364
+ text_token_emb = self.llm.model.model.embed_tokens(text_token)
365
+
366
+ # 2. encode speech_token
367
+ speech_token_emb = self.speech_embedding(speech_token)
368
+
369
+ # 3. prepare llm_input/target
370
+ lm_target, lm_input, lm_input_len = self.prepare_lm_input_target(text_token, text_token_emb, text_token_len, speech_token, speech_token_emb, speech_token_len)
371
+ lm_target = lm_target.to(device)
372
+
373
+ # 4. run lm forward
374
+ lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device))
375
+ logits = self.llm_decoder(lm_output)
376
+ loss = self.criterion_ce(logits, lm_target.to(device))
377
+ acc = th_accuracy(logits.view(-1, self.speech_token_size + 3), lm_target, ignore_label=IGNORE_ID)
378
+ return {'loss': loss, 'acc': acc}
379
+
380
+ def forward_dpo(
381
+ self,
382
+ batch: dict,
383
+ device: torch.device,
384
+ ) -> Dict[str, Optional[torch.Tensor]]:
385
+ text_token = batch['text_token'].to(device)
386
+ text_token_len = batch['text_token_len'].to(device)
387
+ speech_token = batch['speech_token'].to(device)
388
+ speech_token_len = batch['speech_token_len'].to(device)
389
+ reject_speech_token = batch['reject_speech_token'].to(device)
390
+ reject_speech_token_len = batch['reject_speech_token_len'].to(device)
391
+
392
+ # 1. encode text_token
393
+ text_token_emb = self.llm.model.model.embed_tokens(text_token)
394
+
395
+ # 2. encode speech_token
396
+ speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True)
397
+ reject_speech_token = unpad_sequence(reject_speech_token, reject_speech_token_len.cpu(), batch_first=True)
398
+ speech_token_combined = speech_token + reject_speech_token
399
+ speech_token_combined = pad_sequence(speech_token_combined, batch_first=True, padding_value=0)
400
+ speech_token_combined_len = torch.concat([speech_token_len, reject_speech_token_len], dim=0)
401
+ speech_token_combined_emb = self.speech_embedding(speech_token_combined)
402
+
403
+ # 3. prepare llm_input/target
404
+ lm_target, lm_input, lm_input_len = self.prepare_lm_input_target(text_token.repeat(2, 1), text_token_emb.repeat(2, 1, 1), text_token_len.repeat(2), speech_token_combined, speech_token_combined_emb, speech_token_combined_len)
405
+ lm_target = lm_target.to(device)
406
+
407
+ # 4. run lm forward
408
+ lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device))
409
+ logits = self.llm_decoder(lm_output)
410
+ chosen_logits = logits[:text_token.shape[0]]
411
+ rejected_logits = logits[text_token.shape[0]:]
412
+ chosen_lm_target = lm_target[:text_token.shape[0]]
413
+ rejected_lm_target = lm_target[text_token.shape[0]:]
414
+ loss = self.criterion_ce(chosen_logits, chosen_lm_target.to(device))
415
+ acc = th_accuracy(chosen_logits.view(-1, self.speech_token_size + 3), chosen_lm_target, ignore_label=IGNORE_ID)
416
+
417
+ # 5. calculate dpo logits
418
+ chosen_lm_mask = chosen_lm_target == IGNORE_ID
419
+ rejected_lm_mask = rejected_lm_target == IGNORE_ID
420
+ chosen_logps = torch.gather(chosen_logits.log_softmax(dim=-1), dim=2, index=chosen_lm_target.masked_fill(chosen_lm_mask, 0).unsqueeze(dim=-1)).squeeze(dim=-1)
421
+ rejected_logps = torch.gather(rejected_logits.log_softmax(dim=-1), dim=2, index=rejected_lm_target.masked_fill(rejected_lm_mask, 0).unsqueeze(dim=-1)).squeeze(dim=-1)
422
+ chosen_logps = (chosen_logps * chosen_lm_mask).mean(dim=-1)
423
+ rejected_logps = (rejected_logps * rejected_lm_mask).mean(dim=-1)
424
+ return {'loss': loss, 'acc': acc, 'chosen_logps': chosen_logps, 'rejected_logps': rejected_logps}
425
+
426
+ @torch.inference_mode()
427
+ def inference(
428
+ self,
429
+ text: torch.Tensor,
430
+ text_len: torch.Tensor,
431
+ prompt_text: torch.Tensor,
432
+ prompt_text_len: torch.Tensor,
433
+ prompt_speech_token: torch.Tensor,
434
+ prompt_speech_token_len: torch.Tensor,
435
+ embedding: torch.Tensor,
436
+ sampling: int = 25,
437
+ max_token_text_ratio: float = 20,
438
+ min_token_text_ratio: float = 2,
439
+ uuid: str = '',
440
+ ) -> Generator[torch.Tensor, None, None]:
441
+ device = text.device
442
+ text = torch.concat([prompt_text, text], dim=1)
443
+ text_len += prompt_text_len
444
+ text = self.llm.model.model.embed_tokens(text)
445
+
446
+ # 3. concat llm_input
447
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
448
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
449
+ if prompt_speech_token_len != 0:
450
+ prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
451
+ else:
452
+ prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
453
+ lm_input = torch.concat([sos_eos_emb, text, task_id_emb, prompt_speech_token_emb], dim=1)
454
+
455
+ # 4. cal min/max_length
456
+ min_len = int((text_len - prompt_text_len) * min_token_text_ratio)
457
+ max_len = int((text_len - prompt_text_len) * max_token_text_ratio)
458
+
459
+ # 5. step by step decode
460
+ for token in self.inference_wrapper(lm_input, sampling, min_len, max_len, uuid):
461
+ yield token
462
+
463
+ @torch.inference_mode()
464
+ def inference_wrapper(self, lm_input, sampling, min_len, max_len, uuid):
465
+ if hasattr(self, 'vllm'):
466
+ from vllm import SamplingParams, RequestOutput
467
+ sampling_params = SamplingParams(top_k=sampling,
468
+ stop_token_ids=self.stop_token_ids,
469
+ min_tokens=min_len,
470
+ max_tokens=max_len)
471
+ with self.lock:
472
+ self.vllm.add_request(uuid, {"prompt_embeds": lm_input.squeeze(0).to(torch.bfloat16).to(lm_input.device)}, sampling_params)
473
+ self.vllm_output_queue[uuid] = queue.Queue()
474
+ out_tokens = []
475
+ while True:
476
+ with self.lock:
477
+ if self.vllm_output_queue[uuid].empty() is True:
478
+ request_outputs: List[RequestOutput] = self.vllm.step()
479
+ for request_output in request_outputs:
480
+ top_ids = list(request_output.outputs[0].token_ids)[-1]
481
+ self.vllm_output_queue[request_output.request_id].put(top_ids)
482
+ if self.vllm_output_queue[uuid].empty() is False:
483
+ top_ids = self.vllm_output_queue[uuid].get()
484
+ if top_ids in self.stop_token_ids:
485
+ break
486
+ # in stream mode, yield token one by one
487
+ yield top_ids
488
+ out_tokens.append(top_ids)
489
+ if len(out_tokens) == max_len:
490
+ break
491
+ time.sleep(0.001)
492
+ with self.lock:
493
+ self.vllm_output_queue.pop(uuid)
494
+ else:
495
+ out_tokens = []
496
+ cache = None
497
+ for i in range(max_len):
498
+ y_pred, cache = self.llm.forward_one_step(lm_input,
499
+ masks=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]), device=lm_input.device)).to(torch.bool),
500
+ cache=cache)
501
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
502
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True if i < min_len else False).item()
503
+ if top_ids == self.speech_token_size:
504
+ break
505
+ if top_ids > self.speech_token_size:
506
+ continue
507
+ # in stream mode, yield token one by one
508
+ yield top_ids
509
+ out_tokens.append(top_ids)
510
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
511
+
512
+ @torch.inference_mode()
513
+ def inference_bistream(
514
+ self,
515
+ text: Generator,
516
+ prompt_text: torch.Tensor,
517
+ prompt_text_len: torch.Tensor,
518
+ prompt_speech_token: torch.Tensor,
519
+ prompt_speech_token_len: torch.Tensor,
520
+ embedding: torch.Tensor,
521
+ sampling: int = 25,
522
+ max_token_text_ratio: float = 20,
523
+ min_token_text_ratio: float = 2,
524
+ ) -> Generator[torch.Tensor, None, None]:
525
+
526
+ device = prompt_text.device
527
+ # 1. prepare input
528
+ sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
529
+ task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
530
+ if prompt_speech_token_len != 0:
531
+ prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
532
+ else:
533
+ prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=prompt_text.dtype).to(device)
534
+ lm_input = torch.concat([sos_eos_emb], dim=1)
535
+
536
+ # 2. iterate text
537
+ out_tokens = []
538
+ cache = None
539
+ # NOTE: initialize text_cache with prompt_text, since prompt_speech_token/prompt_text is essentially never below 15/5
540
+ text_cache = self.llm.model.model.embed_tokens(prompt_text)
541
+ next_fill_index = -1
542
+ for this_text in text:
543
+ text_cache = torch.concat([text_cache, self.llm.model.model.embed_tokens(this_text)], dim=1)
544
+ # prompt_speech_token_emb not empty, try appending it to lm_input
545
+ while prompt_speech_token_emb.size(1) != 0:
546
+ if text_cache.size(1) >= self.mix_ratio[0]:
547
+ lm_input_text, lm_input_speech = text_cache[:, :self.mix_ratio[0]], prompt_speech_token_emb[:, :self.mix_ratio[1]]
548
+ logging.info('append {} text token {} speech token'.format(lm_input_text.size(1), lm_input_speech.size(1)))
549
+ lm_input = torch.concat([lm_input, lm_input_text, lm_input_speech], dim=1)
550
+ text_cache, prompt_speech_token_emb = text_cache[:, self.mix_ratio[0]:], prompt_speech_token_emb[:, self.mix_ratio[1]:]
551
+ else:
552
+ logging.info('not enough text token to decode, wait for more')
553
+ break
554
+ # no prompt_speech_token_emb remains, we can decode some speech tokens
555
+ if prompt_speech_token_emb.size(1) == 0:
556
+ if (len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2) or (len(out_tokens) == 0 and lm_input.size(1) == 1):
557
+ logging.info('get fill token, need to append more text token')
558
+ if text_cache.size(1) >= self.mix_ratio[0]:
559
+ lm_input_text = text_cache[:, :self.mix_ratio[0]]
560
+ logging.info('append {} text token'.format(lm_input_text.size(1)))
561
+ if len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2:
562
+ lm_input = lm_input_text
563
+ else:
564
+ lm_input = torch.concat([lm_input, lm_input_text], dim=1)
565
+ text_cache = text_cache[:, self.mix_ratio[0]:]
566
+ else:
567
+ logging.info('not enough text token to decode, wait for more')
568
+ continue
569
+ while True:
570
+ seq_len = lm_input.shape[1] if cache is None else lm_input.shape[1] + cache[0][0].size(2)
571
+ y_pred, cache = self.llm.forward_one_step(lm_input,
572
+ masks=torch.tril(torch.ones((1, seq_len, seq_len), device=lm_input.device)).to(torch.bool),
573
+ cache=cache)
574
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
575
+ if next_fill_index != -1 and len(out_tokens) == next_fill_index:
576
+ top_ids = self.speech_token_size + 2
577
+ next_fill_index += (self.mix_ratio[1] + 1)
578
+ else:
579
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True).item()
580
+ if top_ids == self.speech_token_size + 2:
581
+ next_fill_index = len(out_tokens) + self.mix_ratio[1] + 1
582
+ logging.info('fill_token index {} next fill_token index {}'.format(len(out_tokens), next_fill_index))
583
+ out_tokens.append(top_ids)
584
+ if top_ids >= self.speech_token_size:
585
+ if top_ids == self.speech_token_size + 2:
586
+ break
587
+ else:
588
+ raise ValueError('should not get token {}'.format(top_ids))
589
+ yield top_ids
590
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
591
+
592
+ # 3. final decode
593
+ lm_input = torch.concat([lm_input, text_cache, task_id_emb], dim=1)
594
+ logging.info('no more text token, decode until met eos')
595
+ while True:
596
+ seq_len = lm_input.shape[1] if cache is None else lm_input.shape[1] + cache[0][0].size(2)
597
+ y_pred, cache = self.llm.forward_one_step(lm_input,
598
+ masks=torch.tril(torch.ones((1, seq_len, seq_len), device=lm_input.device)).to(torch.bool),
599
+ cache=cache)
600
+ logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
601
+ top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=False).item()
602
+ out_tokens.append(top_ids)
603
+ if top_ids >= self.speech_token_size:
604
+ if top_ids == self.speech_token_size:
605
+ break
606
+ else:
607
+ raise ValueError('should not get token {}'.format(top_ids))
608
+ # in stream mode, yield token one by one
609
+ yield top_ids
610
+ lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
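Editor's note: a minimal sketch (not part of this commit) of the bistream token layout built by prepare_lm_input_target above for mix_ratio = [5, 15]. The function name sketch_bistream_input and the strings '<sos>' and '<task_id>' are illustrative placeholders for the learned sos_eos / task_id embeddings; embeddings themselves are omitted.

def sketch_bistream_input(text, speech, mix_ratio=(5, 15)):
    """Token order of the bistream branch of prepare_lm_input_target (illustrative only)."""
    seq = ["<sos>"]
    n_blocks = (len(text) + 1 + mix_ratio[0] - 1) // mix_ratio[0]  # ceil((T + 1) / 5), as above
    for j in range(n_blocks):
        t = text[j * mix_ratio[0]:(j + 1) * mix_ratio[0]]
        s = speech[j * mix_ratio[1]:(j + 1) * mix_ratio[1]]
        if len(t) == mix_ratio[0]:
            seq += t + s  # full block: 5 text tokens followed by 15 speech tokens
        else:
            # tail block: leftover text, the task-id token, then all remaining speech
            seq += t + ["<task_id>"] + speech[j * mix_ratio[1]:]
    return seq

# 7 text tokens and 30 speech tokens ->
# ['<sos>', t0..t4, s0..s14, t5, t6, '<task_id>', s15..s29]
print(sketch_bistream_input([f"t{i}" for i in range(7)], [f"s{i}" for i in range(30)]))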
speech/cosyvoice/tokenizer/assets/multilingual_zh_ja_yue_char_del.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
speech/cosyvoice/tokenizer/tokenizer.py ADDED
@@ -0,0 +1,279 @@
1
+ import base64
2
+ import os
3
+ from functools import lru_cache
4
+ from typing import Optional
5
+ import torch
6
+ from transformers import AutoTokenizer
7
+ from whisper.tokenizer import Tokenizer
8
+
9
+ import tiktoken
10
+
11
+ LANGUAGES = {
12
+ "en": "english",
13
+ "zh": "chinese",
14
+ "de": "german",
15
+ "es": "spanish",
16
+ "ru": "russian",
17
+ "ko": "korean",
18
+ "fr": "french",
19
+ "ja": "japanese",
20
+ "pt": "portuguese",
21
+ "tr": "turkish",
22
+ "pl": "polish",
23
+ "ca": "catalan",
24
+ "nl": "dutch",
25
+ "ar": "arabic",
26
+ "sv": "swedish",
27
+ "it": "italian",
28
+ "id": "indonesian",
29
+ "hi": "hindi",
30
+ "fi": "finnish",
31
+ "vi": "vietnamese",
32
+ "he": "hebrew",
33
+ "uk": "ukrainian",
34
+ "el": "greek",
35
+ "ms": "malay",
36
+ "cs": "czech",
37
+ "ro": "romanian",
38
+ "da": "danish",
39
+ "hu": "hungarian",
40
+ "ta": "tamil",
41
+ "no": "norwegian",
42
+ "th": "thai",
43
+ "ur": "urdu",
44
+ "hr": "croatian",
45
+ "bg": "bulgarian",
46
+ "lt": "lithuanian",
47
+ "la": "latin",
48
+ "mi": "maori",
49
+ "ml": "malayalam",
50
+ "cy": "welsh",
51
+ "sk": "slovak",
52
+ "te": "telugu",
53
+ "fa": "persian",
54
+ "lv": "latvian",
55
+ "bn": "bengali",
56
+ "sr": "serbian",
57
+ "az": "azerbaijani",
58
+ "sl": "slovenian",
59
+ "kn": "kannada",
60
+ "et": "estonian",
61
+ "mk": "macedonian",
62
+ "br": "breton",
63
+ "eu": "basque",
64
+ "is": "icelandic",
65
+ "hy": "armenian",
66
+ "ne": "nepali",
67
+ "mn": "mongolian",
68
+ "bs": "bosnian",
69
+ "kk": "kazakh",
70
+ "sq": "albanian",
71
+ "sw": "swahili",
72
+ "gl": "galician",
73
+ "mr": "marathi",
74
+ "pa": "punjabi",
75
+ "si": "sinhala",
76
+ "km": "khmer",
77
+ "sn": "shona",
78
+ "yo": "yoruba",
79
+ "so": "somali",
80
+ "af": "afrikaans",
81
+ "oc": "occitan",
82
+ "ka": "georgian",
83
+ "be": "belarusian",
84
+ "tg": "tajik",
85
+ "sd": "sindhi",
86
+ "gu": "gujarati",
87
+ "am": "amharic",
88
+ "yi": "yiddish",
89
+ "lo": "lao",
90
+ "uz": "uzbek",
91
+ "fo": "faroese",
92
+ "ht": "haitian creole",
93
+ "ps": "pashto",
94
+ "tk": "turkmen",
95
+ "nn": "nynorsk",
96
+ "mt": "maltese",
97
+ "sa": "sanskrit",
98
+ "lb": "luxembourgish",
99
+ "my": "myanmar",
100
+ "bo": "tibetan",
101
+ "tl": "tagalog",
102
+ "mg": "malagasy",
103
+ "as": "assamese",
104
+ "tt": "tatar",
105
+ "haw": "hawaiian",
106
+ "ln": "lingala",
107
+ "ha": "hausa",
108
+ "ba": "bashkir",
109
+ "jw": "javanese",
110
+ "su": "sundanese",
111
+ "yue": "cantonese",
112
+ "minnan": "minnan",
113
+ "wuyu": "wuyu",
114
+ "dialect": "dialect",
115
+ "zh/en": "zh/en",
116
+ "en/zh": "en/zh",
117
+ }
118
+
119
+ # language code lookup by name, with a few language aliases
120
+ TO_LANGUAGE_CODE = {
121
+ **{language: code for code, language in LANGUAGES.items()},
122
+ "burmese": "my",
123
+ "valencian": "ca",
124
+ "flemish": "nl",
125
+ "haitian": "ht",
126
+ "letzeburgesch": "lb",
127
+ "pushto": "ps",
128
+ "panjabi": "pa",
129
+ "moldavian": "ro",
130
+ "moldovan": "ro",
131
+ "sinhalese": "si",
132
+ "castilian": "es",
133
+ "mandarin": "zh",
134
+ }
135
+
136
+ AUDIO_EVENT = {
137
+ "ASR": "ASR",
138
+ "AED": "AED",
139
+ "SER": "SER",
140
+ "Speech": "Speech",
141
+ "/Speech": "/Speech",
142
+ "BGM": "BGM",
143
+ "/BGM": "/BGM",
144
+ "Laughter": "Laughter",
145
+ "/Laughter": "/Laughter",
146
+ "Applause": "Applause",
147
+ "/Applause": "/Applause",
148
+ }
149
+
150
+ EMOTION = {
151
+ "HAPPY": "HAPPY",
152
+ "SAD": "SAD",
153
+ "ANGRY": "ANGRY",
154
+ "NEUTRAL": "NEUTRAL",
155
+ }
156
+
157
+ TTS_Vocal_Token = {
158
+ "TTS/B": "TTS/B",
159
+ "TTS/O": "TTS/O",
160
+ "TTS/Q": "TTS/Q",
161
+ "TTS/A": "TTS/A",
162
+ "TTS/CO": "TTS/CO",
163
+ "TTS/CL": "TTS/CL",
164
+ "TTS/H": "TTS/H",
165
+ **{f"TTS/SP{i:02d}": f"TTS/SP{i:02d}" for i in range(1, 14)}
166
+ }
167
+
168
+
169
+ @lru_cache(maxsize=None)
170
+ def get_encoding(name: str = "gpt2", num_languages: int = 99):
171
+ vocab_path = os.path.join(os.path.dirname(__file__), "assets", f"{name}.tiktoken")
172
+ ranks = {
173
+ base64.b64decode(token): int(rank)
174
+ for token, rank in (line.split() for line in open(vocab_path) if line)
175
+ }
176
+ n_vocab = len(ranks)
177
+ special_tokens = {}
178
+
179
+ specials = [
180
+ "<|endoftext|>",
181
+ "<|startoftranscript|>",
182
+ *[f"<|{lang}|>" for lang in list(LANGUAGES.keys())[:num_languages]],
183
+ *[f"<|{audio_event}|>" for audio_event in list(AUDIO_EVENT.keys())],
184
+ *[f"<|{emotion}|>" for emotion in list(EMOTION.keys())],
185
+ "<|translate|>",
186
+ "<|transcribe|>",
187
+ "<|startoflm|>",
188
+ "<|startofprev|>",
189
+ "<|nospeech|>",
190
+ "<|notimestamps|>",
191
+ *[f"<|SPECIAL_TOKEN_{i}|>" for i in range(1, 31)], # register special tokens for ASR
192
+ *[f"<|{tts}|>" for tts in list(TTS_Vocal_Token.keys())], # register special tokens for TTS
193
+ *[f"<|{i * 0.02:.2f}|>" for i in range(1501)],
194
+ ]
195
+
196
+ for token in specials:
197
+ special_tokens[token] = n_vocab
198
+ n_vocab += 1
199
+
200
+ return tiktoken.Encoding(
201
+ name=os.path.basename(vocab_path),
202
+ explicit_n_vocab=n_vocab,
203
+ pat_str=r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""",
204
+ mergeable_ranks=ranks,
205
+ special_tokens=special_tokens,
206
+ )
207
+
208
+
209
+ @lru_cache(maxsize=None)
210
+ def get_tokenizer(
211
+ multilingual: bool,
212
+ *,
213
+ num_languages: int = 99,
214
+ language: Optional[str] = None,
215
+ task: Optional[str] = None, # Literal["transcribe", "translate", None]
216
+ ) -> Tokenizer:
217
+ if language is not None:
218
+ language = language.lower()
219
+ if language not in LANGUAGES:
220
+ if language in TO_LANGUAGE_CODE:
221
+ language = TO_LANGUAGE_CODE[language]
222
+ else:
223
+ raise ValueError(f"Unsupported language: {language}")
224
+
225
+ if multilingual:
226
+ encoding_name = "multilingual_zh_ja_yue_char_del"
227
+ language = language or "en"
228
+ task = task or "transcribe"
229
+ else:
230
+ encoding_name = "gpt2"
231
+ language = None
232
+ task = None
233
+
234
+ encoding = get_encoding(name=encoding_name, num_languages=num_languages)
235
+
236
+ return Tokenizer(
237
+ encoding=encoding, num_languages=num_languages, language=language, task=task
238
+ )
239
+
240
+
241
+ class QwenTokenizer():
242
+ def __init__(self, token_path, skip_special_tokens=True):
243
+ super().__init__()
244
+ # NOTE: non-chat model, all these special tokens keep randomly initialized.
245
+ special_tokens = {
246
+ 'eos_token': '<|endoftext|>',
247
+ 'pad_token': '<|endoftext|>',
248
+ 'additional_special_tokens': [
249
+ '<|im_start|>', '<|im_end|>', '<|endofprompt|>',
250
+ '[breath]', '<strong>', '</strong>', '[noise]',
251
+ '[laughter]', '[cough]', '[clucking]', '[accent]',
252
+ '[quick_breath]',
253
+ "<laughter>", "</laughter>",
254
+ "[hissing]", "[sigh]", "[vocalized-noise]",
255
+ "[lipsmack]", "[mn]"
256
+ ]
257
+ }
258
+ self.special_tokens = special_tokens
259
+ self.tokenizer = AutoTokenizer.from_pretrained(token_path)
260
+ self.tokenizer.add_special_tokens(special_tokens)
261
+ self.skip_special_tokens = skip_special_tokens
262
+
263
+ def encode(self, text, **kwargs):
264
+ tokens = self.tokenizer([text], return_tensors="pt")
265
+ tokens = tokens["input_ids"][0].cpu().tolist()
266
+ return tokens
267
+
268
+ def decode(self, tokens):
269
+ tokens = torch.tensor(tokens, dtype=torch.int64)
270
+ text = self.tokenizer.batch_decode([tokens], skip_special_tokens=self.skip_special_tokens)[0]
271
+ return text
272
+
273
+
274
+ @lru_cache(maxsize=None)
275
+ def get_qwen_tokenizer(
276
+ token_path: str,
277
+ skip_special_tokens: bool
278
+ ) -> QwenTokenizer:
279
+ return QwenTokenizer(token_path=token_path, skip_special_tokens=skip_special_tokens)
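Editor's note: a minimal usage sketch (not part of this commit) for the two tokenizer entry points above. The token_path value is a placeholder for a local Qwen2 tokenizer directory, and the whisper-style tokenizer assumes the openai-whisper package and the bundled tiktoken asset are installed.

from cosyvoice.tokenizer.tokenizer import get_qwen_tokenizer, get_tokenizer

# Text tokenizer used by the Qwen2-based LLM (path is a placeholder).
qwen_tok = get_qwen_tokenizer(token_path="<path-to-qwen2-tokenizer>", skip_special_tokens=True)
ids = qwen_tok.encode("你好, CosyVoice [breath]")
print(ids, qwen_tok.decode(ids))

# Whisper-style multilingual tokenizer built on the bundled tiktoken vocabulary.
asr_tok = get_tokenizer(multilingual=True, language="zh", task="transcribe")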
speech/cosyvoice/transformer/__init__.py ADDED
File without changes
speech/cosyvoice/transformer/activation.py ADDED
@@ -0,0 +1,84 @@
1
+ # Copyright (c) 2020 Johns Hopkins University (Shinji Watanabe)
2
+ # 2020 Northwestern Polytechnical University (Pengcheng Guo)
3
+ # 2020 Mobvoi Inc (Binbin Zhang)
4
+ # 2024 Alibaba Inc (Xiang Lyu)
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """Swish() activation function for Conformer."""
18
+
19
+ import torch
20
+ from torch import nn, sin, pow
21
+ from torch.nn import Parameter
22
+
23
+
24
+ class Swish(torch.nn.Module):
25
+ """Construct an Swish object."""
26
+
27
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
28
+ """Return Swish activation function."""
29
+ return x * torch.sigmoid(x)
30
+
31
+
32
+ # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
33
+ # LICENSE is in incl_licenses directory.
34
+ class Snake(nn.Module):
35
+ '''
36
+ Implementation of a sine-based periodic activation function
37
+ Shape:
38
+ - Input: (B, C, T)
39
+ - Output: (B, C, T), same shape as the input
40
+ Parameters:
41
+ - alpha - trainable parameter
42
+ References:
43
+ - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
44
+ https://arxiv.org/abs/2006.08195
45
+ Examples:
46
+ >>> a1 = snake(256)
47
+ >>> x = torch.randn(256)
48
+ >>> x = a1(x)
49
+ '''
50
+ def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
51
+ '''
52
+ Initialization.
53
+ INPUT:
54
+ - in_features: shape of the input
55
+ - alpha: trainable parameter
56
+ alpha is initialized to 1 by default, higher values = higher-frequency.
57
+ alpha will be trained along with the rest of your model.
58
+ '''
59
+ super(Snake, self).__init__()
60
+ self.in_features = in_features
61
+
62
+ # initialize alpha
63
+ self.alpha_logscale = alpha_logscale
64
+ if self.alpha_logscale: # log scale alphas initialized to zeros
65
+ self.alpha = Parameter(torch.zeros(in_features) * alpha)
66
+ else: # linear scale alphas initialized to ones
67
+ self.alpha = Parameter(torch.ones(in_features) * alpha)
68
+
69
+ self.alpha.requires_grad = alpha_trainable
70
+
71
+ self.no_div_by_zero = 0.000000001
72
+
73
+ def forward(self, x):
74
+ '''
75
+ Forward pass of the function.
76
+ Applies the function to the input elementwise.
77
+ Snake ∶= x + 1/a * sin^2 (xa)
78
+ '''
79
+ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
80
+ if self.alpha_logscale:
81
+ alpha = torch.exp(alpha)
82
+ x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
83
+
84
+ return x
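Editor's note: a quick sanity-check sketch (not part of this commit) for the two activations above; with the default alpha = 1, Snake reduces to x + sin(x)^2 up to the 1e-9 stabilizer.

import torch
from cosyvoice.transformer.activation import Swish, Snake

x = torch.randn(2, 8, 16)                 # (B, C, T)
swish = Swish()
snake = Snake(in_features=8, alpha=1.0)   # one alpha per channel
y = snake(swish(x))                       # chaining works because shapes are preserved
# alpha = 1  =>  Snake(x) ~= x + sin(x) ** 2
assert torch.allclose(snake(x), x + torch.sin(x) ** 2, atol=1e-6)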
speech/cosyvoice/transformer/attention.py ADDED
@@ -0,0 +1,330 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ # 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
4
+ # 2024 Alibaba Inc (Xiang Lyu)
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """Multi-Head Attention layer definition."""
18
+
19
+ import math
20
+ from typing import Tuple
21
+
22
+ import torch
23
+ from torch import nn
24
+
25
+
26
+ class MultiHeadedAttention(nn.Module):
27
+ """Multi-Head Attention layer.
28
+
29
+ Args:
30
+ n_head (int): The number of heads.
31
+ n_feat (int): The number of features.
32
+ dropout_rate (float): Dropout rate.
33
+
34
+ """
35
+
36
+ def __init__(self,
37
+ n_head: int,
38
+ n_feat: int,
39
+ dropout_rate: float,
40
+ key_bias: bool = True):
41
+ """Construct an MultiHeadedAttention object."""
42
+ super().__init__()
43
+ assert n_feat % n_head == 0
44
+ # We assume d_v always equals d_k
45
+ self.d_k = n_feat // n_head
46
+ self.h = n_head
47
+ self.linear_q = nn.Linear(n_feat, n_feat)
48
+ self.linear_k = nn.Linear(n_feat, n_feat, bias=key_bias)
49
+ self.linear_v = nn.Linear(n_feat, n_feat)
50
+ self.linear_out = nn.Linear(n_feat, n_feat)
51
+ self.dropout = nn.Dropout(p=dropout_rate)
52
+
53
+ def forward_qkv(
54
+ self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
55
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
56
+ """Transform query, key and value.
57
+
58
+ Args:
59
+ query (torch.Tensor): Query tensor (#batch, time1, size).
60
+ key (torch.Tensor): Key tensor (#batch, time2, size).
61
+ value (torch.Tensor): Value tensor (#batch, time2, size).
62
+
63
+ Returns:
64
+ torch.Tensor: Transformed query tensor, size
65
+ (#batch, n_head, time1, d_k).
66
+ torch.Tensor: Transformed key tensor, size
67
+ (#batch, n_head, time2, d_k).
68
+ torch.Tensor: Transformed value tensor, size
69
+ (#batch, n_head, time2, d_k).
70
+
71
+ """
72
+ n_batch = query.size(0)
73
+ q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
74
+ k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
75
+ v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
76
+ q = q.transpose(1, 2) # (batch, head, time1, d_k)
77
+ k = k.transpose(1, 2) # (batch, head, time2, d_k)
78
+ v = v.transpose(1, 2) # (batch, head, time2, d_k)
79
+
80
+ return q, k, v
81
+
82
+ def forward_attention(
83
+ self,
84
+ value: torch.Tensor,
85
+ scores: torch.Tensor,
86
+ mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)
87
+ ) -> torch.Tensor:
88
+ """Compute attention context vector.
89
+
90
+ Args:
91
+ value (torch.Tensor): Transformed value, size
92
+ (#batch, n_head, time2, d_k).
93
+ scores (torch.Tensor): Attention score, size
94
+ (#batch, n_head, time1, time2).
95
+ mask (torch.Tensor): Mask, size (#batch, 1, time2) or
96
+ (#batch, time1, time2), (0, 0, 0) means fake mask.
97
+
98
+ Returns:
99
+ torch.Tensor: Transformed value (#batch, time1, d_model)
100
+ weighted by the attention score (#batch, time1, time2).
101
+
102
+ """
103
+ n_batch = value.size(0)
104
+ # NOTE(xcsong): When will `if mask.size(2) > 0` be True?
105
+ # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the
106
+ # 1st chunk to ease the onnx export.]
107
+ # 2. pytorch training
108
+ if mask.size(2) > 0: # time2 > 0
109
+ mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
110
+ # For last chunk, time2 might be larger than scores.size(-1)
111
+ mask = mask[:, :, :, :scores.size(-1)] # (batch, 1, *, time2)
112
+ scores = scores.masked_fill(mask, -float('inf'))
113
+ attn = torch.softmax(scores, dim=-1).masked_fill(
114
+ mask, 0.0) # (batch, head, time1, time2)
115
+ # NOTE(xcsong): When will `if mask.size(2) > 0` be False?
116
+ # 1. onnx(16/-1, -1/-1, 16/0)
117
+ # 2. jit (16/-1, -1/-1, 16/0, 16/4)
118
+ else:
119
+ attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
120
+
121
+ p_attn = self.dropout(attn)
122
+ x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
123
+ x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
124
+ self.h * self.d_k)
125
+ ) # (batch, time1, d_model)
126
+
127
+ return self.linear_out(x) # (batch, time1, d_model)
128
+
129
+ def forward(
130
+ self,
131
+ query: torch.Tensor,
132
+ key: torch.Tensor,
133
+ value: torch.Tensor,
134
+ mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
135
+ pos_emb: torch.Tensor = torch.empty(0),
136
+ cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
137
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
138
+ """Compute scaled dot product attention.
139
+
140
+ Args:
141
+ query (torch.Tensor): Query tensor (#batch, time1, size).
142
+ key (torch.Tensor): Key tensor (#batch, time2, size).
143
+ value (torch.Tensor): Value tensor (#batch, time2, size).
144
+ mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
145
+ (#batch, time1, time2).
146
+ 1.When applying cross attention between decoder and encoder,
147
+ the batch padding mask for input is in (#batch, 1, T) shape.
148
+ 2.When applying self attention of encoder,
149
+ the mask is in (#batch, T, T) shape.
150
+ 3.When applying self attention of decoder,
151
+ the mask is in (#batch, L, L) shape.
152
+ 4.If the different position in decoder see different block
153
+ of the encoder, such as Mocha, the passed in mask could be
154
+ in (#batch, L, T) shape. But there is no such case in current
155
+ CosyVoice.
156
+ cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
157
+ where `cache_t == chunk_size * num_decoding_left_chunks`
158
+ and `head * d_k == size`
159
+
160
+
161
+ Returns:
162
+ torch.Tensor: Output tensor (#batch, time1, d_model).
163
+ torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
164
+ where `cache_t == chunk_size * num_decoding_left_chunks`
165
+ and `head * d_k == size`
166
+
167
+ """
168
+ q, k, v = self.forward_qkv(query, key, value)
169
+
170
+ # NOTE(xcsong):
171
+ # when export onnx model, for 1st chunk, we feed
172
+ # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
173
+ # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
174
+ # In all modes, `if cache.size(0) > 0` will alwayse be `True`
175
+ # and we will always do splitting and
176
+ # concatnation(this will simplify onnx export). Note that
177
+ # it's OK to concat & split zero-shaped tensors(see code below).
178
+ # when export jit model, for 1st chunk, we always feed
179
+ # cache(0, 0, 0, 0) since jit supports dynamic if-branch.
180
+ # >>> a = torch.ones((1, 2, 0, 4))
181
+ # >>> b = torch.ones((1, 2, 3, 4))
182
+ # >>> c = torch.cat((a, b), dim=2)
183
+ # >>> torch.equal(b, c) # True
184
+ # >>> d = torch.split(a, 2, dim=-1)
185
+ # >>> torch.equal(d[0], d[1]) # True
186
+ if cache.size(0) > 0:
187
+ key_cache, value_cache = torch.split(cache,
188
+ cache.size(-1) // 2,
189
+ dim=-1)
190
+ k = torch.cat([key_cache, k], dim=2)
191
+ v = torch.cat([value_cache, v], dim=2)
192
+ # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
193
+ # non-trivial to calculate `next_cache_start` here.
194
+ new_cache = torch.cat((k, v), dim=-1)
195
+
196
+ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
197
+ return self.forward_attention(v, scores, mask), new_cache
198
+
199
+
200
+ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
201
+ """Multi-Head Attention layer with relative position encoding.
202
+ Paper: https://arxiv.org/abs/1901.02860
203
+ Args:
204
+ n_head (int): The number of heads.
205
+ n_feat (int): The number of features.
206
+ dropout_rate (float): Dropout rate.
207
+ """
208
+
209
+ def __init__(self,
210
+ n_head: int,
211
+ n_feat: int,
212
+ dropout_rate: float,
213
+ key_bias: bool = True):
214
+ """Construct an RelPositionMultiHeadedAttention object."""
215
+ super().__init__(n_head, n_feat, dropout_rate, key_bias)
216
+ # linear transformation for positional encoding
217
+ self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
218
+ # these two learnable bias are used in matrix c and matrix d
219
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
220
+ self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
221
+ self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
222
+ torch.nn.init.xavier_uniform_(self.pos_bias_u)
223
+ torch.nn.init.xavier_uniform_(self.pos_bias_v)
224
+
225
+ def rel_shift(self, x: torch.Tensor) -> torch.Tensor:
226
+ """Compute relative positional encoding.
227
+
228
+ Args:
229
+ x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
230
+ time1 means the length of query vector.
231
+
232
+ Returns:
233
+ torch.Tensor: Output tensor.
234
+
235
+ """
236
+ zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
237
+ device=x.device,
238
+ dtype=x.dtype)
239
+ x_padded = torch.cat([zero_pad, x], dim=-1)
240
+
241
+ x_padded = x_padded.view(x.size()[0],
242
+ x.size()[1],
243
+ x.size(3) + 1, x.size(2))
244
+ x = x_padded[:, :, 1:].view_as(x)[
245
+ :, :, :, : x.size(-1) // 2 + 1
246
+ ] # only keep the positions from 0 to time2
247
+ return x
248
+
249
+ def forward(
250
+ self,
251
+ query: torch.Tensor,
252
+ key: torch.Tensor,
253
+ value: torch.Tensor,
254
+ mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
255
+ pos_emb: torch.Tensor = torch.empty(0),
256
+ cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
257
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
258
+ """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
259
+ Args:
260
+ query (torch.Tensor): Query tensor (#batch, time1, size).
261
+ key (torch.Tensor): Key tensor (#batch, time2, size).
262
+ value (torch.Tensor): Value tensor (#batch, time2, size).
263
+ mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
264
+ (#batch, time1, time2), (0, 0, 0) means fake mask.
265
+ pos_emb (torch.Tensor): Positional embedding tensor
266
+ (#batch, time2, size).
267
+ cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
268
+ where `cache_t == chunk_size * num_decoding_left_chunks`
269
+ and `head * d_k == size`
270
+ Returns:
271
+ torch.Tensor: Output tensor (#batch, time1, d_model).
272
+ torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
273
+ where `cache_t == chunk_size * num_decoding_left_chunks`
274
+ and `head * d_k == size`
275
+ """
276
+ q, k, v = self.forward_qkv(query, key, value)
277
+ q = q.transpose(1, 2) # (batch, time1, head, d_k)
278
+
279
+ # NOTE(xcsong):
280
+ # when export onnx model, for 1st chunk, we feed
281
+ # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
282
+ # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
283
+ # In all modes, `if cache.size(0) > 0` will alwayse be `True`
284
+ # and we will always do splitting and
285
+ # concatnation(this will simplify onnx export). Note that
286
+ # it's OK to concat & split zero-shaped tensors(see code below).
287
+ # when export jit model, for 1st chunk, we always feed
288
+ # cache(0, 0, 0, 0) since jit supports dynamic if-branch.
289
+ # >>> a = torch.ones((1, 2, 0, 4))
290
+ # >>> b = torch.ones((1, 2, 3, 4))
291
+ # >>> c = torch.cat((a, b), dim=2)
292
+ # >>> torch.equal(b, c) # True
293
+ # >>> d = torch.split(a, 2, dim=-1)
294
+ # >>> torch.equal(d[0], d[1]) # True
295
+ if cache.size(0) > 0:
296
+ key_cache, value_cache = torch.split(cache,
297
+ cache.size(-1) // 2,
298
+ dim=-1)
299
+ k = torch.cat([key_cache, k], dim=2)
300
+ v = torch.cat([value_cache, v], dim=2)
301
+ # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
302
+ # non-trivial to calculate `next_cache_start` here.
303
+ new_cache = torch.cat((k, v), dim=-1)
304
+
305
+ n_batch_pos = pos_emb.size(0)
306
+ p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
307
+ p = p.transpose(1, 2) # (batch, head, time1, d_k)
308
+
309
+ # (batch, head, time1, d_k)
310
+ q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
311
+ # (batch, head, time1, d_k)
312
+ q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
313
+
314
+ # compute attention score
315
+ # first compute matrix a and matrix c
316
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
317
+ # (batch, head, time1, time2)
318
+ matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
319
+
320
+ # compute matrix b and matrix d
321
+ # (batch, head, time1, time2)
322
+ matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
323
+ # NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used
324
+ if matrix_ac.shape != matrix_bd.shape:
325
+ matrix_bd = self.rel_shift(matrix_bd)
326
+
327
+ scores = (matrix_ac + matrix_bd) / math.sqrt(
328
+ self.d_k) # (batch, head, time1, time2)
329
+
330
+ return self.forward_attention(v, scores, mask), new_cache
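Editor's note: an illustrative sketch (not part of this commit) of incremental decoding with the key/value cache handled by MultiHeadedAttention.forward above; each call appends the new keys/values to a cache of shape (batch, head, time, d_k * 2).

import torch
from cosyvoice.transformer.attention import MultiHeadedAttention

mha = MultiHeadedAttention(n_head=4, n_feat=64, dropout_rate=0.0)
cache = torch.zeros((0, 0, 0, 0))            # empty cache for the first step
x1 = torch.randn(1, 3, 64)                   # first 3 frames
y1, cache = mha(x1, x1, x1, cache=cache)     # cache now holds K/V for 3 frames
x2 = torch.randn(1, 1, 64)                   # one new frame
y2, cache = mha(x2, x2, x2, cache=cache)     # new frame attends over all 4 frames
assert cache.shape == (1, 4, 4, 32)          # (batch, head, time, d_k * 2) with d_k = 16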
speech/cosyvoice/transformer/convolution.py ADDED
@@ -0,0 +1,145 @@
1
+ # Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
2
+ # 2024 Alibaba Inc (Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """ConvolutionModule definition."""
17
+
18
+ from typing import Tuple
19
+
20
+ import torch
21
+ from torch import nn
22
+
23
+
24
+ class ConvolutionModule(nn.Module):
25
+ """ConvolutionModule in Conformer model."""
26
+
27
+ def __init__(self,
28
+ channels: int,
29
+ kernel_size: int = 15,
30
+ activation: nn.Module = nn.ReLU(),
31
+ norm: str = "batch_norm",
32
+ causal: bool = False,
33
+ bias: bool = True):
34
+ """Construct an ConvolutionModule object.
35
+ Args:
36
+ channels (int): The number of channels of conv layers.
37
+ kernel_size (int): Kernel size of conv layers.
38
+ causal (int): Whether use causal convolution or not
39
+ """
40
+ super().__init__()
41
+
42
+ self.pointwise_conv1 = nn.Conv1d(
43
+ channels,
44
+ 2 * channels,
45
+ kernel_size=1,
46
+ stride=1,
47
+ padding=0,
48
+ bias=bias,
49
+ )
50
+ # self.lorder is used to distinguish if it's a causal convolution,
51
+ # if self.lorder > 0: it's a causal convolution, the input will be
52
+ # padded with self.lorder frames on the left in forward.
53
+ # else: it's a symmetrical convolution
54
+ if causal:
55
+ padding = 0
56
+ self.lorder = kernel_size - 1
57
+ else:
58
+ # kernel_size should be an odd number for none causal convolution
59
+ assert (kernel_size - 1) % 2 == 0
60
+ padding = (kernel_size - 1) // 2
61
+ self.lorder = 0
62
+ self.depthwise_conv = nn.Conv1d(
63
+ channels,
64
+ channels,
65
+ kernel_size,
66
+ stride=1,
67
+ padding=padding,
68
+ groups=channels,
69
+ bias=bias,
70
+ )
71
+
72
+ assert norm in ['batch_norm', 'layer_norm']
73
+ if norm == "batch_norm":
74
+ self.use_layer_norm = False
75
+ self.norm = nn.BatchNorm1d(channels)
76
+ else:
77
+ self.use_layer_norm = True
78
+ self.norm = nn.LayerNorm(channels)
79
+
80
+ self.pointwise_conv2 = nn.Conv1d(
81
+ channels,
82
+ channels,
83
+ kernel_size=1,
84
+ stride=1,
85
+ padding=0,
86
+ bias=bias,
87
+ )
88
+ self.activation = activation
89
+
90
+ def forward(
91
+ self,
92
+ x: torch.Tensor,
93
+ mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
94
+ cache: torch.Tensor = torch.zeros((0, 0, 0)),
95
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
96
+ """Compute convolution module.
97
+ Args:
98
+ x (torch.Tensor): Input tensor (#batch, time, channels).
99
+ mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),
100
+ (0, 0, 0) means fake mask.
101
+ cache (torch.Tensor): left context cache, it is only
102
+ used in causal convolution (#batch, channels, cache_t),
103
+ (0, 0, 0) meas fake cache.
104
+ Returns:
105
+ torch.Tensor: Output tensor (#batch, time, channels).
106
+ """
107
+ # exchange the temporal dimension and the feature dimension
108
+ x = x.transpose(1, 2) # (#batch, channels, time)
109
+
110
+ # mask batch padding
111
+ if mask_pad.size(2) > 0: # time > 0
112
+ x.masked_fill_(~mask_pad, 0.0)
113
+
114
+ if self.lorder > 0:
115
+ if cache.size(2) == 0: # cache_t == 0
116
+ x = nn.functional.pad(x, (self.lorder, 0), 'constant', 0.0)
117
+ else:
118
+ assert cache.size(0) == x.size(0) # equal batch
119
+ assert cache.size(1) == x.size(1) # equal channel
120
+ x = torch.cat((cache, x), dim=2)
121
+ assert (x.size(2) > self.lorder)
122
+ new_cache = x[:, :, -self.lorder:]
123
+ else:
124
+ # It's better we just return None if no cache is required,
125
+ # However, for JIT export, here we just fake one tensor instead of
126
+ # None.
127
+ new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
128
+
129
+ # GLU mechanism
130
+ x = self.pointwise_conv1(x) # (batch, 2*channel, dim)
131
+ x = nn.functional.glu(x, dim=1) # (batch, channel, dim)
132
+
133
+ # 1D Depthwise Conv
134
+ x = self.depthwise_conv(x)
135
+ if self.use_layer_norm:
136
+ x = x.transpose(1, 2)
137
+ x = self.activation(self.norm(x))
138
+ if self.use_layer_norm:
139
+ x = x.transpose(1, 2)
140
+ x = self.pointwise_conv2(x)
141
+ # mask batch padding
142
+ if mask_pad.size(2) > 0: # time > 0
143
+ x.masked_fill_(~mask_pad, 0.0)
144
+
145
+ return x.transpose(1, 2), new_cache
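Editor's note: an illustrative sketch (not part of this commit) of streaming the causal ConvolutionModule above; the lorder = kernel_size - 1 left-context cache makes chunked inference match the full-utterance pass.

import torch
from cosyvoice.transformer.convolution import ConvolutionModule

conv = ConvolutionModule(channels=8, kernel_size=5, causal=True)
conv.eval()                                   # use running BatchNorm stats so chunks are consistent
x = torch.randn(1, 10, 8)                     # (batch, time, channels)
with torch.no_grad():
    y_full, _ = conv(x)                       # full-utterance pass
    cache = torch.zeros((0, 0, 0))            # fake cache: first chunk is left-padded with zeros
    y1, cache = conv(x[:, :6], cache=cache)
    y2, cache = conv(x[:, 6:], cache=cache)   # second chunk reuses lorder = 4 cached frames
    assert torch.allclose(torch.cat([y1, y2], dim=1), y_full, atol=1e-5)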
speech/cosyvoice/transformer/decoder.py ADDED
@@ -0,0 +1,396 @@
1
+ # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
2
+ # 2024 Alibaba Inc (Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """Decoder definition."""
17
+ from typing import Tuple, List, Optional
18
+
19
+ import torch
20
+ import torch.utils.checkpoint as ckpt
21
+ import logging
22
+
23
+ from cosyvoice.transformer.decoder_layer import DecoderLayer
24
+ from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
25
+ from cosyvoice.utils.class_utils import (
26
+ COSYVOICE_EMB_CLASSES,
27
+ COSYVOICE_ATTENTION_CLASSES,
28
+ COSYVOICE_ACTIVATION_CLASSES,
29
+ )
30
+ from cosyvoice.utils.mask import (subsequent_mask, make_pad_mask)
31
+
32
+
33
+ class TransformerDecoder(torch.nn.Module):
34
+ """Base class of Transfomer decoder module.
35
+ Args:
36
+ vocab_size: output dim
37
+ encoder_output_size: dimension of attention
38
+ attention_heads: the number of heads of multi head attention
39
+ linear_units: the hidden units number of position-wise feedforward
40
+ num_blocks: the number of decoder blocks
41
+ dropout_rate: dropout rate
42
+ self_attention_dropout_rate: dropout rate for attention
43
+ input_layer: input layer type
44
+ use_output_layer: whether to use output layer
45
+ pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
46
+ normalize_before:
47
+ True: use layer_norm before each sub-block of a layer.
48
+ False: use layer_norm after each sub-block of a layer.
49
+ src_attention: if false, encoder-decoder cross attention is not
50
+ applied, such as CIF model
51
+ key_bias: whether to use bias in attention.linear_k, False for whisper models.
52
+ gradient_checkpointing: rerunning a forward-pass segment for each
53
+ checkpointed segment during backward.
54
+ tie_word_embedding: Tie or clone module weights depending of whether we are
55
+ using TorchScript or not
56
+ """
57
+
58
+ def __init__(
59
+ self,
60
+ vocab_size: int,
61
+ encoder_output_size: int,
62
+ attention_heads: int = 4,
63
+ linear_units: int = 2048,
64
+ num_blocks: int = 6,
65
+ dropout_rate: float = 0.1,
66
+ positional_dropout_rate: float = 0.1,
67
+ self_attention_dropout_rate: float = 0.0,
68
+ src_attention_dropout_rate: float = 0.0,
69
+ input_layer: str = "embed",
70
+ use_output_layer: bool = True,
71
+ normalize_before: bool = True,
72
+ src_attention: bool = True,
73
+ key_bias: bool = True,
74
+ activation_type: str = "relu",
75
+ gradient_checkpointing: bool = False,
76
+ tie_word_embedding: bool = False,
77
+ ):
78
+ super().__init__()
79
+ attention_dim = encoder_output_size
80
+ activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
81
+
82
+ self.embed = torch.nn.Sequential(
83
+ torch.nn.Identity() if input_layer == "no_pos" else
84
+ torch.nn.Embedding(vocab_size, attention_dim),
85
+ COSYVOICE_EMB_CLASSES[input_layer](attention_dim,
86
+ positional_dropout_rate),
87
+ )
88
+
89
+ self.normalize_before = normalize_before
90
+ self.after_norm = torch.nn.LayerNorm(attention_dim, eps=1e-5)
91
+ self.use_output_layer = use_output_layer
92
+ if use_output_layer:
93
+ self.output_layer = torch.nn.Linear(attention_dim, vocab_size)
94
+ else:
95
+ self.output_layer = torch.nn.Identity()
96
+ self.num_blocks = num_blocks
97
+ self.decoders = torch.nn.ModuleList([
98
+ DecoderLayer(
99
+ attention_dim,
100
+ COSYVOICE_ATTENTION_CLASSES["selfattn"](
101
+ attention_heads, attention_dim,
102
+ self_attention_dropout_rate, key_bias),
103
+ COSYVOICE_ATTENTION_CLASSES["selfattn"](
104
+ attention_heads, attention_dim, src_attention_dropout_rate,
105
+ key_bias) if src_attention else None,
106
+ PositionwiseFeedForward(attention_dim, linear_units,
107
+ dropout_rate, activation),
108
+ dropout_rate,
109
+ normalize_before,
110
+ ) for _ in range(self.num_blocks)
111
+ ])
112
+
113
+ self.gradient_checkpointing = gradient_checkpointing
114
+ self.tie_word_embedding = tie_word_embedding
115
+
116
+ def forward(
117
+ self,
118
+ memory: torch.Tensor,
119
+ memory_mask: torch.Tensor,
120
+ ys_in_pad: torch.Tensor,
121
+ ys_in_lens: torch.Tensor,
122
+ r_ys_in_pad: torch.Tensor = torch.empty(0),
123
+ reverse_weight: float = 0.0,
124
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
125
+ """Forward decoder.
126
+ Args:
127
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
128
+ memory_mask: encoder memory mask, (batch, 1, maxlen_in)
129
+ ys_in_pad: padded input token ids, int64 (batch, maxlen_out)
130
+ ys_in_lens: input lengths of this batch (batch)
131
+ r_ys_in_pad: not used in transformer decoder, in order to unify api
132
+ with bidirectional decoder
133
+ reverse_weight: not used in transformer decoder, in order to unify
134
+ api with bidirectional decoder
135
+ Returns:
136
+ (tuple): tuple containing:
137
+ x: decoded token score before softmax (batch, maxlen_out,
138
+ vocab_size) if use_output_layer is True,
139
+ torch.tensor(0.0), in order to unify api with bidirectional decoder
140
+ olens: (batch, )
141
+ NOTE(xcsong):
142
+ We pass the `__call__` method of the modules instead of `forward` to the
143
+ checkpointing API because `__call__` attaches all the hooks of the module.
144
+ https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
145
+ """
146
+ tgt = ys_in_pad
147
+ maxlen = tgt.size(1)
148
+ # tgt_mask: (B, 1, L)
149
+ tgt_mask = ~make_pad_mask(ys_in_lens, maxlen).unsqueeze(1)
150
+ tgt_mask = tgt_mask.to(tgt.device)
151
+ # m: (1, L, L)
152
+ m = subsequent_mask(tgt_mask.size(-1),
153
+ device=tgt_mask.device).unsqueeze(0)
154
+ # tgt_mask: (B, L, L)
155
+ tgt_mask = tgt_mask & m
156
+ x, _ = self.embed(tgt)
157
+ if self.gradient_checkpointing and self.training:
158
+ x = self.forward_layers_checkpointed(x, tgt_mask, memory,
159
+ memory_mask)
160
+ else:
161
+ x = self.forward_layers(x, tgt_mask, memory, memory_mask)
162
+ if self.normalize_before:
163
+ x = self.after_norm(x)
164
+ if self.use_output_layer:
165
+ x = self.output_layer(x)
166
+ olens = tgt_mask.sum(1)
167
+ return x, torch.tensor(0.0), olens
168
+
169
+ def forward_layers(self, x: torch.Tensor, tgt_mask: torch.Tensor,
170
+ memory: torch.Tensor,
171
+ memory_mask: torch.Tensor) -> torch.Tensor:
172
+ for layer in self.decoders:
173
+ x, tgt_mask, memory, memory_mask = layer(x, tgt_mask, memory,
174
+ memory_mask)
175
+ return x
176
+
177
+ @torch.jit.unused
178
+ def forward_layers_checkpointed(self, x: torch.Tensor,
179
+ tgt_mask: torch.Tensor,
180
+ memory: torch.Tensor,
181
+ memory_mask: torch.Tensor) -> torch.Tensor:
182
+ for layer in self.decoders:
183
+ x, tgt_mask, memory, memory_mask = ckpt.checkpoint(
184
+ layer.__call__, x, tgt_mask, memory, memory_mask)
185
+ return x
186
+
187
+ def forward_one_step(
188
+ self,
189
+ memory: torch.Tensor,
190
+ memory_mask: torch.Tensor,
191
+ tgt: torch.Tensor,
192
+ tgt_mask: torch.Tensor,
193
+ cache: Optional[List[torch.Tensor]] = None,
194
+ ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
195
+ """Forward one step.
196
+ This is only used for decoding.
197
+ Args:
198
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
199
+ memory_mask: encoded memory mask, (batch, 1, maxlen_in)
200
+ tgt: input token ids, int64 (batch, maxlen_out)
201
+ tgt_mask: input token mask, (batch, maxlen_out)
202
+ dtype=torch.uint8 in PyTorch 1.2-
203
+ dtype=torch.bool in PyTorch 1.2+ (include 1.2)
204
+ cache: cached output list of (batch, max_time_out-1, size)
205
+ Returns:
206
+ y, cache: NN output value and cache per `self.decoders`.
207
+ y.shape` is (batch, maxlen_out, token)
208
+ """
209
+ x, _ = self.embed(tgt)
210
+ new_cache = []
211
+ for i, decoder in enumerate(self.decoders):
212
+ if cache is None:
213
+ c = None
214
+ else:
215
+ c = cache[i]
216
+ x, tgt_mask, memory, memory_mask = decoder(x,
217
+ tgt_mask,
218
+ memory,
219
+ memory_mask,
220
+ cache=c)
221
+ new_cache.append(x)
222
+ if self.normalize_before:
223
+ y = self.after_norm(x[:, -1])
224
+ else:
225
+ y = x[:, -1]
226
+ if self.use_output_layer:
227
+ y = torch.log_softmax(self.output_layer(y), dim=-1)
228
+ return y, new_cache
229
+
230
+ def tie_or_clone_weights(self, jit_mode: bool = True):
231
+ """Tie or clone module weights (between word_emb and output_layer)
232
+ depending on whether we are using TorchScript or not"""
233
+ if not self.use_output_layer:
234
+ return
235
+ if jit_mode:
236
+ logging.info("clone emb.weight to output.weight")
237
+ self.output_layer.weight = torch.nn.Parameter(
238
+ self.embed[0].weight.clone())
239
+ else:
240
+ logging.info("tie emb.weight with output.weight")
241
+ self.output_layer.weight = self.embed[0].weight
242
+
243
+ if getattr(self.output_layer, "bias", None) is not None:
244
+ self.output_layer.bias.data = torch.nn.functional.pad(
245
+ self.output_layer.bias.data,
246
+ (
247
+ 0,
248
+ self.output_layer.weight.shape[0] -
249
+ self.output_layer.bias.shape[0],
250
+ ),
251
+ "constant",
252
+ 0,
253
+ )
254
+
255
+
256
+ class BiTransformerDecoder(torch.nn.Module):
257
+ """Base class of Transfomer decoder module.
258
+ Args:
259
+ vocab_size: output dim
260
+ encoder_output_size: dimension of attention
261
+ attention_heads: the number of heads of multi head attention
262
+ linear_units: the hidden units number of position-wise feedforward
263
+ num_blocks: the number of decoder blocks
264
+ r_num_blocks: the number of right to left decoder blocks
265
+ dropout_rate: dropout rate
266
+ self_attention_dropout_rate: dropout rate for attention
267
+ input_layer: input layer type
268
+ use_output_layer: whether to use output layer
269
+ pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
270
+ normalize_before:
271
+ True: use layer_norm before each sub-block of a layer.
272
+ False: use layer_norm after each sub-block of a layer.
273
+ key_bias: whether to use bias in attention.linear_k; False for whisper models.
274
+ """
275
+
276
+ def __init__(
277
+ self,
278
+ vocab_size: int,
279
+ encoder_output_size: int,
280
+ attention_heads: int = 4,
281
+ linear_units: int = 2048,
282
+ num_blocks: int = 6,
283
+ r_num_blocks: int = 0,
284
+ dropout_rate: float = 0.1,
285
+ positional_dropout_rate: float = 0.1,
286
+ self_attention_dropout_rate: float = 0.0,
287
+ src_attention_dropout_rate: float = 0.0,
288
+ input_layer: str = "embed",
289
+ use_output_layer: bool = True,
290
+ normalize_before: bool = True,
291
+ key_bias: bool = True,
292
+ gradient_checkpointing: bool = False,
293
+ tie_word_embedding: bool = False,
294
+ ):
295
+
296
+ super().__init__()
297
+ self.tie_word_embedding = tie_word_embedding
298
+ self.left_decoder = TransformerDecoder(
299
+ vocab_size,
300
+ encoder_output_size,
301
+ attention_heads,
302
+ linear_units,
303
+ num_blocks,
304
+ dropout_rate,
305
+ positional_dropout_rate,
306
+ self_attention_dropout_rate,
307
+ src_attention_dropout_rate,
308
+ input_layer,
309
+ use_output_layer,
310
+ normalize_before,
311
+ key_bias=key_bias,
312
+ gradient_checkpointing=gradient_checkpointing,
313
+ tie_word_embedding=tie_word_embedding)
314
+
315
+ self.right_decoder = TransformerDecoder(
316
+ vocab_size,
317
+ encoder_output_size,
318
+ attention_heads,
319
+ linear_units,
320
+ r_num_blocks,
321
+ dropout_rate,
322
+ positional_dropout_rate,
323
+ self_attention_dropout_rate,
324
+ src_attention_dropout_rate,
325
+ input_layer,
326
+ use_output_layer,
327
+ normalize_before,
328
+ key_bias=key_bias,
329
+ gradient_checkpointing=gradient_checkpointing,
330
+ tie_word_embedding=tie_word_embedding)
331
+
332
+ def forward(
333
+ self,
334
+ memory: torch.Tensor,
335
+ memory_mask: torch.Tensor,
336
+ ys_in_pad: torch.Tensor,
337
+ ys_in_lens: torch.Tensor,
338
+ r_ys_in_pad: torch.Tensor,
339
+ reverse_weight: float = 0.0,
340
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
341
+ """Forward decoder.
342
+ Args:
343
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
344
+ memory_mask: encoder memory mask, (batch, 1, maxlen_in)
345
+ ys_in_pad: padded input token ids, int64 (batch, maxlen_out)
346
+ ys_in_lens: input lengths of this batch (batch)
347
+ r_ys_in_pad: padded input token ids, int64 (batch, maxlen_out),
348
+ used for right to left decoder
349
+ reverse_weight: used for right to left decoder
350
+ Returns:
351
+ (tuple): tuple containing:
352
+ x: decoded token score before softmax (batch, maxlen_out,
353
+ vocab_size) if use_output_layer is True,
354
+ r_x: decoded token score (right to left decoder)
355
+ before softmax (batch, maxlen_out, vocab_size)
356
+ if use_output_layer is True,
357
+ olens: (batch, )
358
+ """
359
+ l_x, _, olens = self.left_decoder(memory, memory_mask, ys_in_pad,
360
+ ys_in_lens)
361
+ r_x = torch.tensor(0.0)
362
+ if reverse_weight > 0.0:
363
+ r_x, _, olens = self.right_decoder(memory, memory_mask,
364
+ r_ys_in_pad, ys_in_lens)
365
+ return l_x, r_x, olens
366
+
367
+ def forward_one_step(
368
+ self,
369
+ memory: torch.Tensor,
370
+ memory_mask: torch.Tensor,
371
+ tgt: torch.Tensor,
372
+ tgt_mask: torch.Tensor,
373
+ cache: Optional[List[torch.Tensor]] = None,
374
+ ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
375
+ """Forward one step.
376
+ This is only used for decoding.
377
+ Args:
378
+ memory: encoded memory, float32 (batch, maxlen_in, feat)
379
+ memory_mask: encoded memory mask, (batch, 1, maxlen_in)
380
+ tgt: input token ids, int64 (batch, maxlen_out)
381
+ tgt_mask: input token mask, (batch, maxlen_out)
382
+ dtype=torch.uint8 in PyTorch 1.2-
383
+ dtype=torch.bool in PyTorch 1.2+ (including 1.2)
384
+ cache: cached output list of (batch, max_time_out-1, size)
385
+ Returns:
386
+ y, cache: NN output value and cache per `self.decoders`.
387
+ `y.shape` is (batch, maxlen_out, token)
388
+ """
389
+ return self.left_decoder.forward_one_step(memory, memory_mask, tgt,
390
+ tgt_mask, cache)
391
+
392
+ def tie_or_clone_weights(self, jit_mode: bool = True):
393
+ """Tie or clone module weights (between word_emb and output_layer)
394
+ depending on whether we are using TorchScript or not"""
395
+ self.left_decoder.tie_or_clone_weights(jit_mode)
396
+ self.right_decoder.tie_or_clone_weights(jit_mode)
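For context, a hedged sketch of how the two directions are typically combined during training (following the usual wenet-style recipe; the training loop itself is not shown in this diff). The `criterion` and the reversed targets `r_ys_out_pad` are assumptions:

```python
import torch

def combined_att_loss(l_x: torch.Tensor, r_x: torch.Tensor,
                      ys_out_pad: torch.Tensor, r_ys_out_pad: torch.Tensor,
                      criterion, reverse_weight: float) -> torch.Tensor:
    # the left-to-right branch is always trained
    loss_l2r = criterion(l_x, ys_out_pad)
    if reverse_weight <= 0.0:
        return loss_l2r
    # the right-to-left branch only contributes when reverse_weight > 0
    loss_r2l = criterion(r_x, r_ys_out_pad)
    return (1.0 - reverse_weight) * loss_l2r + reverse_weight * loss_r2l
```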
speech/cosyvoice/transformer/decoder_layer.py ADDED
@@ -0,0 +1,132 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Decoder self-attention layer definition."""
16
+ from typing import Optional, Tuple
17
+
18
+ import torch
19
+ from torch import nn
20
+
21
+
22
+ class DecoderLayer(nn.Module):
23
+ """Single decoder layer module.
24
+
25
+ Args:
26
+ size (int): Input dimension.
27
+ self_attn (torch.nn.Module): Self-attention module instance.
28
+ `MultiHeadedAttention` instance can be used as the argument.
29
+ src_attn (torch.nn.Module): Inter-attention module instance.
30
+ `MultiHeadedAttention` instance can be used as the argument.
31
+ If `None` is passed, Inter-attention is not used, such as
32
+ CIF, GPT, and other decoder-only models.
33
+ feed_forward (torch.nn.Module): Feed-forward module instance.
34
+ `PositionwiseFeedForward` instance can be used as the argument.
35
+ dropout_rate (float): Dropout rate.
36
+ normalize_before (bool):
37
+ True: use layer_norm before each sub-block.
38
+ False: use layer_norm after each sub-block.
39
+ """
40
+
41
+ def __init__(
42
+ self,
43
+ size: int,
44
+ self_attn: nn.Module,
45
+ src_attn: Optional[nn.Module],
46
+ feed_forward: nn.Module,
47
+ dropout_rate: float,
48
+ normalize_before: bool = True,
49
+ ):
50
+ """Construct an DecoderLayer object."""
51
+ super().__init__()
52
+ self.size = size
53
+ self.self_attn = self_attn
54
+ self.src_attn = src_attn
55
+ self.feed_forward = feed_forward
56
+ self.norm1 = nn.LayerNorm(size, eps=1e-5)
57
+ self.norm2 = nn.LayerNorm(size, eps=1e-5)
58
+ self.norm3 = nn.LayerNorm(size, eps=1e-5)
59
+ self.dropout = nn.Dropout(dropout_rate)
60
+ self.normalize_before = normalize_before
61
+
62
+ def forward(
63
+ self,
64
+ tgt: torch.Tensor,
65
+ tgt_mask: torch.Tensor,
66
+ memory: torch.Tensor,
67
+ memory_mask: torch.Tensor,
68
+ cache: Optional[torch.Tensor] = None
69
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
70
+ """Compute decoded features.
71
+
72
+ Args:
73
+ tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
74
+ tgt_mask (torch.Tensor): Mask for input tensor
75
+ (#batch, maxlen_out).
76
+ memory (torch.Tensor): Encoded memory
77
+ (#batch, maxlen_in, size).
78
+ memory_mask (torch.Tensor): Encoded memory mask
79
+ (#batch, maxlen_in).
80
+ cache (torch.Tensor): cached tensors.
81
+ (#batch, maxlen_out - 1, size).
82
+
83
+ Returns:
84
+ torch.Tensor: Output tensor (#batch, maxlen_out, size).
85
+ torch.Tensor: Mask for output tensor (#batch, maxlen_out).
86
+ torch.Tensor: Encoded memory (#batch, maxlen_in, size).
87
+ torch.Tensor: Encoded memory mask (#batch, maxlen_in).
88
+
89
+ """
90
+ residual = tgt
91
+ if self.normalize_before:
92
+ tgt = self.norm1(tgt)
93
+
94
+ if cache is None:
95
+ tgt_q = tgt
96
+ tgt_q_mask = tgt_mask
97
+ else:
98
+ # compute only the last frame query keeping dim: max_time_out -> 1
99
+ assert cache.shape == (
100
+ tgt.shape[0],
101
+ tgt.shape[1] - 1,
102
+ self.size,
103
+ ), "{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
104
+ tgt_q = tgt[:, -1:, :]
105
+ residual = residual[:, -1:, :]
106
+ tgt_q_mask = tgt_mask[:, -1:, :]
107
+
108
+ x = residual + self.dropout(
109
+ self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0])
110
+ if not self.normalize_before:
111
+ x = self.norm1(x)
112
+
113
+ if self.src_attn is not None:
114
+ residual = x
115
+ if self.normalize_before:
116
+ x = self.norm2(x)
117
+ x = residual + self.dropout(
118
+ self.src_attn(x, memory, memory, memory_mask)[0])
119
+ if not self.normalize_before:
120
+ x = self.norm2(x)
121
+
122
+ residual = x
123
+ if self.normalize_before:
124
+ x = self.norm3(x)
125
+ x = residual + self.dropout(self.feed_forward(x))
126
+ if not self.normalize_before:
127
+ x = self.norm3(x)
128
+
129
+ if cache is not None:
130
+ x = torch.cat([cache, x], dim=1)
131
+
132
+ return x, tgt_mask, memory, memory_mask
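A small illustrative trace (assumed toy sizes, not code from the commit) of the cached path above: only the newest position is queried, and the layer output is reassembled so it can serve as the next step's cache:

```python
import torch

B, t, size = 1, 5, 8
tgt = torch.randn(B, t, size)             # embedded prefix at step t
cache = torch.randn(B, t - 1, size)       # this layer's outputs for steps < t
tgt_q = tgt[:, -1:, :]                    # only the newest query, (B, 1, size)
new_frame = torch.randn(B, 1, size)       # stand-in for the attention/FFN result
x = torch.cat([cache, new_frame], dim=1)  # (B, t, size): next step's cache
assert x.shape == (B, t, size)
```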
speech/cosyvoice/transformer/embedding.py ADDED
@@ -0,0 +1,302 @@
1
+ # Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
2
+ # 2024 Alibaba Inc (Xiang Lyu)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """Positonal Encoding Module."""
17
+
18
+ import math
19
+ from typing import Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn.functional as F
23
+ import numpy as np
24
+
25
+
26
+ class PositionalEncoding(torch.nn.Module):
27
+ """Positional encoding.
28
+
29
+ :param int d_model: embedding dim
30
+ :param float dropout_rate: dropout rate
31
+ :param int max_len: maximum input length
32
+
33
+ PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))
34
+ PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
35
+ """
36
+
37
+ def __init__(self,
38
+ d_model: int,
39
+ dropout_rate: float,
40
+ max_len: int = 5000,
41
+ reverse: bool = False):
42
+ """Construct an PositionalEncoding object."""
43
+ super().__init__()
44
+ self.d_model = d_model
45
+ self.xscale = math.sqrt(self.d_model)
46
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
47
+ self.max_len = max_len
48
+
49
+ self.pe = torch.zeros(self.max_len, self.d_model)
50
+ position = torch.arange(0, self.max_len,
51
+ dtype=torch.float32).unsqueeze(1)
52
+ div_term = torch.exp(
53
+ torch.arange(0, self.d_model, 2, dtype=torch.float32) *
54
+ -(math.log(10000.0) / self.d_model))
55
+ self.pe[:, 0::2] = torch.sin(position * div_term)
56
+ self.pe[:, 1::2] = torch.cos(position * div_term)
57
+ self.pe = self.pe.unsqueeze(0)
58
+
59
+ def forward(self,
60
+ x: torch.Tensor,
61
+ offset: Union[int, torch.Tensor] = 0) \
62
+ -> Tuple[torch.Tensor, torch.Tensor]:
63
+ """Add positional encoding.
64
+
65
+ Args:
66
+ x (torch.Tensor): Input. Its shape is (batch, time, ...)
67
+ offset (int, torch.tensor): position offset
68
+
69
+ Returns:
70
+ torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
71
+ torch.Tensor: for compatibility to RelPositionalEncoding
72
+ """
73
+
74
+ self.pe = self.pe.to(x.device)
75
+ pos_emb = self.position_encoding(offset, x.size(1), False)
76
+ x = x * self.xscale + pos_emb
77
+ return self.dropout(x), self.dropout(pos_emb)
78
+
79
+ def position_encoding(self,
80
+ offset: Union[int, torch.Tensor],
81
+ size: int,
82
+ apply_dropout: bool = True) -> torch.Tensor:
83
+ """ For getting encoding in a streaming fashion
84
+
85
+ Attention!!!!!
86
+ we apply dropout only once at the whole utterance level in a non-
87
+ streaming way, but will call this function several times with
88
+ increasing input size in a streaming scenario, so the dropout will
89
+ be applied several times.
90
+
91
+ Args:
92
+ offset (int or torch.tensor): start offset
93
+ size (int): required size of position encoding
94
+
95
+ Returns:
96
+ torch.Tensor: Corresponding encoding
97
+ """
98
+ # How to subscript a Union type:
99
+ # https://github.com/pytorch/pytorch/issues/69434
100
+ if isinstance(offset, int):
101
+ assert offset + size <= self.max_len
102
+ pos_emb = self.pe[:, offset:offset + size]
103
+ elif isinstance(offset, torch.Tensor) and offset.dim() == 0: # scalar
104
+ assert offset + size <= self.max_len
105
+ pos_emb = self.pe[:, offset:offset + size]
106
+ else: # for batched streaming decoding on GPU
107
+ assert torch.max(offset) + size <= self.max_len
108
+ index = offset.unsqueeze(1) + \
109
+ torch.arange(0, size).to(offset.device) # B X T
110
+ flag = index > 0
111
+ # remove negative offset
112
+ index = index * flag
113
+ pos_emb = F.embedding(index, self.pe[0]) # B X T X d_model
114
+
115
+ if apply_dropout:
116
+ pos_emb = self.dropout(pos_emb)
117
+ return pos_emb
118
+
119
+
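A minimal numeric check (self-contained, same formula) of the closed-form sinusoidal table built in `__init__` above:

```python
import math
import torch

d_model, max_len = 8, 16
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float32)
                     * -(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
# PE(pos, 2i) = sin(pos / 10000^(2i/d_model)); spot-check pos=3, 2i=2:
expected = math.sin(3.0 / 10000 ** (2 / d_model))
assert abs(pe[3, 2].item() - expected) < 1e-6
```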
120
+ class RelPositionalEncoding(PositionalEncoding):
121
+ """Relative positional encoding module.
122
+ See : Appendix B in https://arxiv.org/abs/1901.02860
123
+ Args:
124
+ d_model (int): Embedding dimension.
125
+ dropout_rate (float): Dropout rate.
126
+ max_len (int): Maximum input length.
127
+ """
128
+
129
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
130
+ """Initialize class."""
131
+ super().__init__(d_model, dropout_rate, max_len, reverse=True)
132
+
133
+ def forward(self,
134
+ x: torch.Tensor,
135
+ offset: Union[int, torch.Tensor] = 0) \
136
+ -> Tuple[torch.Tensor, torch.Tensor]:
137
+ """Compute positional encoding.
138
+ Args:
139
+ x (torch.Tensor): Input tensor (batch, time, `*`).
140
+ Returns:
141
+ torch.Tensor: Encoded tensor (batch, time, `*`).
142
+ torch.Tensor: Positional embedding tensor (1, time, `*`).
143
+ """
144
+ self.pe = self.pe.to(x.device)
145
+ x = x * self.xscale
146
+ pos_emb = self.position_encoding(offset, x.size(1), False)
147
+ return self.dropout(x), self.dropout(pos_emb)
148
+
149
+
150
+ class WhisperPositionalEncoding(PositionalEncoding):
151
+ """ Sinusoids position encoding used in openai-whisper.encoder
152
+ """
153
+
154
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 1500):
155
+ super().__init__(d_model, dropout_rate, max_len)
156
+ self.xscale = 1.0
157
+ log_timescale_increment = np.log(10000) / (d_model // 2 - 1)
158
+ inv_timescales = torch.exp(-log_timescale_increment *
159
+ torch.arange(d_model // 2))
160
+ scaled_time = torch.arange(max_len)[:, np.newaxis] * \
161
+ inv_timescales[np.newaxis, :]
162
+ pe = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
163
+ delattr(self, "pe")
164
+ self.register_buffer("pe", pe.unsqueeze(0))
165
+
166
+
167
+ class LearnablePositionalEncoding(PositionalEncoding):
168
+ """ Learnable position encoding used in openai-whisper.decoder
169
+ """
170
+
171
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 448):
172
+ super().__init__(d_model, dropout_rate, max_len)
173
+ # NOTE(xcsong): overwrite self.pe & self.xscale
174
+ self.pe = torch.nn.Parameter(torch.empty(1, max_len, d_model))
175
+ self.xscale = 1.0
176
+
177
+
178
+ class NoPositionalEncoding(torch.nn.Module):
179
+ """ No position encoding
180
+ """
181
+
182
+ def __init__(self, d_model: int, dropout_rate: float):
183
+ super().__init__()
184
+ self.d_model = d_model
185
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
186
+
187
+ def forward(self,
188
+ x: torch.Tensor,
189
+ offset: Union[int, torch.Tensor] = 0) \
190
+ -> Tuple[torch.Tensor, torch.Tensor]:
191
+ """ Just return zero vector for interface compatibility
192
+ """
193
+ pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)
194
+ return self.dropout(x), pos_emb
195
+
196
+ def position_encoding(self, offset: Union[int, torch.Tensor],
197
+ size: int) -> torch.Tensor:
198
+ return torch.zeros(1, size, self.d_model)
199
+
200
+
201
+ class EspnetRelPositionalEncoding(torch.nn.Module):
202
+ """Relative positional encoding module (new implementation).
203
+
204
+ Details can be found in https://github.com/espnet/espnet/pull/2816.
205
+
206
+ See : Appendix B in https://arxiv.org/abs/1901.02860
207
+
208
+ Args:
209
+ d_model (int): Embedding dimension.
210
+ dropout_rate (float): Dropout rate.
211
+ max_len (int): Maximum input length.
212
+
213
+ """
214
+
215
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
216
+ """Construct an PositionalEncoding object."""
217
+ super(EspnetRelPositionalEncoding, self).__init__()
218
+ self.d_model = d_model
219
+ self.xscale = math.sqrt(self.d_model)
220
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
221
+ self.pe = None
222
+ self.extend_pe(torch.tensor(0.0).expand(1, max_len))
223
+
224
+ def extend_pe(self, x: torch.Tensor):
225
+ """Reset the positional encodings."""
226
+ if self.pe is not None:
227
+ # self.pe contains both positive and negative parts
228
+ # the length of self.pe is 2 * input_len - 1
229
+ if self.pe.size(1) >= x.size(1) * 2 - 1:
230
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
231
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
232
+ return
233
+ # Suppose `i` is the position of the query vector and `j` is the
234
+ # position of the key vector. We use positive relative positions when keys
235
+ # are to the left (i>j) and negative relative positions otherwise (i<j).
236
+ pe_positive = torch.zeros(x.size(1), self.d_model)
237
+ pe_negative = torch.zeros(x.size(1), self.d_model)
238
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
239
+ div_term = torch.exp(
240
+ torch.arange(0, self.d_model, 2, dtype=torch.float32)
241
+ * -(math.log(10000.0) / self.d_model)
242
+ )
243
+ pe_positive[:, 0::2] = torch.sin(position * div_term)
244
+ pe_positive[:, 1::2] = torch.cos(position * div_term)
245
+ pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
246
+ pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
247
+
248
+ # Reverse the order of positive indices and concat both positive and
249
+ # negative indices. This is used to support the shifting trick
250
+ # as in https://arxiv.org/abs/1901.02860
251
+ pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
252
+ pe_negative = pe_negative[1:].unsqueeze(0)
253
+ pe = torch.cat([pe_positive, pe_negative], dim=1)
254
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
255
+
256
+ def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0) \
257
+ -> Tuple[torch.Tensor, torch.Tensor]:
258
+ """Add positional encoding.
259
+
260
+ Args:
261
+ x (torch.Tensor): Input tensor (batch, time, `*`).
262
+
263
+ Returns:
264
+ torch.Tensor: Encoded tensor (batch, time, `*`).
265
+
266
+ """
267
+ self.extend_pe(x)
268
+ x = x * self.xscale
269
+ pos_emb = self.position_encoding(size=x.size(1), offset=offset)
270
+ return self.dropout(x), self.dropout(pos_emb)
271
+
272
+ def position_encoding(self,
273
+ offset: Union[int, torch.Tensor],
274
+ size: int) -> torch.Tensor:
275
+ """ For getting encoding in a streaming fashion
276
+
277
+ Attention!!!!!
278
+ we apply dropout only once at the whole utterance level in a non-
279
+ streaming way, but will call this function several times with
280
+ increasing input size in a streaming scenario, so the dropout will
281
+ be applied several times.
282
+
283
+ Args:
284
+ offset (int or torch.tensor): start offset
285
+ size (int): required size of position encoding
286
+
287
+ Returns:
288
+ torch.Tensor: Corresponding encoding
289
+ """
290
+ # How to subscript a Union type:
291
+ # https://github.com/pytorch/pytorch/issues/69434
292
+ if isinstance(offset, int):
293
+ pos_emb = self.pe[
294
+ :,
295
+ self.pe.size(1) // 2 - size - offset + 1: self.pe.size(1) // 2 + size + offset,
296
+ ]
297
+ elif isinstance(offset, torch.Tensor):
298
+ pos_emb = self.pe[
299
+ :,
300
+ self.pe.size(1) // 2 - size - offset + 1: self.pe.size(1) // 2 + size + offset,
301
+ ]
302
+ return pos_emb
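A hedged usage sketch (assuming the `cosyvoice` package is importable as laid out in this commit): the ESPnet-style table spans relative positions -(T-1)..(T-1), so an input of length T yields a positional embedding of length 2*T - 1:

```python
import torch
from cosyvoice.transformer.embedding import EspnetRelPositionalEncoding

enc = EspnetRelPositionalEncoding(d_model=64, dropout_rate=0.0, max_len=128)
x = torch.randn(2, 10, 64)
y, pos_emb = enc(x)
assert y.shape == x.shape
assert pos_emb.size(1) == 2 * x.size(1) - 1  # 19 relative positions for T=10
```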
speech/cosyvoice/transformer/encoder.py ADDED
@@ -0,0 +1,474 @@
1
+ # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
2
+ # 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
3
+ # 2024 Alibaba Inc (Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ # Modified from ESPnet(https://github.com/espnet/espnet)
17
+ """Encoder definition."""
18
+ from typing import Tuple
19
+
20
+ import torch
21
+ import torch.utils.checkpoint as ckpt
22
+
23
+ from cosyvoice.transformer.convolution import ConvolutionModule
24
+ from cosyvoice.transformer.encoder_layer import TransformerEncoderLayer
25
+ from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer
26
+ from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
27
+ from cosyvoice.utils.class_utils import (
28
+ COSYVOICE_EMB_CLASSES,
29
+ COSYVOICE_SUBSAMPLE_CLASSES,
30
+ COSYVOICE_ATTENTION_CLASSES,
31
+ COSYVOICE_ACTIVATION_CLASSES,
32
+ )
33
+ from cosyvoice.utils.mask import make_pad_mask
34
+ from cosyvoice.utils.mask import add_optional_chunk_mask
35
+
36
+
37
+ class BaseEncoder(torch.nn.Module):
38
+
39
+ def __init__(
40
+ self,
41
+ input_size: int,
42
+ output_size: int = 256,
43
+ attention_heads: int = 4,
44
+ linear_units: int = 2048,
45
+ num_blocks: int = 6,
46
+ dropout_rate: float = 0.1,
47
+ positional_dropout_rate: float = 0.1,
48
+ attention_dropout_rate: float = 0.0,
49
+ input_layer: str = "conv2d",
50
+ pos_enc_layer_type: str = "abs_pos",
51
+ normalize_before: bool = True,
52
+ static_chunk_size: int = 0,
53
+ use_dynamic_chunk: bool = False,
54
+ global_cmvn: torch.nn.Module = None,
55
+ use_dynamic_left_chunk: bool = False,
56
+ gradient_checkpointing: bool = False,
57
+ ):
58
+ """
59
+ Args:
60
+ input_size (int): input dim
61
+ output_size (int): dimension of attention
62
+ attention_heads (int): the number of heads of multi head attention
63
+ linear_units (int): the hidden units number of position-wise feed
64
+ forward
65
+ num_blocks (int): the number of encoder blocks
66
+ dropout_rate (float): dropout rate
67
+ attention_dropout_rate (float): dropout rate in attention
68
+ positional_dropout_rate (float): dropout rate after adding
69
+ positional encoding
70
+ input_layer (str): input layer type.
71
+ optional [linear, conv2d, conv2d6, conv2d8]
72
+ pos_enc_layer_type (str): Encoder positional encoding layer type.
73
+ optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
74
+ normalize_before (bool):
75
+ True: use layer_norm before each sub-block of a layer.
76
+ False: use layer_norm after each sub-block of a layer.
77
+ static_chunk_size (int): chunk size for static chunk training and
78
+ decoding
79
+ use_dynamic_chunk (bool): whether to use dynamic chunk size for
80
+ training or not. You can only use a fixed chunk (chunk_size > 0)
81
+ or dynamic chunk size (use_dynamic_chunk = True)
82
+ global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
83
+ use_dynamic_left_chunk (bool): whether use dynamic left chunk in
84
+ dynamic chunk training
85
+ key_bias: whether to use bias in attention.linear_k; False for whisper models.
86
+ gradient_checkpointing: rerunning a forward-pass segment for each
87
+ checkpointed segment during backward.
88
+ """
89
+ super().__init__()
90
+ self._output_size = output_size
91
+
92
+ self.global_cmvn = global_cmvn
93
+ self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
94
+ input_size,
95
+ output_size,
96
+ dropout_rate,
97
+ COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
98
+ positional_dropout_rate),
99
+ )
100
+
101
+ self.normalize_before = normalize_before
102
+ self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
103
+ self.static_chunk_size = static_chunk_size
104
+ self.use_dynamic_chunk = use_dynamic_chunk
105
+ self.use_dynamic_left_chunk = use_dynamic_left_chunk
106
+ self.gradient_checkpointing = gradient_checkpointing
107
+
108
+ def output_size(self) -> int:
109
+ return self._output_size
110
+
111
+ def forward(
112
+ self,
113
+ xs: torch.Tensor,
114
+ xs_lens: torch.Tensor,
115
+ decoding_chunk_size: int = 0,
116
+ num_decoding_left_chunks: int = -1,
117
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
118
+ """Embed positions in tensor.
119
+
120
+ Args:
121
+ xs: padded input tensor (B, T, D)
122
+ xs_lens: input length (B)
123
+ decoding_chunk_size: decoding chunk size for dynamic chunk
124
+ 0: default for training, use random dynamic chunk.
125
+ <0: for decoding, use full chunk.
126
+ >0: for decoding, use fixed chunk size as set.
127
+ num_decoding_left_chunks: number of left chunks, this is for decoding,
128
+ the chunk size is decoding_chunk_size.
129
+ >=0: use num_decoding_left_chunks
130
+ <0: use all left chunks
131
+ Returns:
132
+ encoder output tensor xs, and subsampled masks
133
+ xs: padded output tensor (B, T' ~= T/subsample_rate, D)
134
+ masks: torch.Tensor batch padding mask after subsample
135
+ (B, 1, T' ~= T/subsample_rate)
136
+ NOTE(xcsong):
137
+ We pass the `__call__` method of the modules instead of `forward` to the
138
+ checkpointing API because `__call__` attaches all the hooks of the module.
139
+ https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
140
+ """
141
+ T = xs.size(1)
142
+ masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T)
143
+ if self.global_cmvn is not None:
144
+ xs = self.global_cmvn(xs)
145
+ xs, pos_emb, masks = self.embed(xs, masks)
146
+ mask_pad = masks # (B, 1, T/subsample_rate)
147
+ chunk_masks = add_optional_chunk_mask(xs, masks,
148
+ self.use_dynamic_chunk,
149
+ self.use_dynamic_left_chunk,
150
+ decoding_chunk_size,
151
+ self.static_chunk_size,
152
+ num_decoding_left_chunks)
153
+ if self.gradient_checkpointing and self.training:
154
+ xs = self.forward_layers_checkpointed(xs, chunk_masks, pos_emb,
155
+ mask_pad)
156
+ else:
157
+ xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
158
+ if self.normalize_before:
159
+ xs = self.after_norm(xs)
160
+ # Here we assume the mask is not changed in encoder layers, so just
161
+ # return the masks before encoder layers, and the masks will be used
162
+ # for cross attention with decoder later
163
+ return xs, masks
164
+
165
+ def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
166
+ pos_emb: torch.Tensor,
167
+ mask_pad: torch.Tensor) -> torch.Tensor:
168
+ for layer in self.encoders:
169
+ xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
170
+ return xs
171
+
172
+ @torch.jit.unused
173
+ def forward_layers_checkpointed(self, xs: torch.Tensor,
174
+ chunk_masks: torch.Tensor,
175
+ pos_emb: torch.Tensor,
176
+ mask_pad: torch.Tensor) -> torch.Tensor:
177
+ for layer in self.encoders:
178
+ xs, chunk_masks, _, _ = ckpt.checkpoint(layer.__call__, xs,
179
+ chunk_masks, pos_emb,
180
+ mask_pad)
181
+ return xs
182
+
183
+ @torch.jit.export
184
+ def forward_chunk(
185
+ self,
186
+ xs: torch.Tensor,
187
+ offset: int,
188
+ required_cache_size: int,
189
+ att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
190
+ cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
191
+ att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
192
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
193
+ """ Forward just one chunk
194
+
195
+ Args:
196
+ xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim),
197
+ where `time == (chunk_size - 1) * subsample_rate + \
198
+ subsample.right_context + 1`
199
+ offset (int): current offset in encoder output time stamp
200
+ required_cache_size (int): cache size required for next chunk
201
+ computation
202
+ >=0: actual cache size
203
+ <0: means all history cache is required
204
+ att_cache (torch.Tensor): cache tensor for KEY & VALUE in
205
+ transformer/conformer attention, with shape
206
+ (elayers, head, cache_t1, d_k * 2), where
207
+ `head * d_k == hidden-dim` and
208
+ `cache_t1 == chunk_size * num_decoding_left_chunks`.
209
+ cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer,
210
+ (elayers, b=1, hidden-dim, cache_t2), where
211
+ `cache_t2 == cnn.lorder - 1`
212
+
213
+ Returns:
214
+ torch.Tensor: output of current input xs,
215
+ with shape (b=1, chunk_size, hidden-dim).
216
+ torch.Tensor: new attention cache required for next chunk, with
217
+ dynamic shape (elayers, head, ?, d_k * 2)
218
+ depending on required_cache_size.
219
+ torch.Tensor: new conformer cnn cache required for next chunk, with
220
+ same shape as the original cnn_cache.
221
+
222
+ """
223
+ assert xs.size(0) == 1
224
+ # tmp_masks is just for interface compatibility
225
+ tmp_masks = torch.ones(1,
226
+ xs.size(1),
227
+ device=xs.device,
228
+ dtype=torch.bool)
229
+ tmp_masks = tmp_masks.unsqueeze(1)
230
+ if self.global_cmvn is not None:
231
+ xs = self.global_cmvn(xs)
232
+ # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim)
233
+ xs, pos_emb, _ = self.embed(xs, tmp_masks, offset)
234
+ # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim)
235
+ elayers, cache_t1 = att_cache.size(0), att_cache.size(2)
236
+ chunk_size = xs.size(1)
237
+ attention_key_size = cache_t1 + chunk_size
238
+ pos_emb = self.embed.position_encoding(offset=offset - cache_t1,
239
+ size=attention_key_size)
240
+ if required_cache_size < 0:
241
+ next_cache_start = 0
242
+ elif required_cache_size == 0:
243
+ next_cache_start = attention_key_size
244
+ else:
245
+ next_cache_start = max(attention_key_size - required_cache_size, 0)
246
+ r_att_cache = []
247
+ r_cnn_cache = []
248
+ for i, layer in enumerate(self.encoders):
249
+ # NOTE(xcsong): Before layer.forward
250
+ # shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2),
251
+ # shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2)
252
+ xs, _, new_att_cache, new_cnn_cache = layer(
253
+ xs,
254
+ att_mask,
255
+ pos_emb,
256
+ att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache,
257
+ cnn_cache=cnn_cache[i] if cnn_cache.size(0) > 0 else cnn_cache)
258
+ # NOTE(xcsong): After layer.forward
259
+ # shape(new_att_cache) is (1, head, attention_key_size, d_k * 2),
260
+ # shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2)
261
+ r_att_cache.append(new_att_cache[:, :, next_cache_start:, :])
262
+ r_cnn_cache.append(new_cnn_cache.unsqueeze(0))
263
+ if self.normalize_before:
264
+ xs = self.after_norm(xs)
265
+
266
+ # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2),
267
+ # ? may be larger than cache_t1, it depends on required_cache_size
268
+ r_att_cache = torch.cat(r_att_cache, dim=0)
269
+ # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2)
270
+ r_cnn_cache = torch.cat(r_cnn_cache, dim=0)
271
+
272
+ return (xs, r_att_cache, r_cnn_cache)
273
+
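A worked example (assumed numbers) of the `next_cache_start` rule above:

```python
cache_t1, chunk_size = 64, 16
attention_key_size = cache_t1 + chunk_size  # 80 keys available this step
required_cache_size = 4 * chunk_size        # keep 4 left chunks of context
next_cache_start = max(attention_key_size - required_cache_size, 0)  # 16
# the 16 oldest frames are trimmed, so the returned att_cache stays at 64
assert attention_key_size - next_cache_start == required_cache_size
```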
274
+ @torch.jit.unused
275
+ def forward_chunk_by_chunk(
276
+ self,
277
+ xs: torch.Tensor,
278
+ decoding_chunk_size: int,
279
+ num_decoding_left_chunks: int = -1,
280
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
281
+ """ Forward input chunk by chunk with chunk_size like a streaming
282
+ fashion
283
+
284
+ Here we should pay special attention to computation cache in the
285
+ streaming style forward chunk by chunk. Three things should be taken
286
+ into account for computation in the current network:
287
+ 1. transformer/conformer encoder layers output cache
288
+ 2. convolution in conformer
289
+ 3. convolution in subsampling
290
+
291
+ However, we don't implement subsampling cache for:
292
+ 1. We can make the subsampling module output the right result by
293
+ overlapping the input instead of caching left context; even though it
294
+ wastes some computation, subsampling only takes a very
295
+ small fraction of the computation in the whole model.
296
+ 2. Typically, there are several convolution layers with subsampling
297
+ in the subsampling module; it is tricky and complicated to cache
298
+ across convolution layers with different subsampling
299
+ rates.
300
+ 3. Currently, nn.Sequential is used to stack all the convolution
301
+ layers in subsampling; we would need to rewrite it to make it work
302
+ with cache, which is not preferred.
303
+ Args:
304
+ xs (torch.Tensor): (1, max_len, dim)
305
+ decoding_chunk_size (int): decoding chunk size
306
+ """
307
+ assert decoding_chunk_size > 0
308
+ # The model is trained by static or dynamic chunk
309
+ assert self.static_chunk_size > 0 or self.use_dynamic_chunk
310
+ subsampling = self.embed.subsampling_rate
311
+ context = self.embed.right_context + 1 # Add current frame
312
+ stride = subsampling * decoding_chunk_size
313
+ decoding_window = (decoding_chunk_size - 1) * subsampling + context
314
+ num_frames = xs.size(1)
315
+ att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
316
+ cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
317
+ outputs = []
318
+ offset = 0
319
+ required_cache_size = decoding_chunk_size * num_decoding_left_chunks
320
+
321
+ # Feed forward overlap input step by step
322
+ for cur in range(0, num_frames - context + 1, stride):
323
+ end = min(cur + decoding_window, num_frames)
324
+ chunk_xs = xs[:, cur:end, :]
325
+ (y, att_cache,
326
+ cnn_cache) = self.forward_chunk(chunk_xs, offset,
327
+ required_cache_size, att_cache,
328
+ cnn_cache)
329
+ outputs.append(y)
330
+ offset += y.size(1)
331
+ ys = torch.cat(outputs, 1)
332
+ masks = torch.ones((1, 1, ys.size(1)),
333
+ device=ys.device,
334
+ dtype=torch.bool)
335
+ return ys, masks
336
+
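A worked example of the window arithmetic above (a right context of 6 is assumed here, as for a typical 4x convolutional subsampling front-end):

```python
subsampling, right_context, decoding_chunk_size = 4, 6, 16
context = right_context + 1                                          # 7
stride = subsampling * decoding_chunk_size                           # 64 frames
decoding_window = (decoding_chunk_size - 1) * subsampling + context  # 67 frames
# each step reads 67 overlapping input frames, advances by 64, and emits
# decoding_chunk_size (16) subsampled encoder frames
assert decoding_window - stride == context - subsampling
```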
337
+
338
+ class TransformerEncoder(BaseEncoder):
339
+ """Transformer encoder module."""
340
+
341
+ def __init__(
342
+ self,
343
+ input_size: int,
344
+ output_size: int = 256,
345
+ attention_heads: int = 4,
346
+ linear_units: int = 2048,
347
+ num_blocks: int = 6,
348
+ dropout_rate: float = 0.1,
349
+ positional_dropout_rate: float = 0.1,
350
+ attention_dropout_rate: float = 0.0,
351
+ input_layer: str = "conv2d",
352
+ pos_enc_layer_type: str = "abs_pos",
353
+ normalize_before: bool = True,
354
+ static_chunk_size: int = 0,
355
+ use_dynamic_chunk: bool = False,
356
+ global_cmvn: torch.nn.Module = None,
357
+ use_dynamic_left_chunk: bool = False,
358
+ key_bias: bool = True,
359
+ selfattention_layer_type: str = "selfattn",
360
+ activation_type: str = "relu",
361
+ gradient_checkpointing: bool = False,
362
+ ):
363
+ """ Construct TransformerEncoder
364
+
365
+ See Encoder for the meaning of each parameter.
366
+ """
367
+ super().__init__(input_size, output_size, attention_heads,
368
+ linear_units, num_blocks, dropout_rate,
369
+ positional_dropout_rate, attention_dropout_rate,
370
+ input_layer, pos_enc_layer_type, normalize_before,
371
+ static_chunk_size, use_dynamic_chunk, global_cmvn,
372
+ use_dynamic_left_chunk, gradient_checkpointing)
373
+ activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
374
+ self.encoders = torch.nn.ModuleList([
375
+ TransformerEncoderLayer(
376
+ output_size,
377
+ COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](attention_heads,
378
+ output_size,
379
+ attention_dropout_rate,
380
+ key_bias),
381
+ PositionwiseFeedForward(output_size, linear_units,
382
+ dropout_rate, activation),
383
+ dropout_rate, normalize_before) for _ in range(num_blocks)
384
+ ])
385
+
386
+
387
+ class ConformerEncoder(BaseEncoder):
388
+ """Conformer encoder module."""
389
+
390
+ def __init__(
391
+ self,
392
+ input_size: int,
393
+ output_size: int = 256,
394
+ attention_heads: int = 4,
395
+ linear_units: int = 2048,
396
+ num_blocks: int = 6,
397
+ dropout_rate: float = 0.1,
398
+ positional_dropout_rate: float = 0.1,
399
+ attention_dropout_rate: float = 0.0,
400
+ input_layer: str = "conv2d",
401
+ pos_enc_layer_type: str = "rel_pos",
402
+ normalize_before: bool = True,
403
+ static_chunk_size: int = 0,
404
+ use_dynamic_chunk: bool = False,
405
+ global_cmvn: torch.nn.Module = None,
406
+ use_dynamic_left_chunk: bool = False,
407
+ positionwise_conv_kernel_size: int = 1,
408
+ macaron_style: bool = True,
409
+ selfattention_layer_type: str = "rel_selfattn",
410
+ activation_type: str = "swish",
411
+ use_cnn_module: bool = True,
412
+ cnn_module_kernel: int = 15,
413
+ causal: bool = False,
414
+ cnn_module_norm: str = "batch_norm",
415
+ key_bias: bool = True,
416
+ gradient_checkpointing: bool = False,
417
+ ):
418
+ """Construct ConformerEncoder
419
+
420
+ Args:
421
+ input_size to use_dynamic_chunk: see BaseEncoder
422
+ positionwise_conv_kernel_size (int): Kernel size of positionwise
423
+ conv1d layer.
424
+ macaron_style (bool): Whether to use macaron style for
425
+ positionwise layer.
426
+ selfattention_layer_type (str): Encoder attention layer type,
427
+ the parameter has no effect now, it's just for configuration
428
+ compatibility.
429
+ activation_type (str): Encoder activation function type.
430
+ use_cnn_module (bool): Whether to use convolution module.
431
+ cnn_module_kernel (int): Kernel size of convolution module.
432
+ causal (bool): whether to use causal convolution or not.
433
+ key_bias: whether to use bias in attention.linear_k; False for whisper models.
434
+ """
435
+ super().__init__(input_size, output_size, attention_heads,
436
+ linear_units, num_blocks, dropout_rate,
437
+ positional_dropout_rate, attention_dropout_rate,
438
+ input_layer, pos_enc_layer_type, normalize_before,
439
+ static_chunk_size, use_dynamic_chunk, global_cmvn,
440
+ use_dynamic_left_chunk, gradient_checkpointing)
441
+ activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
442
+
443
+ # self-attention module definition
444
+ encoder_selfattn_layer_args = (
445
+ attention_heads,
446
+ output_size,
447
+ attention_dropout_rate,
448
+ key_bias,
449
+ )
450
+ # feed-forward module definition
451
+ positionwise_layer_args = (
452
+ output_size,
453
+ linear_units,
454
+ dropout_rate,
455
+ activation,
456
+ )
457
+ # convolution module definition
458
+ convolution_layer_args = (output_size, cnn_module_kernel, activation,
459
+ cnn_module_norm, causal)
460
+
461
+ self.encoders = torch.nn.ModuleList([
462
+ ConformerEncoderLayer(
463
+ output_size,
464
+ COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
465
+ *encoder_selfattn_layer_args),
466
+ PositionwiseFeedForward(*positionwise_layer_args),
467
+ PositionwiseFeedForward(
468
+ *positionwise_layer_args) if macaron_style else None,
469
+ ConvolutionModule(
470
+ *convolution_layer_args) if use_cnn_module else None,
471
+ dropout_rate,
472
+ normalize_before,
473
+ ) for _ in range(num_blocks)
474
+ ])
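A hedged smoke-test sketch (assuming the package and its default `rel_pos`/`rel_selfattn` components resolve as registered in `cosyvoice/utils/class_utils.py`):

```python
import torch
from cosyvoice.transformer.encoder import ConformerEncoder

encoder = ConformerEncoder(input_size=80, output_size=256,
                           attention_heads=4, num_blocks=2)
xs = torch.randn(2, 100, 80)      # (B, T, n_mels)
xs_lens = torch.tensor([100, 80])
ys, masks = encoder(xs, xs_lens)
print(ys.shape, masks.shape)      # about T/4 frames after conv2d subsampling
```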
speech/cosyvoice/transformer/encoder_layer.py ADDED
@@ -0,0 +1,236 @@
1
+ # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
2
+ # 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # Modified from ESPnet(https://github.com/espnet/espnet)
16
+ """Encoder self-attention layer definition."""
17
+
18
+ from typing import Optional, Tuple
19
+
20
+ import torch
21
+ from torch import nn
22
+
23
+
24
+ class TransformerEncoderLayer(nn.Module):
25
+ """Encoder layer module.
26
+
27
+ Args:
28
+ size (int): Input dimension.
29
+ self_attn (torch.nn.Module): Self-attention module instance.
30
+ `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
31
+ instance can be used as the argument.
32
+ feed_forward (torch.nn.Module): Feed-forward module instance.
33
+ `PositionwiseFeedForward`, instance can be used as the argument.
34
+ dropout_rate (float): Dropout rate.
35
+ normalize_before (bool):
36
+ True: use layer_norm before each sub-block.
37
+ False: use layer_norm after each sub-block.
38
+ """
39
+
40
+ def __init__(
41
+ self,
42
+ size: int,
43
+ self_attn: torch.nn.Module,
44
+ feed_forward: torch.nn.Module,
45
+ dropout_rate: float,
46
+ normalize_before: bool = True,
47
+ ):
48
+ """Construct an EncoderLayer object."""
49
+ super().__init__()
50
+ self.self_attn = self_attn
51
+ self.feed_forward = feed_forward
52
+ self.norm1 = nn.LayerNorm(size, eps=1e-12)
53
+ self.norm2 = nn.LayerNorm(size, eps=1e-12)
54
+ self.dropout = nn.Dropout(dropout_rate)
55
+ self.size = size
56
+ self.normalize_before = normalize_before
57
+
58
+ def forward(
59
+ self,
60
+ x: torch.Tensor,
61
+ mask: torch.Tensor,
62
+ pos_emb: torch.Tensor,
63
+ mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
64
+ att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
65
+ cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
66
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
67
+ """Compute encoded features.
68
+
69
+ Args:
70
+ x (torch.Tensor): (#batch, time, size)
71
+ mask (torch.Tensor): Mask tensor for the input (#batch, time,time),
72
+ (0, 0, 0) means fake mask.
73
+ pos_emb (torch.Tensor): just for interface compatibility
74
+ to ConformerEncoderLayer
75
+ mask_pad (torch.Tensor): not used in the transformer layer,
76
+ just for a unified API with the conformer.
77
+ att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
78
+ (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
79
+ cnn_cache (torch.Tensor): Convolution cache in conformer layer
80
+ (#batch=1, size, cache_t2), not used here, it's for interface
81
+ compatibility to ConformerEncoderLayer.
82
+ Returns:
83
+ torch.Tensor: Output tensor (#batch, time, size).
84
+ torch.Tensor: Mask tensor (#batch, time, time).
85
+ torch.Tensor: att_cache tensor,
86
+ (#batch=1, head, cache_t1 + time, d_k * 2).
87
+ torch.Tensor: cnn_cache tensor (#batch=1, size, cache_t2).
88
+
89
+ """
90
+ residual = x
91
+ if self.normalize_before:
92
+ x = self.norm1(x)
93
+ x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb=pos_emb, cache=att_cache)
94
+ x = residual + self.dropout(x_att)
95
+ if not self.normalize_before:
96
+ x = self.norm1(x)
97
+
98
+ residual = x
99
+ if self.normalize_before:
100
+ x = self.norm2(x)
101
+ x = residual + self.dropout(self.feed_forward(x))
102
+ if not self.normalize_before:
103
+ x = self.norm2(x)
104
+
105
+ fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
106
+ return x, mask, new_att_cache, fake_cnn_cache
107
+
108
+
109
+ class ConformerEncoderLayer(nn.Module):
110
+ """Encoder layer module.
111
+ Args:
112
+ size (int): Input dimension.
113
+ self_attn (torch.nn.Module): Self-attention module instance.
114
+ `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
115
+ instance can be used as the argument.
116
+ feed_forward (torch.nn.Module): Feed-forward module instance.
117
+ `PositionwiseFeedForward` instance can be used as the argument.
118
+ feed_forward_macaron (torch.nn.Module): Additional feed-forward module
119
+ instance.
120
+ `PositionwiseFeedForward` instance can be used as the argument.
121
+ conv_module (torch.nn.Module): Convolution module instance.
122
+ `ConvolutionModule` instance can be used as the argument.
123
+ dropout_rate (float): Dropout rate.
124
+ normalize_before (bool):
125
+ True: use layer_norm before each sub-block.
126
+ False: use layer_norm after each sub-block.
127
+ """
128
+
129
+ def __init__(
130
+ self,
131
+ size: int,
132
+ self_attn: torch.nn.Module,
133
+ feed_forward: Optional[nn.Module] = None,
134
+ feed_forward_macaron: Optional[nn.Module] = None,
135
+ conv_module: Optional[nn.Module] = None,
136
+ dropout_rate: float = 0.1,
137
+ normalize_before: bool = True,
138
+ ):
139
+ """Construct an EncoderLayer object."""
140
+ super().__init__()
141
+ self.self_attn = self_attn
142
+ self.feed_forward = feed_forward
143
+ self.feed_forward_macaron = feed_forward_macaron
144
+ self.conv_module = conv_module
145
+ self.norm_ff = nn.LayerNorm(size, eps=1e-12) # for the FNN module
146
+ self.norm_mha = nn.LayerNorm(size, eps=1e-12) # for the MHA module
147
+ if feed_forward_macaron is not None:
148
+ self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-12)
149
+ self.ff_scale = 0.5
150
+ else:
151
+ self.ff_scale = 1.0
152
+ if self.conv_module is not None:
153
+ self.norm_conv = nn.LayerNorm(size, eps=1e-12) # for the CNN module
154
+ self.norm_final = nn.LayerNorm(
155
+ size, eps=1e-12) # for the final output of the block
156
+ self.dropout = nn.Dropout(dropout_rate)
157
+ self.size = size
158
+ self.normalize_before = normalize_before
159
+
160
+ def forward(
161
+ self,
162
+ x: torch.Tensor,
163
+ mask: torch.Tensor,
164
+ pos_emb: torch.Tensor,
165
+ mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
166
+ att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
167
+ cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
168
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
169
+ """Compute encoded features.
170
+
171
+ Args:
172
+ x (torch.Tensor): (#batch, time, size)
173
+ mask (torch.Tensor): Mask tensor for the input (#batch, time,time),
174
+ (0, 0, 0) means fake mask.
175
+ pos_emb (torch.Tensor): positional encoding, must not be None
176
+ for ConformerEncoderLayer.
177
+ mask_pad (torch.Tensor): batch padding mask used for conv module.
178
+ (#batch, 1,time), (0, 0, 0) means fake mask.
179
+ att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
180
+ (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
181
+ cnn_cache (torch.Tensor): Convolution cache in conformer layer
182
+ (#batch=1, size, cache_t2)
183
+ Returns:
184
+ torch.Tensor: Output tensor (#batch, time, size).
185
+ torch.Tensor: Mask tensor (#batch, time, time).
186
+ torch.Tensor: att_cache tensor,
187
+ (#batch=1, head, cache_t1 + time, d_k * 2).
188
+ torch.Tensor: cnn_cache tensor (#batch, size, cache_t2).
189
+ """
190
+
191
+ # whether to use macaron style
192
+ if self.feed_forward_macaron is not None:
193
+ residual = x
194
+ if self.normalize_before:
195
+ x = self.norm_ff_macaron(x)
196
+ x = residual + self.ff_scale * self.dropout(
197
+ self.feed_forward_macaron(x))
198
+ if not self.normalize_before:
199
+ x = self.norm_ff_macaron(x)
200
+
201
+ # multi-headed self-attention module
202
+ residual = x
203
+ if self.normalize_before:
204
+ x = self.norm_mha(x)
205
+ x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb,
206
+ att_cache)
207
+ x = residual + self.dropout(x_att)
208
+ if not self.normalize_before:
209
+ x = self.norm_mha(x)
210
+
211
+ # convolution module
212
+ # Fake new cnn cache here, and then change it in conv_module
213
+ new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
214
+ if self.conv_module is not None:
215
+ residual = x
216
+ if self.normalize_before:
217
+ x = self.norm_conv(x)
218
+ x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)
219
+ x = residual + self.dropout(x)
220
+
221
+ if not self.normalize_before:
222
+ x = self.norm_conv(x)
223
+
224
+ # feed forward module
225
+ residual = x
226
+ if self.normalize_before:
227
+ x = self.norm_ff(x)
228
+
229
+ x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
230
+ if not self.normalize_before:
231
+ x = self.norm_ff(x)
232
+
233
+ if self.conv_module is not None:
234
+ x = self.norm_final(x)
235
+
236
+ return x, mask, new_att_cache, new_cnn_cache
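A schematic summary (comments only, in the pre-norm configuration) of the macaron-style data flow the method above implements:

```python
# x = x + 0.5 * FFN(LN(x))   # feed_forward_macaron half-step, ff_scale = 0.5
# x = x + MHSA(LN(x))        # self-attention, consumes/produces att_cache
# x = x + Conv(LN(x))        # convolution module, consumes/produces cnn_cache
# x = x + 0.5 * FFN(LN(x))   # second half-step feed-forward
# x = LN(x)                  # norm_final, applied only when conv_module is set
```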
speech/cosyvoice/transformer/label_smoothing_loss.py ADDED
@@ -0,0 +1,96 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Label smoothing module."""
16
+
17
+ import torch
18
+ from torch import nn
19
+
20
+
21
+ class LabelSmoothingLoss(nn.Module):
22
+ """Label-smoothing loss.
23
+
24
+ In a standard CE loss, the label's data distribution is:
25
+ [0,1,2] ->
26
+ [
27
+ [1.0, 0.0, 0.0],
28
+ [0.0, 1.0, 0.0],
29
+ [0.0, 0.0, 1.0],
30
+ ]
31
+
32
+ In the smoothed version of CE loss, some probabilities
33
+ are taken from the true label prob (1.0) and are divided
34
+ among other labels.
35
+
36
+ e.g.
37
+ smoothing=0.1
38
+ [0,1,2] ->
39
+ [
40
+ [0.9, 0.05, 0.05],
41
+ [0.05, 0.9, 0.05],
42
+ [0.05, 0.05, 0.9],
43
+ ]
44
+
45
+ Args:
46
+ size (int): the number of class
47
+ padding_idx (int): padding class id which will be ignored for loss
48
+ smoothing (float): smoothing rate (0.0 means the conventional CE)
49
+ normalize_length (bool):
50
+ normalize loss by sequence length if True
51
+ normalize loss by batch size if False
52
+ """
53
+
54
+ def __init__(self,
55
+ size: int,
56
+ padding_idx: int,
57
+ smoothing: float,
58
+ normalize_length: bool = False):
59
+ """Construct an LabelSmoothingLoss object."""
60
+ super(LabelSmoothingLoss, self).__init__()
61
+ self.criterion = nn.KLDivLoss(reduction="none")
62
+ self.padding_idx = padding_idx
63
+ self.confidence = 1.0 - smoothing
64
+ self.smoothing = smoothing
65
+ self.size = size
66
+ self.normalize_length = normalize_length
67
+
68
+ def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
69
+ """Compute loss between x and target.
70
+
71
+ The model output and data label tensors are flattened to
72
+ (batch*seqlen, class) shape, and a mask is applied to the
73
+ padding part, which should not contribute to the loss.
74
+
75
+ Args:
76
+ x (torch.Tensor): prediction (batch, seqlen, class)
77
+ target (torch.Tensor):
78
+ target signal masked with self.padding_id (batch, seqlen)
79
+ Returns:
80
+ loss (torch.Tensor) : The KL loss, scalar float value
81
+ """
82
+ assert x.size(2) == self.size
83
+ batch_size = x.size(0)
84
+ x = x.view(-1, self.size)
85
+ target = target.view(-1)
86
+ # use zeros_like instead of torch.no_grad() for true_dist,
87
+ # since no_grad() can not be exported by JIT
88
+ true_dist = torch.zeros_like(x)
89
+ true_dist.fill_(self.smoothing / (self.size - 1))
90
+ ignore = target == self.padding_idx # (B*L,)
91
+ total = len(target) - ignore.sum().item()
92
+ target = target.masked_fill(ignore, 0) # avoid -1 index
93
+ true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
94
+ kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
95
+ denom = total if self.normalize_length else batch_size
96
+ return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom
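A hedged usage sketch (import path assumed from this commit's layout):

```python
import torch
from cosyvoice.transformer.label_smoothing_loss import LabelSmoothingLoss

criterion = LabelSmoothingLoss(size=5, padding_idx=-1, smoothing=0.1)
x = torch.randn(2, 3, 5)                        # (batch, seqlen, class) logits
target = torch.tensor([[0, 1, 2], [3, 4, -1]])  # -1 marks a padded position
loss = criterion(x, target)                     # scalar; padded frame excluded
print(loss)  # normalized by batch size, since normalize_length defaults to False
```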
speech/cosyvoice/transformer/positionwise_feed_forward.py ADDED
@@ -0,0 +1,115 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Positionwise feed forward layer definition."""
16
+
17
+ import torch
18
+
19
+
20
+ class PositionwiseFeedForward(torch.nn.Module):
21
+ """Positionwise feed forward layer.
22
+
23
+ FeedForward is applied at each position of the sequence.
24
+ The output dim is the same as the input dim.
25
+
26
+ Args:
27
+ idim (int): Input dimension.
28
+ hidden_units (int): The number of hidden units.
29
+ dropout_rate (float): Dropout rate.
30
+ activation (torch.nn.Module): Activation function
31
+ """
32
+
33
+ def __init__(
34
+ self,
35
+ idim: int,
36
+ hidden_units: int,
37
+ dropout_rate: float,
38
+ activation: torch.nn.Module = torch.nn.ReLU(),
39
+ ):
40
+ """Construct a PositionwiseFeedForward object."""
41
+ super(PositionwiseFeedForward, self).__init__()
42
+ self.w_1 = torch.nn.Linear(idim, hidden_units)
43
+ self.activation = activation
44
+ self.dropout = torch.nn.Dropout(dropout_rate)
45
+ self.w_2 = torch.nn.Linear(hidden_units, idim)
46
+
47
+ def forward(self, xs: torch.Tensor) -> torch.Tensor:
48
+ """Forward function.
49
+
50
+ Args:
51
+ xs: input tensor (B, L, D)
52
+ Returns:
53
+ output tensor, (B, L, D)
54
+ """
55
+ return self.w_2(self.dropout(self.activation(self.w_1(xs))))
56
+
57
+
58
+ class MoEFFNLayer(torch.nn.Module):
59
+ """
60
+ Mixture of expert with Positionwise feed forward layer
61
+ See also figure 1 in https://arxiv.org/pdf/2305.15663.pdf
62
+ The output dim is same with the input dim.
63
+
64
+ Modified from https://github.com/Lightning-AI/lit-gpt/pull/823
65
+ https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219
66
+ Args:
67
+ n_expert: number of expert.
68
+ n_expert_per_token: The actual number of experts used for each frame
69
+ idim (int): Input dimenstion.
70
+ hidden_units (int): The number of hidden units.
71
+ dropout_rate (float): Dropout rate.
72
+ activation (torch.nn.Module): Activation function
73
+ """
74
+
75
+ def __init__(
76
+ self,
77
+ n_expert: int,
78
+ n_expert_per_token: int,
79
+ idim: int,
80
+ hidden_units: int,
81
+ dropout_rate: float,
82
+ activation: torch.nn.Module = torch.nn.ReLU(),
83
+ ):
84
+ super(MoEFFNLayer, self).__init__()
85
+ self.gate = torch.nn.Linear(idim, n_expert, bias=False)
86
+ self.experts = torch.nn.ModuleList(
87
+ PositionwiseFeedForward(idim, hidden_units, dropout_rate,
88
+ activation) for _ in range(n_expert))
89
+ self.n_expert_per_token = n_expert_per_token
90
+
91
+ def forward(self, xs: torch.Tensor) -> torch.Tensor:
92
+ """Foward function.
93
+ Args:
94
+ xs: input tensor (B, L, D)
95
+ Returns:
96
+ output tensor, (B, L, D)
97
+
98
+ """
99
+ B, L, D = xs.size(
100
+ ) # batch size, sequence length, embedding dimension (idim)
101
+ xs = xs.view(-1, D) # (B*L, D)
102
+ router = self.gate(xs) # (B*L, n_expert)
103
+ logits, indices = torch.topk(
104
+ router, self.n_expert_per_token
105
+ ) # probs:(B*L, n_expert), indices: (B*L, n_expert)
106
+ weights = torch.nn.functional.softmax(
107
+ logits, dim=1,
108
+ dtype=torch.float).to(dtype=xs.dtype) # (B*L, n_expert_per_token)
109
+ output = torch.zeros_like(xs) # (B*L, D)
110
+ for i, expert in enumerate(self.experts):
111
+ mask = indices == i
112
+ batch_idx, ith_expert = torch.where(mask)
113
+ output[batch_idx] += weights[batch_idx, ith_expert, None] * expert(
114
+ xs[batch_idx])
115
+ return output.view(B, L, D)
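As a quick shape sanity check for the MoE layer above, a sketch with made-up dimensions; each frame is routed to n_expert_per_token of the n_expert feed-forward experts and the input shape is preserved:

import torch

moe = MoEFFNLayer(n_expert=4, n_expert_per_token=2,
                  idim=256, hidden_units=1024, dropout_rate=0.1)
xs = torch.randn(3, 7, 256)   # (B, L, D)
out = moe(xs)
assert out.shape == xs.shape  # (3, 7, 256)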
speech/cosyvoice/transformer/subsampling.py ADDED
@@ -0,0 +1,383 @@
+ # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
+ #               2024 Alibaba Inc (Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # Modified from ESPnet(https://github.com/espnet/espnet)
+ """Subsampling layer definition."""
+
+ from typing import Tuple, Union
+
+ import torch
+
+
+ class BaseSubsampling(torch.nn.Module):
+
+     def __init__(self):
+         super().__init__()
+         self.right_context = 0
+         self.subsampling_rate = 1
+
+     def position_encoding(self, offset: Union[int, torch.Tensor],
+                           size: int) -> torch.Tensor:
+         return self.pos_enc.position_encoding(offset, size)
+
+
+ class EmbedinigNoSubsampling(BaseSubsampling):
+     """Embedding input without subsampling.
+     """
+
+     def __init__(self, idim: int, odim: int, dropout_rate: float,
+                  pos_enc_class: torch.nn.Module):
+         super().__init__()
+         self.embed = torch.nn.Embedding(idim, odim)
+         self.pos_enc = pos_enc_class
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         offset: Union[int, torch.Tensor] = 0
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Input x.
+
+         Args:
+             x (torch.Tensor): Input tensor (#batch, time, idim).
+             x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+         Returns:
+             torch.Tensor: linear input tensor (#batch, time', odim),
+                 where time' = time.
+             torch.Tensor: linear input mask (#batch, 1, time'),
+                 where time' = time.
+
+         """
+         x = self.embed(x)
+         x, pos_emb = self.pos_enc(x, offset)
+         return x, pos_emb, x_mask
+
+
+ class LinearNoSubsampling(BaseSubsampling):
+     """Linear transform the input without subsampling.
+
+     Args:
+         idim (int): Input dimension.
+         odim (int): Output dimension.
+         dropout_rate (float): Dropout rate.
+
+     """
+
+     def __init__(self, idim: int, odim: int, dropout_rate: float,
+                  pos_enc_class: torch.nn.Module):
+         """Construct a linear object."""
+         super().__init__()
+         self.out = torch.nn.Sequential(
+             torch.nn.Linear(idim, odim),
+             torch.nn.LayerNorm(odim, eps=1e-5),
+             torch.nn.Dropout(dropout_rate),
+         )
+         self.pos_enc = pos_enc_class
+         self.right_context = 0
+         self.subsampling_rate = 1
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         offset: Union[int, torch.Tensor] = 0
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Input x.
+
+         Args:
+             x (torch.Tensor): Input tensor (#batch, time, idim).
+             x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+         Returns:
+             torch.Tensor: linear input tensor (#batch, time', odim),
+                 where time' = time.
+             torch.Tensor: linear input mask (#batch, 1, time'),
+                 where time' = time.
+
+         """
+         x = self.out(x)
+         x, pos_emb = self.pos_enc(x, offset)
+         return x, pos_emb, x_mask
+
+
+ class Conv1dSubsampling2(BaseSubsampling):
+     """Convolutional 1D subsampling (to 1/2 length).
+     It is designed for Whisper, ref:
+     https://github.com/openai/whisper/blob/main/whisper/model.py
+
+     Args:
+         idim (int): Input dimension.
+         odim (int): Output dimension.
+         dropout_rate (float): Dropout rate.
+
+     """
+
+     def __init__(self, idim: int, odim: int, dropout_rate: float,
+                  pos_enc_class: torch.nn.Module):
+         """Construct a Conv1dSubsampling2 object."""
+         super().__init__()
+         self.conv = torch.nn.Sequential(
+             torch.nn.Conv1d(idim, odim, kernel_size=3, padding=1),
+             torch.nn.GELU(),
+             torch.nn.Conv1d(odim, odim, kernel_size=3, stride=2, padding=1),
+             torch.nn.GELU(),
+         )
+         self.pos_enc = pos_enc_class
+         # The right context for every conv layer is computed by:
+         # (kernel_size - 1) * frame_rate_of_this_layer
+         self.subsampling_rate = 2
+         # 4 = (3 - 1) * 1 + (3 - 1) * 1
+         self.right_context = 4
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         offset: Union[int, torch.Tensor] = 0
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Subsample x.
+
+         Args:
+             x (torch.Tensor): Input tensor (#batch, time, idim).
+             x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+         Returns:
+             torch.Tensor: Subsampled tensor (#batch, time', odim),
+                 where time' = time // 2.
+             torch.Tensor: Subsampled mask (#batch, 1, time'),
+                 where time' = time // 2.
+             torch.Tensor: positional encoding
+
+         """
+         time = x.size(1)
+         x = x.transpose(1, 2)  # (b, f, t)
+         x = self.conv(x)
+         x = x.transpose(1, 2)  # (b, t, f)
+         x, pos_emb = self.pos_enc(x, offset)
+         return x, pos_emb, x_mask[:, :, (time + 1) % 2::2]
+
+
+ class Conv2dSubsampling4(BaseSubsampling):
+     """Convolutional 2D subsampling (to 1/4 length).
+
+     Args:
+         idim (int): Input dimension.
+         odim (int): Output dimension.
+         dropout_rate (float): Dropout rate.
+
+     """
+
+     def __init__(self, idim: int, odim: int, dropout_rate: float,
+                  pos_enc_class: torch.nn.Module):
+         """Construct a Conv2dSubsampling4 object."""
+         super().__init__()
+         self.conv = torch.nn.Sequential(
+             torch.nn.Conv2d(1, odim, 3, 2),
+             torch.nn.ReLU(),
+             torch.nn.Conv2d(odim, odim, 3, 2),
+             torch.nn.ReLU(),
+         )
+         self.out = torch.nn.Sequential(
+             torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim))
+         self.pos_enc = pos_enc_class
+         # The right context for every conv layer is computed by:
+         # (kernel_size - 1) * frame_rate_of_this_layer
+         self.subsampling_rate = 4
+         # 6 = (3 - 1) * 1 + (3 - 1) * 2
+         self.right_context = 6
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         offset: Union[int, torch.Tensor] = 0
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Subsample x.
+
+         Args:
+             x (torch.Tensor): Input tensor (#batch, time, idim).
+             x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+         Returns:
+             torch.Tensor: Subsampled tensor (#batch, time', odim),
+                 where time' = time // 4.
+             torch.Tensor: Subsampled mask (#batch, 1, time'),
+                 where time' = time // 4.
+             torch.Tensor: positional encoding
+
+         """
+         x = x.unsqueeze(1)  # (b, c=1, t, f)
+         x = self.conv(x)
+         b, c, t, f = x.size()
+         x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
+         x, pos_emb = self.pos_enc(x, offset)
+         return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]
+
+
+ class Conv2dSubsampling6(BaseSubsampling):
+     """Convolutional 2D subsampling (to 1/6 length).
+     Args:
+         idim (int): Input dimension.
+         odim (int): Output dimension.
+         dropout_rate (float): Dropout rate.
+         pos_enc (torch.nn.Module): Custom position encoding layer.
+     """
+
+     def __init__(self, idim: int, odim: int, dropout_rate: float,
+                  pos_enc_class: torch.nn.Module):
+         """Construct a Conv2dSubsampling6 object."""
+         super().__init__()
+         self.conv = torch.nn.Sequential(
+             torch.nn.Conv2d(1, odim, 3, 2),
+             torch.nn.ReLU(),
+             torch.nn.Conv2d(odim, odim, 5, 3),
+             torch.nn.ReLU(),
+         )
+         self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3),
+                                       odim)
+         self.pos_enc = pos_enc_class
+         # 10 = (3 - 1) * 1 + (5 - 1) * 2
+         self.subsampling_rate = 6
+         self.right_context = 10
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         offset: Union[int, torch.Tensor] = 0
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Subsample x.
+         Args:
+             x (torch.Tensor): Input tensor (#batch, time, idim).
+             x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+         Returns:
+             torch.Tensor: Subsampled tensor (#batch, time', odim),
+                 where time' = time // 6.
+             torch.Tensor: Subsampled mask (#batch, 1, time'),
+                 where time' = time // 6.
+             torch.Tensor: positional encoding
+         """
+         x = x.unsqueeze(1)  # (b, c, t, f)
+         x = self.conv(x)
+         b, c, t, f = x.size()
+         x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
+         x, pos_emb = self.pos_enc(x, offset)
+         return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]
+
+
+ class Conv2dSubsampling8(BaseSubsampling):
+     """Convolutional 2D subsampling (to 1/8 length).
+
+     Args:
+         idim (int): Input dimension.
+         odim (int): Output dimension.
+         dropout_rate (float): Dropout rate.
+
+     """
+
+     def __init__(self, idim: int, odim: int, dropout_rate: float,
+                  pos_enc_class: torch.nn.Module):
+         """Construct a Conv2dSubsampling8 object."""
+         super().__init__()
+         self.conv = torch.nn.Sequential(
+             torch.nn.Conv2d(1, odim, 3, 2),
+             torch.nn.ReLU(),
+             torch.nn.Conv2d(odim, odim, 3, 2),
+             torch.nn.ReLU(),
+             torch.nn.Conv2d(odim, odim, 3, 2),
+             torch.nn.ReLU(),
+         )
+         self.linear = torch.nn.Linear(
+             odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim)
+         self.pos_enc = pos_enc_class
+         self.subsampling_rate = 8
+         # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4
+         self.right_context = 14
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         offset: Union[int, torch.Tensor] = 0
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Subsample x.
+
+         Args:
+             x (torch.Tensor): Input tensor (#batch, time, idim).
+             x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+         Returns:
+             torch.Tensor: Subsampled tensor (#batch, time', odim),
+                 where time' = time // 8.
+             torch.Tensor: Subsampled mask (#batch, 1, time'),
+                 where time' = time // 8.
+             torch.Tensor: positional encoding
+         """
+         x = x.unsqueeze(1)  # (b, c, t, f)
+         x = self.conv(x)
+         b, c, t, f = x.size()
+         x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
+         x, pos_emb = self.pos_enc(x, offset)
+         return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]
+
+
+ class LegacyLinearNoSubsampling(BaseSubsampling):
+     """Linear transform the input without subsampling.
+
+     Args:
+         idim (int): Input dimension.
+         odim (int): Output dimension.
+         dropout_rate (float): Dropout rate.
+
+     """
+
+     def __init__(self, idim: int, odim: int, dropout_rate: float,
+                  pos_enc_class: torch.nn.Module):
+         """Construct a linear object."""
+         super().__init__()
+         self.out = torch.nn.Sequential(
+             torch.nn.Linear(idim, odim),
+             torch.nn.LayerNorm(odim, eps=1e-5),
+             torch.nn.Dropout(dropout_rate),
+             torch.nn.ReLU(),
+         )
+         self.pos_enc = pos_enc_class
+         self.right_context = 0
+         self.subsampling_rate = 1
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         x_mask: torch.Tensor,
+         offset: Union[int, torch.Tensor] = 0
+     ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         """Input x.
+
+         Args:
+             x (torch.Tensor): Input tensor (#batch, time, idim).
+             x_mask (torch.Tensor): Input mask (#batch, 1, time).
+
+         Returns:
+             torch.Tensor: linear input tensor (#batch, time', odim),
+                 where time' = time.
+             torch.Tensor: linear input mask (#batch, 1, time'),
+                 where time' = time.
+
+         """
+         x = self.out(x)
+         x, pos_emb = self.pos_enc(x, offset)
+         return x, pos_emb, x_mask
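A sketch of Conv2dSubsampling4 on an 80-dim fbank input. The PositionalEncoding constructor is assumed to take (d_model, dropout_rate), which is how these classes are instantiated in upsample_encoder.py later in this commit:

import torch
from cosyvoice.transformer.embedding import PositionalEncoding

sub = Conv2dSubsampling4(idim=80, odim=256, dropout_rate=0.1,
                         pos_enc_class=PositionalEncoding(256, 0.1))
x = torch.randn(1, 100, 80)                       # (B, T, idim)
x_mask = torch.ones(1, 1, 100, dtype=torch.bool)  # (B, 1, T)
y, pos_emb, y_mask = sub(x, x_mask)               # y: (1, 24, 256), roughly T // 4 frames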
speech/cosyvoice/transformer/upsample_encoder.py ADDED
@@ -0,0 +1,320 @@
+ # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
+ #               2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
+ #               2024 Alibaba Inc (Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # Modified from ESPnet(https://github.com/espnet/espnet)
+ """Encoder definition."""
+ from typing import Tuple
+
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ from cosyvoice.transformer.convolution import ConvolutionModule
+ from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer
+ from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
+ from cosyvoice.utils.class_utils import (
+     COSYVOICE_EMB_CLASSES,
+     COSYVOICE_SUBSAMPLE_CLASSES,
+     COSYVOICE_ATTENTION_CLASSES,
+     COSYVOICE_ACTIVATION_CLASSES,
+ )
+ from cosyvoice.utils.mask import make_pad_mask
+ from cosyvoice.utils.mask import add_optional_chunk_mask
+
+
+ class Upsample1D(nn.Module):
+     """A 1D upsampling layer followed by a causal convolution.
+
+     Parameters:
+         channels (`int`):
+             number of channels in the inputs.
+         out_channels (`int`):
+             number of output channels.
+         stride (`int`, default `2`):
+             upsampling factor.
+     """
+
+     def __init__(self, channels: int, out_channels: int, stride: int = 2):
+         super().__init__()
+         self.channels = channels
+         self.out_channels = out_channels
+         self.stride = stride
+         # In this mode, first repeat interpolate, then conv with stride=1
+         self.conv = nn.Conv1d(self.channels, self.out_channels, stride * 2 + 1, stride=1, padding=0)
+
+     def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+         outputs = F.interpolate(inputs, scale_factor=float(self.stride), mode="nearest")
+         outputs = F.pad(outputs, (self.stride * 2, 0), value=0.0)
+         outputs = self.conv(outputs)
+         return outputs, input_lengths * self.stride
+
+
+ class PreLookaheadLayer(nn.Module):
+     def __init__(self, channels: int, pre_lookahead_len: int = 1):
+         super().__init__()
+         self.channels = channels
+         self.pre_lookahead_len = pre_lookahead_len
+         self.conv1 = nn.Conv1d(
+             channels, channels,
+             kernel_size=pre_lookahead_len + 1,
+             stride=1, padding=0,
+         )
+         self.conv2 = nn.Conv1d(
+             channels, channels,
+             kernel_size=3, stride=1, padding=0,
+         )
+
+     def forward(self, inputs: torch.Tensor, context: torch.Tensor = torch.zeros(0, 0, 0)) -> torch.Tensor:
+         """
+         inputs: (batch_size, seq_len, channels)
+         """
+         outputs = inputs.transpose(1, 2).contiguous()
+         context = context.transpose(1, 2).contiguous()
+         # look ahead
+         if context.size(2) == 0:
+             outputs = F.pad(outputs, (0, self.pre_lookahead_len), mode='constant', value=0.0)
+         else:
+             assert self.training is False, 'you have passed context, make sure that you are running inference mode'
+             assert context.size(2) == self.pre_lookahead_len
+             outputs = F.pad(torch.concat([outputs, context], dim=2), (0, self.pre_lookahead_len - context.size(2)), mode='constant', value=0.0)
+         outputs = F.leaky_relu(self.conv1(outputs))
+         # outputs
+         outputs = F.pad(outputs, (self.conv2.kernel_size[0] - 1, 0), mode='constant', value=0.0)
+         outputs = self.conv2(outputs)
+         outputs = outputs.transpose(1, 2).contiguous()
+
+         # residual connection
+         outputs = outputs + inputs
+         return outputs
+
+
+ class UpsampleConformerEncoder(torch.nn.Module):
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int = 256,
+         attention_heads: int = 4,
+         linear_units: int = 2048,
+         num_blocks: int = 6,
+         dropout_rate: float = 0.1,
+         positional_dropout_rate: float = 0.1,
+         attention_dropout_rate: float = 0.0,
+         input_layer: str = "conv2d",
+         pos_enc_layer_type: str = "rel_pos",
+         normalize_before: bool = True,
+         static_chunk_size: int = 0,
+         use_dynamic_chunk: bool = False,
+         global_cmvn: torch.nn.Module = None,
+         use_dynamic_left_chunk: bool = False,
+         positionwise_conv_kernel_size: int = 1,
+         macaron_style: bool = True,
+         selfattention_layer_type: str = "rel_selfattn",
+         activation_type: str = "swish",
+         use_cnn_module: bool = True,
+         cnn_module_kernel: int = 15,
+         causal: bool = False,
+         cnn_module_norm: str = "batch_norm",
+         key_bias: bool = True,
+         gradient_checkpointing: bool = False,
+     ):
+         """
+         Args:
+             input_size (int): input dim
+             output_size (int): dimension of attention
+             attention_heads (int): the number of heads of multi head attention
+             linear_units (int): the hidden units number of position-wise feed
+                 forward
+             num_blocks (int): the number of decoder blocks
+             dropout_rate (float): dropout rate
+             attention_dropout_rate (float): dropout rate in attention
+             positional_dropout_rate (float): dropout rate after adding
+                 positional encoding
+             input_layer (str): input layer type.
+                 optional [linear, conv2d, conv2d6, conv2d8]
+             pos_enc_layer_type (str): Encoder positional encoding layer type.
+                 optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
+             normalize_before (bool):
+                 True: use layer_norm before each sub-block of a layer.
+                 False: use layer_norm after each sub-block of a layer.
+             static_chunk_size (int): chunk size for static chunk training and
+                 decoding
+             use_dynamic_chunk (bool): whether to use dynamic chunk size for
+                 training or not. You can only use a fixed chunk (chunk_size > 0)
+                 or a dynamic chunk size (use_dynamic_chunk = True)
+             global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
+             use_dynamic_left_chunk (bool): whether to use dynamic left chunk in
+                 dynamic chunk training
+             key_bias: whether to use bias in attention.linear_k, False for whisper models.
+             gradient_checkpointing: rerunning a forward-pass segment for each
+                 checkpointed segment during backward.
+         """
+         super().__init__()
+         self._output_size = output_size
+
+         self.global_cmvn = global_cmvn
+         self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
+             input_size,
+             output_size,
+             dropout_rate,
+             COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
+                                                       positional_dropout_rate),
+         )
+
+         self.normalize_before = normalize_before
+         self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
+         self.static_chunk_size = static_chunk_size
+         self.use_dynamic_chunk = use_dynamic_chunk
+         self.use_dynamic_left_chunk = use_dynamic_left_chunk
+         self.gradient_checkpointing = gradient_checkpointing
+         activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
+         # self-attention module definition
+         encoder_selfattn_layer_args = (
+             attention_heads,
+             output_size,
+             attention_dropout_rate,
+             key_bias,
+         )
+         # feed-forward module definition
+         positionwise_layer_args = (
+             output_size,
+             linear_units,
+             dropout_rate,
+             activation,
+         )
+         # convolution module definition
+         convolution_layer_args = (output_size, cnn_module_kernel, activation,
+                                   cnn_module_norm, causal)
+         self.pre_lookahead_layer = PreLookaheadLayer(channels=512, pre_lookahead_len=3)
+         self.encoders = torch.nn.ModuleList([
+             ConformerEncoderLayer(
+                 output_size,
+                 COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
+                     *encoder_selfattn_layer_args),
+                 PositionwiseFeedForward(*positionwise_layer_args),
+                 PositionwiseFeedForward(
+                     *positionwise_layer_args) if macaron_style else None,
+                 ConvolutionModule(
+                     *convolution_layer_args) if use_cnn_module else None,
+                 dropout_rate,
+                 normalize_before,
+             ) for _ in range(num_blocks)
+         ])
+         self.up_layer = Upsample1D(channels=512, out_channels=512, stride=2)
+         self.up_embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
+             input_size,
+             output_size,
+             dropout_rate,
+             COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
+                                                       positional_dropout_rate),
+         )
+         self.up_encoders = torch.nn.ModuleList([
+             ConformerEncoderLayer(
+                 output_size,
+                 COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
+                     *encoder_selfattn_layer_args),
+                 PositionwiseFeedForward(*positionwise_layer_args),
+                 PositionwiseFeedForward(
+                     *positionwise_layer_args) if macaron_style else None,
+                 ConvolutionModule(
+                     *convolution_layer_args) if use_cnn_module else None,
+                 dropout_rate,
+                 normalize_before,
+             ) for _ in range(4)
+         ])
+
+     def output_size(self) -> int:
+         return self._output_size
+
+     def forward(
+         self,
+         xs: torch.Tensor,
+         xs_lens: torch.Tensor,
+         context: torch.Tensor = torch.zeros(0, 0, 0),
+         decoding_chunk_size: int = 0,
+         num_decoding_left_chunks: int = -1,
+         streaming: bool = False,
+     ) -> Tuple[torch.Tensor, torch.Tensor]:
+         """Embed positions in tensor.
+
+         Args:
+             xs: padded input tensor (B, T, D)
+             xs_lens: input length (B)
+             decoding_chunk_size: decoding chunk size for dynamic chunk
+                 0: default for training, use random dynamic chunk.
+                 <0: for decoding, use full chunk.
+                 >0: for decoding, use fixed chunk size as set.
+             num_decoding_left_chunks: number of left chunks, this is for decoding,
+                 the chunk size is decoding_chunk_size.
+                 >=0: use num_decoding_left_chunks
+                 <0: use all left chunks
+         Returns:
+             encoder output tensor xs, and subsampled masks
+             xs: padded output tensor (B, T' ~= T/subsample_rate, D)
+             masks: torch.Tensor batch padding mask after subsample
+                 (B, 1, T' ~= T/subsample_rate)
+         NOTE(xcsong):
+             We pass the `__call__` method of the modules instead of `forward` to the
+             checkpointing API because `__call__` attaches all the hooks of the module.
+             https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
+         """
+         T = xs.size(1)
+         masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
+         if self.global_cmvn is not None:
+             xs = self.global_cmvn(xs)
+         xs, pos_emb, masks = self.embed(xs, masks)
+         if context.size(1) != 0:
+             assert self.training is False, 'you have passed context, make sure that you are running inference mode'
+             context_masks = torch.ones(1, 1, context.size(1)).to(masks)
+             context, _, _ = self.embed(context, context_masks, offset=xs.size(1))
+         mask_pad = masks  # (B, 1, T/subsample_rate)
+         chunk_masks = add_optional_chunk_mask(xs, masks, False, False, 0, self.static_chunk_size if streaming is True else 0, -1)
+         # lookahead + conformer encoder
+         xs = self.pre_lookahead_layer(xs, context=context)
+         xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
+
+         # upsample + conformer encoder
+         xs = xs.transpose(1, 2).contiguous()
+         xs, xs_lens = self.up_layer(xs, xs_lens)
+         xs = xs.transpose(1, 2).contiguous()
+         T = xs.size(1)
+         masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
+         xs, pos_emb, masks = self.up_embed(xs, masks)
+         mask_pad = masks  # (B, 1, T/subsample_rate)
+         chunk_masks = add_optional_chunk_mask(xs, masks, False, False, 0, self.static_chunk_size * self.up_layer.stride if streaming is True else 0, -1)
+         xs = self.forward_up_layers(xs, chunk_masks, pos_emb, mask_pad)
+
+         if self.normalize_before:
+             xs = self.after_norm(xs)
+         # Here we assume the mask is not changed in encoder layers, so just
+         # return the masks before encoder layers, and the masks will be used
+         # for cross attention with decoder later
+         return xs, masks
+
+     def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
+                        pos_emb: torch.Tensor,
+                        mask_pad: torch.Tensor) -> torch.Tensor:
+         for layer in self.encoders:
+             xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
+         return xs
+
+     def forward_up_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
+                           pos_emb: torch.Tensor,
+                           mask_pad: torch.Tensor) -> torch.Tensor:
+         for layer in self.up_encoders:
+             xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
+         return xs
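A small sketch of Upsample1D in isolation: nearest-neighbour interpolation doubles the time axis, the left-only padding keeps the stride-1 convolution causal, and the length tensor is scaled accordingly:

import torch

up = Upsample1D(channels=512, out_channels=512, stride=2)
x = torch.randn(1, 512, 50)            # (B, C, T)
y, y_lens = up(x, torch.tensor([50]))
print(y.shape, y_lens)                 # torch.Size([1, 512, 100]) tensor([100])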
speech/cosyvoice/utils/__init__.py ADDED
File without changes
speech/cosyvoice/utils/class_utils.py ADDED
@@ -0,0 +1,83 @@
+ # Copyright [2023-11-28] <sxc19@mails.tsinghua.edu.cn, Xingchen Song>
+ #            2024 Alibaba Inc (authors: Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import torch
+
+ from cosyvoice.transformer.activation import Swish
+ from cosyvoice.transformer.subsampling import (
+     LinearNoSubsampling,
+     EmbedinigNoSubsampling,
+     Conv1dSubsampling2,
+     Conv2dSubsampling4,
+     Conv2dSubsampling6,
+     Conv2dSubsampling8,
+ )
+ from cosyvoice.transformer.embedding import (PositionalEncoding,
+                                              RelPositionalEncoding,
+                                              WhisperPositionalEncoding,
+                                              LearnablePositionalEncoding,
+                                              NoPositionalEncoding)
+ from cosyvoice.transformer.attention import (MultiHeadedAttention,
+                                              RelPositionMultiHeadedAttention)
+ from cosyvoice.transformer.embedding import EspnetRelPositionalEncoding
+ from cosyvoice.transformer.subsampling import LegacyLinearNoSubsampling
+ from cosyvoice.llm.llm import TransformerLM, Qwen2LM
+ from cosyvoice.flow.flow import MaskedDiffWithXvec, CausalMaskedDiffWithXvec
+ from cosyvoice.hifigan.generator import HiFTGenerator
+ from cosyvoice.cli.model import CosyVoiceModel, CosyVoice2Model
+
+
+ COSYVOICE_ACTIVATION_CLASSES = {
+     "hardtanh": torch.nn.Hardtanh,
+     "tanh": torch.nn.Tanh,
+     "relu": torch.nn.ReLU,
+     "selu": torch.nn.SELU,
+     "swish": getattr(torch.nn, "SiLU", Swish),
+     "gelu": torch.nn.GELU,
+ }
+
+ COSYVOICE_SUBSAMPLE_CLASSES = {
+     "linear": LinearNoSubsampling,
+     "linear_legacy": LegacyLinearNoSubsampling,
+     "embed": EmbedinigNoSubsampling,
+     "conv1d2": Conv1dSubsampling2,
+     "conv2d": Conv2dSubsampling4,
+     "conv2d6": Conv2dSubsampling6,
+     "conv2d8": Conv2dSubsampling8,
+     'paraformer_dummy': torch.nn.Identity
+ }
+
+ COSYVOICE_EMB_CLASSES = {
+     "embed": PositionalEncoding,
+     "abs_pos": PositionalEncoding,
+     "rel_pos": RelPositionalEncoding,
+     "rel_pos_espnet": EspnetRelPositionalEncoding,
+     "no_pos": NoPositionalEncoding,
+     "abs_pos_whisper": WhisperPositionalEncoding,
+     "embed_learnable_pe": LearnablePositionalEncoding,
+ }
+
+ COSYVOICE_ATTENTION_CLASSES = {
+     "selfattn": MultiHeadedAttention,
+     "rel_selfattn": RelPositionMultiHeadedAttention,
+ }
+
+
+ def get_model_type(configs):
+     # NOTE CosyVoice2Model inherits CosyVoiceModel
+     if isinstance(configs['llm'], TransformerLM) and isinstance(configs['flow'], MaskedDiffWithXvec) and isinstance(configs['hift'], HiFTGenerator):
+         return CosyVoiceModel
+     if isinstance(configs['llm'], Qwen2LM) and isinstance(configs['flow'], CausalMaskedDiffWithXvec) and isinstance(configs['hift'], HiFTGenerator):
+         return CosyVoice2Model
+     raise TypeError('No valid model type found!')
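The registries above let YAML configs name modules by string; a minimal sketch of how they resolve (the constructor arguments mirror how upsample_encoder.py and subsampling.py in this commit instantiate them):

act = COSYVOICE_ACTIVATION_CLASSES['swish']()         # a torch.nn.SiLU instance
pos_enc = COSYVOICE_EMB_CLASSES['rel_pos'](256, 0.1)  # RelPositionalEncoding
embed = COSYVOICE_SUBSAMPLE_CLASSES['linear'](80, 256, 0.1, pos_enc)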
speech/cosyvoice/utils/common.py ADDED
@@ -0,0 +1,186 @@
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
+ #               2024 Alibaba Inc (authors: Xiang Lyu)
+ #               2025 Alibaba Inc (authors: Xiang Lyu, Bofan Zhou)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # Modified from ESPnet(https://github.com/espnet/espnet)
+ """Utility functions for Transformer."""
+
+ import queue
+ import random
+ from typing import List
+
+ import numpy as np
+ import torch
+
+ IGNORE_ID = -1
+
+
+ def pad_list(xs: List[torch.Tensor], pad_value: int):
+     """Perform padding for the list of tensors.
+
+     Args:
+         xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
+         pad_value (float): Value for padding.
+
+     Returns:
+         Tensor: Padded tensor (B, Tmax, `*`).
+
+     Examples:
+         >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
+         >>> x
+         [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
+         >>> pad_list(x, 0)
+         tensor([[1., 1., 1., 1.],
+                 [1., 1., 0., 0.],
+                 [1., 0., 0., 0.]])
+
+     """
+     max_len = max([len(item) for item in xs])
+     batchs = len(xs)
+     ndim = xs[0].ndim
+     if ndim == 1:
+         pad_res = torch.zeros(batchs,
+                               max_len,
+                               dtype=xs[0].dtype,
+                               device=xs[0].device)
+     elif ndim == 2:
+         pad_res = torch.zeros(batchs,
+                               max_len,
+                               xs[0].shape[1],
+                               dtype=xs[0].dtype,
+                               device=xs[0].device)
+     elif ndim == 3:
+         pad_res = torch.zeros(batchs,
+                               max_len,
+                               xs[0].shape[1],
+                               xs[0].shape[2],
+                               dtype=xs[0].dtype,
+                               device=xs[0].device)
+     else:
+         raise ValueError(f"Unsupported ndim: {ndim}")
+     pad_res.fill_(pad_value)
+     for i in range(batchs):
+         pad_res[i, :len(xs[i])] = xs[i]
+     return pad_res
+
+
+ def th_accuracy(pad_outputs: torch.Tensor, pad_targets: torch.Tensor,
+                 ignore_label: int) -> torch.Tensor:
+     """Calculate accuracy.
+
+     Args:
+         pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
+         pad_targets (LongTensor): Target label tensors (B, Lmax).
+         ignore_label (int): Ignore label id.
+
+     Returns:
+         torch.Tensor: Accuracy value (0.0 - 1.0).
+
+     """
+     pad_pred = pad_outputs.view(pad_targets.size(0), pad_targets.size(1),
+                                 pad_outputs.size(1)).argmax(2)
+     mask = pad_targets != ignore_label
+     numerator = torch.sum(
+         pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
+     denominator = torch.sum(mask)
+     return (numerator / denominator).detach()
+
+
+ def get_padding(kernel_size, dilation=1):
+     return int((kernel_size * dilation - dilation) / 2)
+
+
+ def init_weights(m, mean=0.0, std=0.01):
+     classname = m.__class__.__name__
+     if classname.find("Conv") != -1:
+         m.weight.data.normal_(mean, std)
+
+
+ # Repetition Aware Sampling in VALL-E 2
+ def ras_sampling(weighted_scores, decoded_tokens, sampling, top_p=0.8, top_k=25, win_size=10, tau_r=0.1):
+     top_ids = nucleus_sampling(weighted_scores, top_p=top_p, top_k=top_k)
+     rep_num = (torch.tensor(decoded_tokens[-win_size:]).to(weighted_scores.device) == top_ids).sum().item()
+     if rep_num >= win_size * tau_r:
+         top_ids = random_sampling(weighted_scores, decoded_tokens, sampling)
+     return top_ids
+
+
+ def nucleus_sampling(weighted_scores, top_p=0.8, top_k=25):
+     prob, indices = [], []
+     cum_prob = 0.0
+     sorted_value, sorted_idx = weighted_scores.softmax(dim=0).sort(descending=True, stable=True)
+     for i in range(len(sorted_idx)):
+         # sample under both the top-p and top-k constraints.
+         if cum_prob < top_p and len(prob) < top_k:
+             cum_prob += sorted_value[i]
+             prob.append(sorted_value[i])
+             indices.append(sorted_idx[i])
+         else:
+             break
+     prob = torch.tensor(prob).to(weighted_scores)
+     indices = torch.tensor(indices, dtype=torch.long).to(weighted_scores.device)
+     top_ids = indices[prob.multinomial(1, replacement=True)]
+     return top_ids
+
+
+ def random_sampling(weighted_scores, decoded_tokens, sampling):
+     top_ids = weighted_scores.softmax(dim=0).multinomial(1, replacement=True)
+     return top_ids
+
+
+ def fade_in_out(fade_in_mel, fade_out_mel, window):
+     device = fade_in_mel.device
+     fade_in_mel, fade_out_mel = fade_in_mel.cpu(), fade_out_mel.cpu()
+     mel_overlap_len = int(window.shape[0] / 2)
+     if fade_in_mel.device == torch.device('cpu'):
+         fade_in_mel = fade_in_mel.clone()
+     fade_in_mel[..., :mel_overlap_len] = fade_in_mel[..., :mel_overlap_len] * window[:mel_overlap_len] + \
+         fade_out_mel[..., -mel_overlap_len:] * window[mel_overlap_len:]
+     return fade_in_mel.to(device)
+
+
+ def set_all_random_seed(seed):
+     random.seed(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed_all(seed)
+
+
+ def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
+     assert mask.dtype == torch.bool
+     assert dtype in [torch.float32, torch.bfloat16, torch.float16]
+     mask = mask.to(dtype)
+     # attention mask bias
+     # NOTE(Mddct): torch.finfo jit issues
+     # chunk_masks = (1.0 - chunk_masks) * torch.finfo(dtype).min
+     mask = (1.0 - mask) * -1.0e+10
+     return mask
+
+
+ class TrtContextWrapper:
+
+     def __init__(self, trt_engine, trt_concurrent=1, device='cuda:0'):
+         self.trt_context_pool = queue.Queue(maxsize=trt_concurrent)
+         self.trt_engine = trt_engine
+         for _ in range(trt_concurrent):
+             trt_context = trt_engine.create_execution_context()
+             trt_stream = torch.cuda.stream(torch.cuda.Stream(device))
+             assert trt_context is not None, 'failed to create trt context, maybe not enough CUDA memory, try reducing current trt concurrent {}'.format(trt_concurrent)
+             self.trt_context_pool.put([trt_context, trt_stream])
+         assert self.trt_context_pool.empty() is False, 'no available estimator context'
+
+     def acquire_estimator(self):
+         return self.trt_context_pool.get(), self.trt_engine
+
+     def release_estimator(self, context, stream):
+         self.trt_context_pool.put([context, stream])
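A sketch of the repetition-aware sampling above with made-up scores: when the nucleus-sampled token already dominates the last win_size decoded tokens, it falls back to unconstrained multinomial sampling (the `sampling` argument is passed through and unused by random_sampling):

import torch

scores = torch.randn(100)   # unnormalized scores over 100 speech tokens
decoded = [7] * 10          # the recent window is saturated with token 7
top_id = ras_sampling(scores, decoded, sampling=None,
                      top_p=0.8, top_k=25, win_size=10, tau_r=0.1)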
speech/cosyvoice/utils/executor.py ADDED
@@ -0,0 +1,176 @@
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
+ #               2024 Alibaba Inc (authors: Xiang Lyu)
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ from contextlib import nullcontext
+ import os
+
+ import torch
+ import torch.distributed as dist
+
+ from cosyvoice.utils.train_utils import update_parameter_and_lr, log_per_step, log_per_save, batch_forward, batch_backward, save_model, cosyvoice_join
+
+
+ class Executor:
+
+     def __init__(self, gan: bool = False, ref_model: torch.nn.Module = None, dpo_loss: torch.nn.Module = None):
+         self.gan = gan
+         self.ref_model = ref_model
+         self.dpo_loss = dpo_loss
+         self.step = 0
+         self.epoch = 0
+         self.rank = int(os.environ.get('RANK', 0))
+         self.device = torch.device('cuda:{}'.format(self.rank))
+
+     def train_one_epoc(self, model, optimizer, scheduler, train_data_loader, cv_data_loader, writer, info_dict, scaler, group_join, ref_model=None):
+         ''' Train one epoch
+         '''
+
+         lr = optimizer.param_groups[0]['lr']
+         logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank))
+         logging.info('using accumulate grad, new batch size is {} times'
+                      ' larger than before'.format(info_dict['accum_grad']))
+         # A context manager to be used in conjunction with an instance of
+         # torch.nn.parallel.DistributedDataParallel to be able to train
+         # with uneven inputs across participating processes.
+         model.train()
+         if self.ref_model is not None:
+             self.ref_model.eval()
+         model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext
+         with model_context():
+             for batch_idx, batch_dict in enumerate(train_data_loader):
+                 info_dict["tag"] = "TRAIN"
+                 info_dict["step"] = self.step
+                 info_dict["epoch"] = self.epoch
+                 info_dict["batch_idx"] = batch_idx
+                 if cosyvoice_join(group_join, info_dict):
+                     break
+
+                 # Disable gradient synchronizations across DDP processes.
+                 # Within this context, gradients will be accumulated on module
+                 # variables, which will later be synchronized.
+                 if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0:
+                     context = model.no_sync
+                 # Used for single gpu training and DDP gradient synchronization
+                 # processes.
+                 else:
+                     context = nullcontext
+
+                 with context():
+                     info_dict = batch_forward(model, batch_dict, scaler, info_dict, ref_model=self.ref_model, dpo_loss=self.dpo_loss)
+                     info_dict = batch_backward(model, scaler, info_dict)
+
+                 info_dict = update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict)
+                 log_per_step(writer, info_dict)
+                 # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save
+                 if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and \
+                         (batch_idx + 1) % info_dict["accum_grad"] == 0:
+                     dist.barrier()
+                     self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False)
+                     model.train()
+                 if (batch_idx + 1) % info_dict["accum_grad"] == 0:
+                     self.step += 1
+         dist.barrier()
+         self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True)
+
+     def train_one_epoc_gan(self, model, optimizer, scheduler, optimizer_d, scheduler_d, train_data_loader, cv_data_loader,
+                            writer, info_dict, scaler, group_join):
+         ''' Train one epoch
+         '''
+
+         lr = optimizer.param_groups[0]['lr']
+         logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank))
+         logging.info('using accumulate grad, new batch size is {} times'
+                      ' larger than before'.format(info_dict['accum_grad']))
+         # A context manager to be used in conjunction with an instance of
+         # torch.nn.parallel.DistributedDataParallel to be able to train
+         # with uneven inputs across participating processes.
+         model.train()
+         model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext
+         with model_context():
+             for batch_idx, batch_dict in enumerate(train_data_loader):
+                 info_dict["tag"] = "TRAIN"
+                 info_dict["step"] = self.step
+                 info_dict["epoch"] = self.epoch
+                 info_dict["batch_idx"] = batch_idx
+                 if cosyvoice_join(group_join, info_dict):
+                     break
+
+                 # Disable gradient synchronizations across DDP processes.
+                 # Within this context, gradients will be accumulated on module
+                 # variables, which will later be synchronized.
+                 if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0:
+                     context = model.no_sync
+                 # Used for single gpu training and DDP gradient synchronization
+                 # processes.
+                 else:
+                     context = nullcontext
+
+                 with context():
+                     batch_dict['turn'] = 'discriminator'
+                     info_dict = batch_forward(model, batch_dict, scaler, info_dict)
+                     info_dict = batch_backward(model, scaler, info_dict)
+                 info_dict = update_parameter_and_lr(model, optimizer_d, scheduler_d, scaler, info_dict)
+                 optimizer.zero_grad()
+                 log_per_step(writer, info_dict)
+                 with context():
+                     batch_dict['turn'] = 'generator'
+                     info_dict = batch_forward(model, batch_dict, scaler, info_dict)
+                     info_dict = batch_backward(model, scaler, info_dict)
+                 info_dict = update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict)
+                 optimizer_d.zero_grad()
+                 log_per_step(writer, info_dict)
+                 # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save
+                 if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and \
+                         (batch_idx + 1) % info_dict["accum_grad"] == 0:
+                     dist.barrier()
+                     self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False)
+                     model.train()
+                 if (batch_idx + 1) % info_dict["accum_grad"] == 0:
+                     self.step += 1
+         dist.barrier()
+         self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True)
+
+     @torch.inference_mode()
+     def cv(self, model, cv_data_loader, writer, info_dict, on_batch_end=True):
+         ''' Cross validation
+         '''
+         logging.info('Epoch {} Step {} on_batch_end {} CV rank {}'.format(self.epoch, self.step + 1, on_batch_end, self.rank))
+         model.eval()
+         total_num_utts, total_loss_dict = 0, {}  # avoid division by 0
+         for batch_idx, batch_dict in enumerate(cv_data_loader):
+             info_dict["tag"] = "CV"
+             info_dict["step"] = self.step
+             info_dict["epoch"] = self.epoch
+             info_dict["batch_idx"] = batch_idx
+
+             num_utts = len(batch_dict["utts"])
+             total_num_utts += num_utts
+
+             if self.gan is True:
+                 batch_dict['turn'] = 'generator'
+             info_dict = batch_forward(model, batch_dict, None, info_dict)
+
+             for k, v in info_dict['loss_dict'].items():
+                 if k not in total_loss_dict:
+                     total_loss_dict[k] = []
+                 total_loss_dict[k].append(v.item() * num_utts)
+             log_per_step(None, info_dict)
+         for k, v in total_loss_dict.items():
+             total_loss_dict[k] = sum(v) / total_num_utts
+         info_dict['loss_dict'] = total_loss_dict
+         log_per_save(writer, info_dict)
+         model_name = 'epoch_{}_whole'.format(self.epoch) if on_batch_end else 'epoch_{}_step_{}'.format(self.epoch, self.step + 1)
+         save_model(model, model_name, info_dict)
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
2
+ # 2024 Alibaba Inc (authors: Xiang Lyu, Zetao Hu)
3
+ # 2025 Alibaba Inc (authors: Xiang Lyu, Yabin Li)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import os
18
+ import json
19
+ import torch
20
+ import torchaudio
21
+ import logging
22
+ logging.getLogger('matplotlib').setLevel(logging.WARNING)
23
+ logging.basicConfig(level=logging.DEBUG,
24
+ format='%(asctime)s %(levelname)s %(message)s')
25
+
26
+
27
+ def read_lists(list_file):
28
+ lists = []
29
+ with open(list_file, 'r', encoding='utf8') as fin:
30
+ for line in fin:
31
+ lists.append(line.strip())
32
+ return lists
33
+
34
+
35
+ def read_json_lists(list_file):
36
+ lists = read_lists(list_file)
37
+ results = {}
38
+ for fn in lists:
39
+ with open(fn, 'r', encoding='utf8') as fin:
40
+ results.update(json.load(fin))
41
+ return results
42
+
43
+
44
+ def load_wav(wav, target_sr):
45
+ speech, sample_rate = torchaudio.load(wav, backend='soundfile')
46
+ speech = speech.mean(dim=0, keepdim=True)
47
+ if sample_rate != target_sr:
48
+ assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr)
49
+ speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech)
50
+ return speech
51
+
52
+
53
+ def convert_onnx_to_trt(trt_model, trt_kwargs, onnx_model, fp16):
54
+ import tensorrt as trt
55
+ logging.info("Converting onnx to trt...")
56
+ network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
57
+ logger = trt.Logger(trt.Logger.INFO)
58
+ builder = trt.Builder(logger)
59
+ network = builder.create_network(network_flags)
60
+ parser = trt.OnnxParser(network, logger)
61
+ config = builder.create_builder_config()
62
+ config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 32) # 4GB
63
+ if fp16:
64
+ config.set_flag(trt.BuilderFlag.FP16)
65
+ profile = builder.create_optimization_profile()
66
+ # load onnx model
67
+ with open(onnx_model, "rb") as f:
68
+ if not parser.parse(f.read()):
69
+ for error in range(parser.num_errors):
70
+ print(parser.get_error(error))
71
+ raise ValueError('failed to parse {}'.format(onnx_model))
72
+ # set input shapes
73
+ for i in range(len(trt_kwargs['input_names'])):
74
+ profile.set_shape(trt_kwargs['input_names'][i], trt_kwargs['min_shape'][i], trt_kwargs['opt_shape'][i], trt_kwargs['max_shape'][i])
75
+ tensor_dtype = trt.DataType.HALF if fp16 else trt.DataType.FLOAT
76
+ # set input and output data type
77
+ for i in range(network.num_inputs):
78
+ input_tensor = network.get_input(i)
79
+ input_tensor.dtype = tensor_dtype
80
+ for i in range(network.num_outputs):
81
+ output_tensor = network.get_output(i)
82
+ output_tensor.dtype = tensor_dtype
83
+ config.add_optimization_profile(profile)
84
+ engine_bytes = builder.build_serialized_network(network, config)
85
+ # save trt engine
86
+ with open(trt_model, "wb") as f:
87
+ f.write(engine_bytes)
88
+ logging.info("Succesfully convert onnx to trt...")
89
+
90
+
91
+ def export_cosyvoice2_vllm(model, model_path, device):
92
+ if os.path.exists(model_path):
93
+ return
94
+ pad_to = DEFAULT_VOCAB_PADDING_SIZE = 64
95
+ vocab_size = model.speech_embedding.num_embeddings
96
+ feature_size = model.speech_embedding.embedding_dim
97
+ pad_vocab_size = ((vocab_size + pad_to - 1) // pad_to) * pad_to
98
+
99
+ dtype = torch.bfloat16
100
+ # lm_head
101
+ new_lm_head = torch.nn.Linear(in_features=feature_size, out_features=pad_vocab_size, bias=True)
102
+ with torch.no_grad():
103
+ new_lm_head.weight[:vocab_size] = model.llm_decoder.weight
104
+ new_lm_head.bias[:vocab_size] = model.llm_decoder.bias
105
+ new_lm_head.weight[vocab_size:] = 0
106
+ new_lm_head.bias[vocab_size:] = 0
107
+ model.llm.model.lm_head = new_lm_head
108
+ new_codec_embed = torch.nn.Linear(in_features=feature_size, out_features=pad_vocab_size)
109
+ # embed_tokens
110
+ embed_tokens = model.llm.model.model.embed_tokens
111
+ with torch.no_grad():
112
+ new_codec_embed.weight[:vocab_size] = model.speech_embedding.weight
113
+ new_codec_embed.weight[vocab_size:] = 0
114
+ model.llm.model.set_input_embeddings(new_codec_embed)
115
+ model.llm.model.to(device)
116
+ model.llm.model.to(dtype)
117
+ tmp_vocab_size = model.llm.model.config.vocab_size
118
+ tmp_tie_embedding = model.llm.model.config.tie_word_embeddings
119
+ del model.llm.model.generation_config.eos_token_id
120
+ del model.llm.model.config.bos_token_id
121
+ del model.llm.model.config.eos_token_id
122
+ model.llm.model.config.vocab_size = pad_vocab_size
123
+ model.llm.model.config.tie_word_embeddings = False
124
+ model.llm.model.config.use_bias = True
125
+ model.llm.model.save_pretrained(model_path)
126
+ os.system('sed -i s@Qwen2ForCausalLM@CosyVoice2ForCausalLM@g {}/config.json'.format(os.path.abspath(model_path)))
127
+ model.llm.model.config.vocab_size = tmp_vocab_size
128
+ model.llm.model.config.tie_word_embeddings = tmp_tie_embedding
129
+ model.llm.model.set_input_embeddings(embed_tokens)
speech/cosyvoice/utils/frontend_utils.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import re
16
+ import regex
17
+ chinese_char_pattern = re.compile(r'[\u4e00-\u9fff]+')
18
+
19
+
20
+ # whether contain chinese character
21
+ def contains_chinese(text):
22
+ return bool(chinese_char_pattern.search(text))
23
+
24
+
25
+ # replace special symbol
26
+ def replace_corner_mark(text):
27
+ text = text.replace('²', '平方')
28
+ text = text.replace('³', '立方')
29
+ return text
30
+
31
+
32
+ # remove meaningless symbol
33
+ def remove_bracket(text):
34
+ text = text.replace('(', '').replace(')', '')
35
+ text = text.replace('【', '').replace('】', '')
36
+ text = text.replace('`', '').replace('`', '')
37
+ text = text.replace("——", " ")
38
+ return text
39
+
40
+
41
+ # spell Arabic numerals
42
+ def spell_out_number(text: str, inflect_parser):
43
+ new_text = []
44
+ st = None
45
+ for i, c in enumerate(text):
46
+ if not c.isdigit():
47
+ if st is not None:
48
+ num_str = inflect_parser.number_to_words(text[st: i])
49
+ new_text.append(num_str)
50
+ st = None
51
+ new_text.append(c)
52
+ else:
53
+ if st is None:
54
+ st = i
55
+ if st is not None and st < len(text):
56
+ num_str = inflect_parser.number_to_words(text[st:])
57
+ new_text.append(num_str)
58
+ return ''.join(new_text)
59
+
60
+
61
+ # split paragraph logic:
62
+ # 1. per sentence: max len token_max_n, min len token_min_n; merge if the last sentence is shorter than merge_len
63
+ # 2. calculate sentence length according to lang
64
+ # 3. split sentences according to punctuation
65
+ def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=60, merge_len=20, comma_split=False):
66
+ def calc_utt_length(_text: str):
67
+ if lang == "zh":
68
+ return len(_text)
69
+ else:
70
+ return len(tokenize(_text))
71
+
72
+ def should_merge(_text: str):
73
+ if lang == "zh":
74
+ return len(_text) < merge_len
75
+ else:
76
+ return len(tokenize(_text)) < merge_len
77
+
78
+ if lang == "zh":
79
+ pounc = ['。', '?', '!', ';', ':', '、', '.', '?', '!', ';']
80
+ else:
81
+ pounc = ['.', '?', '!', ';', ':']
82
+ if comma_split:
83
+ pounc.extend([',', ','])
84
+
85
+ if text[-1] not in pounc:
86
+ if lang == "zh":
87
+ text += "。"
88
+ else:
89
+ text += "."
90
+
91
+ st = 0
92
+ utts = []
93
+ for i, c in enumerate(text):
94
+ if c in pounc:
95
+ if len(text[st: i]) > 0:
96
+ utts.append(text[st: i] + c)
97
+ if i + 1 < len(text) and text[i + 1] in ['"', '”']:
98
+ tmp = utts.pop(-1)
99
+ utts.append(tmp + text[i + 1])
100
+ st = i + 2
101
+ else:
102
+ st = i + 1
103
+
104
+ final_utts = []
105
+ cur_utt = ""
106
+ for utt in utts:
107
+ if calc_utt_length(cur_utt + utt) > token_max_n and calc_utt_length(cur_utt) > token_min_n:
108
+ final_utts.append(cur_utt)
109
+ cur_utt = ""
110
+ cur_utt = cur_utt + utt
111
+ if len(cur_utt) > 0:
112
+ if should_merge(cur_utt) and len(final_utts) != 0:
113
+ final_utts[-1] = final_utts[-1] + cur_utt
114
+ else:
115
+ final_utts.append(cur_utt)
116
+
117
+ return final_utts
118
+
119
+
120
+ # remove blanks between Chinese characters
121
+ def replace_blank(text: str):
122
+ out_str = []
123
+ for i, c in enumerate(text):
124
+ if c == " ":
125
+ if ((text[i + 1].isascii() and text[i + 1] != " ") and
126
+ (text[i - 1].isascii() and text[i - 1] != " ")):
127
+ out_str.append(c)
128
+ else:
129
+ out_str.append(c)
130
+ return "".join(out_str)
131
+
132
+
133
+ def is_only_punctuation(text):
134
+ # Regular expression: Match strings that consist only of punctuation marks or are empty.
135
+ punctuation_pattern = r'^[\p{P}\p{S}]*$'
136
+ return bool(regex.fullmatch(punctuation_pattern, text))
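
A minimal usage sketch for the text-normalization helpers above (illustrative only, not part of this commit; the sample sentence, the str.split tokenizer and the inflect parser are stand-ins for what the frontend actually passes in):

    import inflect
    from cosyvoice.utils.frontend_utils import contains_chinese, spell_out_number, split_paragraph

    text = "CosyVoice was released in 2024. It supports zh and en."
    text = spell_out_number(text, inflect.engine())  # digits -> English words
    lang = "zh" if contains_chinese(text) else "en"
    sentences = split_paragraph(text, tokenize=str.split, lang=lang,
                                token_max_n=80, token_min_n=60, merge_len=20)
    print(sentences)
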
speech/cosyvoice/utils/losses.py ADDED
@@ -0,0 +1,57 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ from typing import Tuple
4
+
5
+
6
+ def tpr_loss(disc_real_outputs, disc_generated_outputs, tau):
7
+ loss = 0
8
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
9
+ m_DG = torch.median((dr - dg))
10
+ L_rel = torch.mean((((dr - dg) - m_DG) ** 2)[dr < dg + m_DG])
11
+ loss += tau - F.relu(tau - L_rel)
12
+ return loss
13
+
14
+
15
+ def mel_loss(real_speech, generated_speech, mel_transforms):
16
+ loss = 0
17
+ for transform in mel_transforms:
18
+ mel_r = transform(real_speech)
19
+ mel_g = transform(generated_speech)
20
+ loss += F.l1_loss(mel_g, mel_r)
21
+ return loss
22
+
23
+
24
+ class DPOLoss(torch.nn.Module):
25
+ """
26
+ DPO Loss
27
+ """
28
+
29
+ def __init__(self, beta: float, label_smoothing: float = 0.0, ipo: bool = False) -> None:
30
+ super().__init__()
31
+ self.beta = beta
32
+ self.label_smoothing = label_smoothing
33
+ self.ipo = ipo
34
+
35
+ def forward(
36
+ self,
37
+ policy_chosen_logps: torch.Tensor,
38
+ policy_rejected_logps: torch.Tensor,
39
+ reference_chosen_logps: torch.Tensor,
40
+ reference_rejected_logps: torch.Tensor,
41
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
42
+ pi_logratios = policy_chosen_logps - policy_rejected_logps
43
+ ref_logratios = reference_chosen_logps - reference_rejected_logps
44
+ logits = pi_logratios - ref_logratios
45
+ if self.ipo:
46
+ losses = (logits - 1 / (2 * self.beta)) ** 2 # Eq. 17 of https://arxiv.org/pdf/2310.12036v2.pdf
47
+ else:
48
+ # Eq. 3 https://ericmitchell.ai/cdpo.pdf; label_smoothing=0 gives original DPO (Eq. 7 of https://arxiv.org/pdf/2305.18290.pdf)
49
+ losses = (
50
+ -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
51
+ - F.logsigmoid(-self.beta * logits) * self.label_smoothing
52
+ )
53
+ loss = losses.mean()
54
+ chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach()
55
+ rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach()
56
+
57
+ return loss, chosen_rewards, rejected_rewards
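
A minimal shape/call sketch for DPOLoss (illustrative only, not part of this commit; the log-probabilities below are random placeholders rather than real sequence log-probs):

    import torch
    from cosyvoice.utils.losses import DPOLoss

    dpo = DPOLoss(beta=0.1, label_smoothing=0.0, ipo=False)
    batch = 4  # preference pairs
    policy_chosen, policy_rejected = torch.randn(batch), torch.randn(batch)
    ref_chosen, ref_rejected = torch.randn(batch), torch.randn(batch)
    loss, chosen_reward, rejected_reward = dpo(policy_chosen, policy_rejected,
                                               ref_chosen, ref_rejected)
    print(loss.item(), chosen_reward.mean().item(), rejected_reward.mean().item())
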
speech/cosyvoice/utils/mask.py ADDED
@@ -0,0 +1,265 @@
1
+ # Copyright (c) 2019 Shigeki Karita
2
+ # 2020 Mobvoi Inc (Binbin Zhang)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import torch
18
+ '''
19
+ def subsequent_mask(
20
+ size: int,
21
+ device: torch.device = torch.device("cpu"),
22
+ ) -> torch.Tensor:
23
+ """Create mask for subsequent steps (size, size).
24
+
25
+ This mask is used only in decoder which works in an auto-regressive mode.
26
+ This means the current step could only do attention with its left steps.
27
+
28
+ In encoder, fully attention is used when streaming is not necessary and
29
+ the sequence is not long. In this case, no attention mask is needed.
30
+
31
+ When streaming is need, chunk-based attention is used in encoder. See
32
+ subsequent_chunk_mask for the chunk-based attention mask.
33
+
34
+ Args:
35
+ size (int): size of mask
36
+ str device (str): "cpu" or "cuda" or torch.Tensor.device
37
+ dtype (torch.device): result dtype
38
+
39
+ Returns:
40
+ torch.Tensor: mask
41
+
42
+ Examples:
43
+ >>> subsequent_mask(3)
44
+ [[1, 0, 0],
45
+ [1, 1, 0],
46
+ [1, 1, 1]]
47
+ """
48
+ ret = torch.ones(size, size, device=device, dtype=torch.bool)
49
+ return torch.tril(ret)
50
+ '''
51
+
52
+
53
+ def subsequent_mask(
54
+ size: int,
55
+ device: torch.device = torch.device("cpu"),
56
+ ) -> torch.Tensor:
57
+ """Create mask for subsequent steps (size, size).
58
+
59
+ This mask is used only in decoder which works in an auto-regressive mode.
60
+ This means the current step could only do attention with its left steps.
61
+
62
+ In encoder, full attention is used when streaming is not necessary and
63
+ the sequence is not long. In this case, no attention mask is needed.
64
+
65
+ When streaming is needed, chunk-based attention is used in encoder. See
66
+ subsequent_chunk_mask for the chunk-based attention mask.
67
+
68
+ Args:
69
+ size (int): size of mask
70
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
71
+ dtype (torch.device): result dtype
72
+
73
+ Returns:
74
+ torch.Tensor: mask
75
+
76
+ Examples:
77
+ >>> subsequent_mask(3)
78
+ [[1, 0, 0],
79
+ [1, 1, 0],
80
+ [1, 1, 1]]
81
+ """
82
+ arange = torch.arange(size, device=device)
83
+ mask = arange.expand(size, size)
84
+ arange = arange.unsqueeze(-1)
85
+ mask = mask <= arange
86
+ return mask
87
+
88
+
89
+ def subsequent_chunk_mask_deprecated(
90
+ size: int,
91
+ chunk_size: int,
92
+ num_left_chunks: int = -1,
93
+ device: torch.device = torch.device("cpu"),
94
+ ) -> torch.Tensor:
95
+ """Create mask for subsequent steps (size, size) with chunk size,
96
+ this is for streaming encoder
97
+
98
+ Args:
99
+ size (int): size of mask
100
+ chunk_size (int): size of chunk
101
+ num_left_chunks (int): number of left chunks
102
+ <0: use full chunk
103
+ >=0: use num_left_chunks
104
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
105
+
106
+ Returns:
107
+ torch.Tensor: mask
108
+
109
+ Examples:
110
+ >>> subsequent_chunk_mask(4, 2)
111
+ [[1, 1, 0, 0],
112
+ [1, 1, 0, 0],
113
+ [1, 1, 1, 1],
114
+ [1, 1, 1, 1]]
115
+ """
116
+ ret = torch.zeros(size, size, device=device, dtype=torch.bool)
117
+ for i in range(size):
118
+ if num_left_chunks < 0:
119
+ start = 0
120
+ else:
121
+ start = max((i // chunk_size - num_left_chunks) * chunk_size, 0)
122
+ ending = min((i // chunk_size + 1) * chunk_size, size)
123
+ ret[i, start:ending] = True
124
+ return ret
125
+
126
+
127
+ def subsequent_chunk_mask(
128
+ size: int,
129
+ chunk_size: int,
130
+ num_left_chunks: int = -1,
131
+ device: torch.device = torch.device("cpu"),
132
+ ) -> torch.Tensor:
133
+ """Create mask for subsequent steps (size, size) with chunk size,
134
+ this is for streaming encoder
135
+
136
+ Args:
137
+ size (int): size of mask
138
+ chunk_size (int): size of chunk
139
+ num_left_chunks (int): number of left chunks
140
+ <0: use full chunk
141
+ >=0: use num_left_chunks
142
+ device (torch.device): "cpu" or "cuda" or torch.Tensor.device
143
+
144
+ Returns:
145
+ torch.Tensor: mask
146
+
147
+ Examples:
148
+ >>> subsequent_chunk_mask(4, 2)
149
+ [[1, 1, 0, 0],
150
+ [1, 1, 0, 0],
151
+ [1, 1, 1, 1],
152
+ [1, 1, 1, 1]]
153
+ """
154
+ # NOTE this modified implementation meets onnx export requirements, but it doesn't support num_left_chunks
155
+ pos_idx = torch.arange(size, device=device)
156
+ block_value = (torch.div(pos_idx, chunk_size, rounding_mode='trunc') + 1) * chunk_size
157
+ ret = pos_idx.unsqueeze(0) < block_value.unsqueeze(1)
158
+ return ret
159
+
160
+
161
+ def add_optional_chunk_mask(xs: torch.Tensor,
162
+ masks: torch.Tensor,
163
+ use_dynamic_chunk: bool,
164
+ use_dynamic_left_chunk: bool,
165
+ decoding_chunk_size: int,
166
+ static_chunk_size: int,
167
+ num_decoding_left_chunks: int,
168
+ enable_full_context: bool = True):
169
+ """ Apply optional mask for encoder.
170
+
171
+ Args:
172
+ xs (torch.Tensor): padded input, (B, L, D), L for max length
173
+ mask (torch.Tensor): mask for xs, (B, 1, L)
174
+ use_dynamic_chunk (bool): whether to use dynamic chunk or not
175
+ use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
176
+ training.
177
+ decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
178
+ 0: default for training, use random dynamic chunk.
179
+ <0: for decoding, use full chunk.
180
+ >0: for decoding, use fixed chunk size as set.
181
+ static_chunk_size (int): chunk size for static chunk training/decoding
182
+ if it's greater than 0, if use_dynamic_chunk is true,
183
+ this parameter will be ignored
184
+ num_decoding_left_chunks: number of left chunks, this is for decoding,
185
+ the chunk size is decoding_chunk_size.
186
+ >=0: use num_decoding_left_chunks
187
+ <0: use all left chunks
188
+ enable_full_context (bool):
189
+ True: chunk size is either [1, 25] or full context(max_len)
190
+ False: chunk size ~ U[1, 25]
191
+
192
+ Returns:
193
+ torch.Tensor: chunk mask of the input xs.
194
+ """
195
+ # Whether to use chunk mask or not
196
+ if use_dynamic_chunk:
197
+ max_len = xs.size(1)
198
+ if decoding_chunk_size < 0:
199
+ chunk_size = max_len
200
+ num_left_chunks = -1
201
+ elif decoding_chunk_size > 0:
202
+ chunk_size = decoding_chunk_size
203
+ num_left_chunks = num_decoding_left_chunks
204
+ else:
205
+ # chunk size is either [1, 25] or full context(max_len).
206
+ # Since we use 4 times subsampling and allow up to 1s(100 frames)
207
+ # delay, the maximum frame is 100 / 4 = 25.
208
+ chunk_size = torch.randint(1, max_len, (1, )).item()
209
+ num_left_chunks = -1
210
+ if chunk_size > max_len // 2 and enable_full_context:
211
+ chunk_size = max_len
212
+ else:
213
+ chunk_size = chunk_size % 25 + 1
214
+ if use_dynamic_left_chunk:
215
+ max_left_chunks = (max_len - 1) // chunk_size
216
+ num_left_chunks = torch.randint(0, max_left_chunks,
217
+ (1, )).item()
218
+ chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,
219
+ num_left_chunks,
220
+ xs.device) # (L, L)
221
+ chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)
222
+ chunk_masks = masks & chunk_masks # (B, L, L)
223
+ elif static_chunk_size > 0:
224
+ num_left_chunks = num_decoding_left_chunks
225
+ chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,
226
+ num_left_chunks,
227
+ xs.device) # (L, L)
228
+ chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)
229
+ chunk_masks = masks & chunk_masks # (B, L, L)
230
+ else:
231
+ chunk_masks = masks
232
+ assert chunk_masks.dtype == torch.bool
233
+ if (chunk_masks.sum(dim=-1) == 0).sum().item() != 0:
234
+ print('get chunk_masks all false at some timestep, force set to true, make sure they are masked in future computation!')
235
+ chunk_masks[chunk_masks.sum(dim=-1) == 0] = True
236
+ return chunk_masks
237
+
238
+
239
+ def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
240
+ """Make mask tensor containing indices of padded part.
241
+
242
+ See description of make_non_pad_mask.
243
+
244
+ Args:
245
+ lengths (torch.Tensor): Batch of lengths (B,).
246
+ Returns:
247
+ torch.Tensor: Mask tensor containing indices of padded part.
248
+
249
+ Examples:
250
+ >>> lengths = [5, 3, 2]
251
+ >>> make_pad_mask(lengths)
252
+ masks = [[0, 0, 0, 0 ,0],
253
+ [0, 0, 0, 1, 1],
254
+ [0, 0, 1, 1, 1]]
255
+ """
256
+ batch_size = lengths.size(0)
257
+ max_len = max_len if max_len > 0 else lengths.max().item()
258
+ seq_range = torch.arange(0,
259
+ max_len,
260
+ dtype=torch.int64,
261
+ device=lengths.device)
262
+ seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
263
+ seq_length_expand = lengths.unsqueeze(-1)
264
+ mask = seq_range_expand >= seq_length_expand
265
+ return mask
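
A minimal sketch exercising the three mask helpers above on toy inputs (illustrative only, not part of this commit):

    import torch
    from cosyvoice.utils.mask import subsequent_mask, subsequent_chunk_mask, make_pad_mask

    print(subsequent_mask(3))           # lower-triangular causal mask, shape (3, 3)
    print(subsequent_chunk_mask(4, 2))  # chunk-causal mask, shape (4, 4)
    lengths = torch.tensor([5, 3, 2])
    print(make_pad_mask(lengths))       # True at padded positions, shape (3, 5)
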
speech/cosyvoice/utils/scheduler.py ADDED
@@ -0,0 +1,738 @@
1
+ # Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
2
+ # 2022 Ximalaya Inc (Yuguang Yang)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ # Modified from ESPnet(https://github.com/espnet/espnet)
17
+ # NeMo(https://github.com/NVIDIA/NeMo)
18
+
19
+ from typing import Union
20
+
21
+ import math
22
+ import warnings
23
+ import torch
24
+ from torch.optim.lr_scheduler import _LRScheduler
25
+
26
+
27
+ class WarmupLR(_LRScheduler):
28
+ """The WarmupLR scheduler
29
+
30
+ This scheduler is almost same as NoamLR Scheduler except for following
31
+ difference:
32
+
33
+ NoamLR:
34
+ lr = optimizer.lr * model_size ** -0.5
35
+ * min(step ** -0.5, step * warmup_step ** -1.5)
36
+ WarmupLR:
37
+ lr = optimizer.lr * warmup_step ** 0.5
38
+ * min(step ** -0.5, step * warmup_step ** -1.5)
39
+
40
+ Note that the maximum lr equals optimizer.lr in this scheduler.
41
+
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ optimizer: torch.optim.Optimizer,
47
+ warmup_steps: Union[int, float] = 25000,
48
+ last_epoch: int = -1,
49
+ ):
50
+ self.warmup_steps = warmup_steps
51
+
52
+ # __init__() must be invoked before setting field
53
+ # because step() is also invoked in __init__()
54
+ super().__init__(optimizer, last_epoch)
55
+
56
+ def __repr__(self):
57
+ return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})"
58
+
59
+ def get_lr(self):
60
+ step_num = self.last_epoch + 1
61
+ if self.warmup_steps == 0:
62
+ return [lr * step_num**-0.5 for lr in self.base_lrs]
63
+ else:
64
+ return [
65
+ lr * self.warmup_steps**0.5 *
66
+ min(step_num**-0.5, step_num * self.warmup_steps**-1.5)
67
+ for lr in self.base_lrs
68
+ ]
69
+
70
+ def set_step(self, step: int):
71
+ self.last_epoch = step
72
+
73
+
74
+ class WarmupPolicy(_LRScheduler):
75
+ """Adds warmup kwargs and warmup logic to lr policy.
76
+ All arguments should be passed as kwargs for clarity,
77
+ Args:
78
+ warmup_steps: Number of training steps in warmup stage
79
+ warmup_ratio: Ratio of warmup steps to total steps
80
+ max_steps: Total number of steps while training or `None` for
81
+ infinite training
82
+ """
83
+
84
+ def __init__(self,
85
+ optimizer,
86
+ *,
87
+ warmup_steps=None,
88
+ warmup_ratio=None,
89
+ max_steps=None,
90
+ min_lr=0.0,
91
+ last_epoch=-1):
92
+ assert not (warmup_steps is not None and warmup_ratio is not None),\
93
+ "Either use particular number of step or ratio"
94
+ assert warmup_ratio is None or max_steps is not None, \
95
+ "If there is a ratio, there should be a total steps"
96
+
97
+ # It is necessary to assign all attributes *before* __init__,
98
+ # as class is wrapped by an inner class.
99
+ self.max_steps = max_steps
100
+ if warmup_steps is not None:
101
+ self.warmup_steps = warmup_steps
102
+ elif warmup_ratio is not None:
103
+ self.warmup_steps = int(warmup_ratio * max_steps)
104
+ else:
105
+ self.warmup_steps = 0
106
+
107
+ self.min_lr = min_lr
108
+ super().__init__(optimizer, last_epoch)
109
+
110
+ def get_lr(self):
111
+ if not self._get_lr_called_within_step:
112
+ warnings.warn(
113
+ "To get the last learning rate computed "
114
+ "by the scheduler, please use `get_last_lr()`.",
115
+ UserWarning,
116
+ stacklevel=2)
117
+
118
+ step = self.last_epoch
119
+
120
+ if step <= self.warmup_steps and self.warmup_steps > 0:
121
+ return self._get_warmup_lr(step)
122
+
123
+ if step > self.max_steps:
124
+ return [self.min_lr for _ in self.base_lrs]
125
+
126
+ return self._get_lr(step)
127
+
128
+ def _get_warmup_lr(self, step):
129
+ lr_val = (step + 1) / (self.warmup_steps + 1)
130
+ return [initial_lr * lr_val for initial_lr in self.base_lrs]
131
+
132
+ def _get_lr(self, step):
133
+ """Simple const lr policy"""
134
+ return self.base_lrs
135
+
136
+
137
+ class SquareRootConstantPolicy(_LRScheduler):
138
+ """Adds warmup kwargs and warmup logic to lr policy.
139
+ All arguments should be passed as kwargs for clarity,
140
+ Args:
141
+ warmup_steps: Number of training steps in warmup stage
142
+ warmup_ratio: Ratio of warmup steps to total steps
143
+ max_steps: Total number of steps while training or `None` for
144
+ infinite training
145
+ """
146
+
147
+ def __init__(self,
148
+ optimizer,
149
+ *,
150
+ constant_steps=None,
151
+ constant_ratio=None,
152
+ max_steps=None,
153
+ min_lr=0.0,
154
+ last_epoch=-1):
155
+ assert not (constant_steps is not None
156
+ and constant_ratio is not None), \
157
+ "Either use particular number of step or ratio"
158
+ assert constant_ratio is None or max_steps is not None, \
159
+ "If there is a ratio, there should be a total steps"
160
+
161
+ # It is necessary to assign all attributes *before* __init__,
162
+ # as class is wrapped by an inner class.
163
+ self.max_steps = max_steps
164
+ if constant_steps is not None:
165
+ self.constant_steps = constant_steps
166
+ elif constant_ratio is not None:
167
+ self.constant_steps = int(constant_ratio * max_steps)
168
+ else:
169
+ self.constant_steps = 0
170
+
171
+ self.constant_lr = 1 / (constant_steps**0.5)
172
+ self.min_lr = min_lr
173
+ super().__init__(optimizer, last_epoch)
174
+
175
+ def get_lr(self):
176
+ if not self._get_lr_called_within_step:
177
+ warnings.warn(
178
+ "To get the last learning rate computed "
179
+ "by the scheduler, please use `get_last_lr()`.",
180
+ UserWarning,
181
+ stacklevel=2)
182
+
183
+ step = self.last_epoch
184
+
185
+ if step <= self.constant_steps:
186
+ return [self.constant_lr for _ in self.base_lrs]
187
+
188
+ if step > self.max_steps:
189
+ return [self.min_lr for _ in self.base_lrs]
190
+
191
+ return self._get_lr(step)
192
+
193
+ def _get_lr(self, step):
194
+ """Simple const lr policy"""
195
+ return self.base_lrs
196
+
197
+
198
+ class WarmupHoldPolicy(WarmupPolicy):
199
+ """Variant of WarmupPolicy which maintains high
200
+ learning rate for a defined number of steps.
201
+ All arguments should be passed as kwargs for clarity,
202
+ Args:
203
+ warmup_steps: Number of training steps in warmup stage
204
+ warmup_ratio: Ratio of warmup steps to total steps
205
+ hold_steps: Number of training steps to
206
+ hold the learning rate after warm up
207
+ hold_ratio: Ratio of hold steps to total steps
208
+ max_steps: Total number of steps while training or `None` for
209
+ infinite training
210
+ """
211
+
212
+ def __init__(
213
+ self,
214
+ optimizer,
215
+ *,
216
+ warmup_steps=None,
217
+ warmup_ratio=None,
218
+ hold_steps=None,
219
+ hold_ratio=None,
220
+ max_steps=None,
221
+ min_lr=0.0,
222
+ last_epoch=-1,
223
+ ):
224
+ assert not (hold_steps is not None and hold_ratio is not None), \
225
+ "Either use particular number of step or ratio"
226
+ assert hold_ratio is None or max_steps is not None, \
227
+ "If there is a ratio, there should be a total steps"
228
+
229
+ self.min_lr = min_lr
230
+ self._last_warmup_lr = 0.0
231
+
232
+ # Necessary to duplicate as class attributes are hidden in inner class
233
+ self.max_steps = max_steps
234
+ if warmup_steps is not None:
235
+ self.warmup_steps = warmup_steps
236
+ elif warmup_ratio is not None:
237
+ self.warmup_steps = int(warmup_ratio * max_steps)
238
+ else:
239
+ self.warmup_steps = 0
240
+
241
+ if hold_steps is not None:
242
+ self.hold_steps = hold_steps + self.warmup_steps
243
+ elif hold_ratio is not None:
244
+ self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps
245
+ else:
246
+ self.hold_steps = 0
247
+
248
+ super().__init__(
249
+ optimizer,
250
+ warmup_steps=warmup_steps,
251
+ warmup_ratio=warmup_ratio,
252
+ max_steps=max_steps,
253
+ last_epoch=last_epoch,
254
+ min_lr=min_lr,
255
+ )
256
+
257
+ def get_lr(self):
258
+ if not self._get_lr_called_within_step:
259
+ warnings.warn(
260
+ "To get the last learning rate computed by the scheduler,"
261
+ " "
262
+ "please use `get_last_lr()`.",
263
+ UserWarning,
264
+ stacklevel=2)
265
+
266
+ step = self.last_epoch
267
+
268
+ # Warmup phase
269
+ if step <= self.warmup_steps and self.warmup_steps > 0:
270
+ return self._get_warmup_lr(step)
271
+
272
+ # Hold phase
273
+ if (step >= self.warmup_steps) and (step < self.hold_steps):
274
+ return self.base_lrs
275
+
276
+ if step > self.max_steps:
277
+ return [self.min_lr for _ in self.base_lrs]
278
+
279
+ return self._get_lr(step)
280
+
281
+
282
+ class WarmupAnnealHoldPolicy(_LRScheduler):
283
+ """Adds warmup kwargs and warmup logic to lr policy.
284
+ All arguments should be passed as kwargs for clarity,
285
+ Args:
286
+ warmup_steps: Number of training steps in warmup stage
287
+ warmup_ratio: Ratio of warmup steps to total steps
288
+ max_steps: Total number of steps while training or `None` for
289
+ infinite training
290
+ min_lr: Minimum lr to hold the learning rate after decay at.
291
+ constant_steps: Number of steps to keep lr constant at.
292
+ constant_ratio: Ratio of steps to keep lr constant.
293
+ """
294
+
295
+ def __init__(
296
+ self,
297
+ optimizer,
298
+ *,
299
+ warmup_steps=None,
300
+ warmup_ratio=None,
301
+ constant_steps=None,
302
+ constant_ratio=None,
303
+ max_steps=None,
304
+ min_lr=0.0,
305
+ last_epoch=-1,
306
+ ):
307
+ assert not (warmup_steps is not None
308
+ and warmup_ratio is not None), \
309
+ "Either use particular number of step or ratio"
310
+ assert not (constant_steps is not None
311
+ and constant_ratio is not None), \
312
+ "Either use constant_steps or constant_ratio"
313
+ assert warmup_ratio is None or max_steps is not None, \
314
+ "If there is a ratio, there should be a total steps"
315
+
316
+ # It is necessary to assign all attributes *before* __init__,
317
+ # as class is wrapped by an inner class.
318
+ self.max_steps = max_steps
319
+
320
+ if warmup_steps is not None:
321
+ self.warmup_steps = warmup_steps
322
+ elif warmup_ratio is not None:
323
+ self.warmup_steps = int(warmup_ratio * max_steps)
324
+ else:
325
+ self.warmup_steps = 0
326
+
327
+ if constant_steps is not None:
328
+ self.constant_steps = constant_steps
329
+ elif constant_ratio is not None:
330
+ self.constant_steps = int(constant_ratio * max_steps)
331
+ else:
332
+ self.constant_steps = 0
333
+
334
+ self.decay_steps = max_steps - (self.constant_steps +
335
+ self.warmup_steps)
336
+
337
+ self.min_lr = min_lr
338
+ super().__init__(optimizer, last_epoch)
339
+
340
+ def get_lr(self):
341
+ if not self._get_lr_called_within_step:
342
+ warnings.warn(
343
+ "To get the last learning rate computed "
344
+ "by the scheduler, please use `get_last_lr()`.",
345
+ UserWarning,
346
+ stacklevel=2)
347
+
348
+ step = self.last_epoch
349
+
350
+ # Warmup steps
351
+ if self.warmup_steps > 0 and step <= self.warmup_steps:
352
+ return self._get_warmup_lr(step)
353
+
354
+ # Constant steps after warmup and decay
355
+ if self.constant_steps > 0 and (
356
+ self.warmup_steps + self.decay_steps) < step <= self.max_steps:
357
+ return self._get_constant_lr(step)
358
+
359
+ # Min lr after max steps of updates
360
+ if step > self.max_steps:
361
+ return [self.min_lr for _ in self.base_lrs]
362
+
363
+ return self._get_lr(step)
364
+
365
+ def _get_warmup_lr(self, step):
366
+ lr_val = (step + 1) / (self.warmup_steps + 1)
367
+ return [initial_lr * lr_val for initial_lr in self.base_lrs]
368
+
369
+ def _get_constant_lr(self, step):
370
+ return [self.min_lr for _ in self.base_lrs]
371
+
372
+ def _get_lr(self, step):
373
+ """Simple const lr policy"""
374
+ return self.base_lrs
375
+
376
+
377
+ def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
378
+ mult = ((max_steps - step) / max_steps)**0.5
379
+ out_lr = initial_lr * mult
380
+ out_lr = max(out_lr, min_lr)
381
+ return out_lr
382
+
383
+
384
+ def _square_annealing(initial_lr, step, max_steps, min_lr):
385
+ mult = ((max_steps - step) / max_steps)**2
386
+ out_lr = initial_lr * mult
387
+ out_lr = max(out_lr, min_lr)
388
+ return out_lr
389
+
390
+
391
+ def _cosine_annealing(initial_lr, step, max_steps, min_lr):
392
+ mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
393
+ out_lr = (initial_lr - min_lr) * mult + min_lr
394
+ return out_lr
395
+
396
+
397
+ def _linear_warmup_with_cosine_annealing(max_lr, warmup_steps, step,
398
+ decay_steps, min_lr):
399
+ assert max_lr > min_lr
400
+ # Use linear warmup for the initial part.
401
+ if warmup_steps > 0 and step <= warmup_steps:
402
+ return max_lr * float(step) / float(warmup_steps)
403
+
404
+ # For any steps larger than `decay_steps`, use `min_lr`.
405
+ if step > warmup_steps + decay_steps:
406
+ return min_lr
407
+
408
+ # If we are done with the warmup period, use the decay style.
409
+ num_steps_ = step - warmup_steps
410
+ decay_steps_ = decay_steps
411
+ decay_ratio = float(num_steps_) / float(decay_steps_)
412
+ assert decay_ratio >= 0.0
413
+ assert decay_ratio <= 1.0
414
+ delta_lr = max_lr - min_lr
415
+
416
+ coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)
417
+
418
+ return min_lr + coeff * delta_lr
419
+
420
+
421
+ def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
422
+ if cycle:
423
+ multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
424
+ decay_steps *= multiplier
425
+ else:
426
+ step = min(step, decay_steps)
427
+ p = step / decay_steps
428
+ lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
429
+ lr += min_lr
430
+ return lr
431
+
432
+
433
+ def _noam_hold_annealing(initial_lr, step, warmup_steps, hold_steps,
434
+ decay_rate, min_lr):
435
+ # hold_steps = total number of steps
436
+ # to hold the LR, not the warmup + hold steps.
437
+ T_warmup_decay = max(1, warmup_steps**decay_rate)
438
+ T_hold_decay = max(1, (step - hold_steps)**decay_rate)
439
+ lr = (initial_lr * T_warmup_decay) / T_hold_decay
440
+ lr = max(lr, min_lr)
441
+ return lr
442
+
443
+
444
+ class SquareAnnealing(WarmupPolicy):
445
+
446
+ def __init__(self,
447
+ optimizer,
448
+ *,
449
+ max_steps,
450
+ min_lr=1e-5,
451
+ last_epoch=-1,
452
+ **kwargs):
453
+ super().__init__(optimizer=optimizer,
454
+ max_steps=max_steps,
455
+ last_epoch=last_epoch,
456
+ min_lr=min_lr,
457
+ **kwargs)
458
+
459
+ def _get_lr(self, step):
460
+ new_lrs = [
461
+ _square_annealing(
462
+ initial_lr=initial_lr,
463
+ step=step - self.warmup_steps,
464
+ max_steps=self.max_steps - self.warmup_steps,
465
+ min_lr=self.min_lr,
466
+ ) for initial_lr in self.base_lrs
467
+ ]
468
+ return new_lrs
469
+
470
+
471
+ class SquareRootAnnealing(WarmupPolicy):
472
+
473
+ def __init__(self,
474
+ optimizer,
475
+ *,
476
+ max_steps,
477
+ min_lr=0,
478
+ last_epoch=-1,
479
+ **kwargs):
480
+ super().__init__(optimizer=optimizer,
481
+ max_steps=max_steps,
482
+ last_epoch=last_epoch,
483
+ min_lr=min_lr,
484
+ **kwargs)
485
+
486
+ def _get_lr(self, step):
487
+ new_lrs = [
488
+ _squareroot_annealing(initial_lr=initial_lr,
489
+ step=step,
490
+ max_steps=self.max_steps,
491
+ min_lr=self.min_lr)
492
+ for initial_lr in self.base_lrs
493
+ ]
494
+ return new_lrs
495
+
496
+
497
+ class CosineAnnealing(WarmupAnnealHoldPolicy):
498
+
499
+ def __init__(self,
500
+ optimizer,
501
+ *,
502
+ max_steps,
503
+ min_lr=0,
504
+ last_epoch=-1,
505
+ **kwargs):
506
+ super().__init__(optimizer=optimizer,
507
+ max_steps=max_steps,
508
+ last_epoch=last_epoch,
509
+ min_lr=min_lr,
510
+ **kwargs)
511
+
512
+ def _get_lr(self, step):
513
+ for initial_lr in self.base_lrs:
514
+ if initial_lr < self.min_lr:
515
+ raise ValueError(
516
+ f"{self} received an initial learning rate "
517
+ f"that was lower than the minimum learning rate.")
518
+
519
+ if self.constant_steps is None or self.constant_steps == 0:
520
+ new_lrs = [
521
+ _cosine_annealing(
522
+ initial_lr=initial_lr,
523
+ step=step - self.warmup_steps,
524
+ max_steps=self.max_steps - self.warmup_steps,
525
+ min_lr=self.min_lr,
526
+ ) for initial_lr in self.base_lrs
527
+ ]
528
+ else:
529
+ new_lrs = self._get_linear_warmup_with_cosine_annealing_lr(step)
530
+ return new_lrs
531
+
532
+ def _get_warmup_lr(self, step):
533
+ if self.constant_steps is None or self.constant_steps == 0:
534
+ return super()._get_warmup_lr(step)
535
+ else:
536
+ # Use linear warmup for the initial part.
537
+ return self._get_linear_warmup_with_cosine_annealing_lr(step)
538
+
539
+ def _get_constant_lr(self, step):
540
+ # Only called when `constant_steps` > 0.
541
+ return self._get_linear_warmup_with_cosine_annealing_lr(step)
542
+
543
+ def _get_linear_warmup_with_cosine_annealing_lr(self, step):
544
+ # Cosine Schedule for Megatron LM,
545
+ # slightly different warmup schedule + constant LR at the end.
546
+ new_lrs = [
547
+ _linear_warmup_with_cosine_annealing(
548
+ max_lr=self.base_lrs[0],
549
+ warmup_steps=self.warmup_steps,
550
+ step=step,
551
+ decay_steps=self.decay_steps,
552
+ min_lr=self.min_lr,
553
+ ) for _ in self.base_lrs
554
+ ]
555
+ return new_lrs
556
+
557
+
558
+ class NoamAnnealing(_LRScheduler):
559
+
560
+ def __init__(self,
561
+ optimizer,
562
+ *,
563
+ d_model,
564
+ warmup_steps=None,
565
+ warmup_ratio=None,
566
+ max_steps=None,
567
+ min_lr=0.0,
568
+ last_epoch=-1):
569
+ self._normalize = d_model**(-0.5)
570
+ assert not (warmup_steps is not None and warmup_ratio is not None), \
571
+ "Either use particular number of step or ratio"
572
+ assert warmup_ratio is None or max_steps is not None, \
573
+ "If there is a ratio, there should be a total steps"
574
+
575
+ # It is necessary to assign all attributes *before* __init__,
576
+ # as class is wrapped by an inner class.
577
+ self.max_steps = max_steps
578
+ if warmup_steps is not None:
579
+ self.warmup_steps = warmup_steps
580
+ elif warmup_ratio is not None:
581
+ self.warmup_steps = int(warmup_ratio * max_steps)
582
+ else:
583
+ self.warmup_steps = 0
584
+
585
+ self.min_lr = min_lr
586
+ super().__init__(optimizer, last_epoch)
587
+
588
+ def get_lr(self):
589
+ if not self._get_lr_called_within_step:
590
+ warnings.warn(
591
+ "To get the last learning rate computed "
592
+ "by the scheduler, please use `get_last_lr()`.",
593
+ UserWarning,
594
+ stacklevel=2)
595
+
596
+ step = max(1, self.last_epoch)
597
+
598
+ for initial_lr in self.base_lrs:
599
+ if initial_lr < self.min_lr:
600
+ raise ValueError(
601
+ f"{self} received an initial learning rate "
602
+ f"that was lower than the minimum learning rate.")
603
+
604
+ new_lrs = [
605
+ self._noam_annealing(initial_lr=initial_lr, step=step)
606
+ for initial_lr in self.base_lrs
607
+ ]
608
+ return new_lrs
609
+
610
+ def _noam_annealing(self, initial_lr, step):
611
+ if self.warmup_steps > 0:
612
+ mult = self._normalize * min(step**(-0.5),
613
+ step * (self.warmup_steps**(-1.5)))
614
+ else:
615
+ mult = self._normalize * step**(-0.5)
616
+
617
+ out_lr = initial_lr * mult
618
+ if step > self.warmup_steps:
619
+ out_lr = max(out_lr, self.min_lr)
620
+ return out_lr
621
+
622
+
623
+ class NoamHoldAnnealing(WarmupHoldPolicy):
624
+
625
+ def __init__(self,
626
+ optimizer,
627
+ *,
628
+ max_steps,
629
+ decay_rate=0.5,
630
+ min_lr=0.0,
631
+ last_epoch=-1,
632
+ **kwargs):
633
+ """
634
+ From Nemo:
635
+ Implementation of the Noam Hold Annealing policy
636
+ from the SqueezeFormer paper.
637
+
638
+ Unlike NoamAnnealing, the peak learning rate
639
+ can be explicitly set for this scheduler.
640
+ The schedule first performs linear warmup,
641
+ then holds the peak LR, then decays with some schedule for
642
+ the remainder of the steps.
643
+ Therefore the min-lr is still dependent
644
+ on the hyper parameters selected.
645
+
646
+ Its schedule is determined by three factors:
647
+
648
+ Warmup Steps: Initial stage, where linear warmup
649
+ occurs uptil the peak LR is reached. Unlike NoamAnnealing,
650
+ the peak LR is explicitly stated here instead of a scaling factor.
651
+
652
+ Hold Steps: Intermediate stage, where the peak LR
653
+ is maintained for some number of steps. In this region,
654
+ the high peak LR allows the model to converge faster
655
+ if training is stable. However the high LR
656
+ may also cause instability during training.
657
+ Should usually be a significant fraction of training
658
+ steps (around 30-40% of the entire training steps).
659
+
660
+ Decay Steps: Final stage, where the LR rapidly decays
661
+ with some scaling rate (set by decay rate).
662
+ To attain Noam decay, use 0.5,
663
+ for Squeezeformer recommended decay, use 1.0.
664
+ The fast decay after prolonged high LR during
665
+ hold phase allows for rapid convergence.
666
+
667
+ References:
668
+ - [Squeezeformer:
669
+ An Efficient Transformer for Automatic Speech Recognition]
670
+ (https://arxiv.org/abs/2206.00888)
671
+
672
+ Args:
673
+ optimizer: Pytorch compatible Optimizer object.
674
+ warmup_steps: Number of training steps in warmup stage
675
+ warmup_ratio: Ratio of warmup steps to total steps
676
+ hold_steps: Number of training steps to
677
+ hold the learning rate after warm up
678
+ hold_ratio: Ratio of hold steps to total steps
679
+ max_steps: Total number of steps while training or `None` for
680
+ infinite training
681
+ decay_rate: Float value describing the polynomial decay
682
+ after the hold period. Default value
683
+ of 0.5 corresponds to Noam decay.
684
+ min_lr: Minimum learning rate.
685
+ """
686
+ self.decay_rate = decay_rate
687
+ super().__init__(optimizer=optimizer,
688
+ max_steps=max_steps,
689
+ last_epoch=last_epoch,
690
+ min_lr=min_lr,
691
+ **kwargs)
692
+
693
+ def _get_lr(self, step):
694
+ if self.warmup_steps is None or self.warmup_steps == 0:
695
+ raise ValueError(
696
+ "Noam scheduler cannot be used without warmup steps")
697
+
698
+ if self.hold_steps > 0:
699
+ hold_steps = self.hold_steps - self.warmup_steps
700
+ else:
701
+ hold_steps = 0
702
+
703
+ new_lrs = [
704
+ _noam_hold_annealing(
705
+ initial_lr,
706
+ step=step,
707
+ warmup_steps=self.warmup_steps,
708
+ hold_steps=hold_steps,
709
+ decay_rate=self.decay_rate,
710
+ min_lr=self.min_lr,
711
+ ) for initial_lr in self.base_lrs
712
+ ]
713
+ return new_lrs
714
+
715
+ def set_step(self, step: int):
716
+ self.last_epoch = step
717
+
718
+
719
+ class ConstantLR(_LRScheduler):
720
+ """The ConstantLR scheduler
721
+
722
+ This scheduler keeps a constant lr
723
+
724
+ """
725
+
726
+ def __init__(
727
+ self,
728
+ optimizer: torch.optim.Optimizer,
729
+ ):
730
+ # __init__() must be invoked before setting field
731
+ # because step() is also invoked in __init__()
732
+ super().__init__(optimizer)
733
+
734
+ def get_lr(self):
735
+ return self.base_lrs
736
+
737
+ def set_step(self, step: int):
738
+ self.last_epoch = step
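
A minimal sketch of driving one of the schedulers above (illustrative only, not part of this commit; the toy model, lr and warmup_steps values are arbitrary):

    import torch
    from cosyvoice.utils.scheduler import WarmupLR

    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = WarmupLR(optimizer, warmup_steps=1000)
    for _ in range(3000):
        optimizer.step()
        scheduler.step()
    # the lr ramps up linearly to 1e-3 at step 1000, then decays as step ** -0.5
    print(scheduler.get_last_lr())
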
speech/cosyvoice/utils/train_utils.py ADDED
@@ -0,0 +1,367 @@
1
+ # Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
2
+ # 2023 Horizon Inc. (authors: Xingchen Song)
3
+ # 2024 Alibaba Inc (authors: Xiang Lyu)
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import logging
18
+ import os
19
+ import torch
20
+ import json
21
+ import re
22
+ import datetime
23
+ import yaml
24
+
25
+ import deepspeed
26
+ import torch.optim as optim
27
+ import torch.distributed as dist
28
+
29
+ from torch.utils.tensorboard import SummaryWriter
30
+ from torch.utils.data import DataLoader
31
+ from torch.nn.utils import clip_grad_norm_
32
+
33
+ from deepspeed.runtime.zero.stage_1_and_2 import estimate_zero2_model_states_mem_needs_all_live
34
+
35
+ from cosyvoice.dataset.dataset import Dataset
36
+ from cosyvoice.utils.scheduler import WarmupLR, NoamHoldAnnealing, ConstantLR
37
+
38
+
39
+ def init_distributed(args):
40
+ world_size = int(os.environ.get('WORLD_SIZE', 1))
41
+ local_rank = int(os.environ.get('LOCAL_RANK', 0))
42
+ rank = int(os.environ.get('RANK', 0))
43
+ logging.info('training on multiple gpus, this gpu {}'.format(local_rank) +
44
+ ', rank {}, world_size {}'.format(rank, world_size))
45
+ if args.train_engine == 'torch_ddp':
46
+ torch.cuda.set_device(local_rank)
47
+ dist.init_process_group(args.dist_backend)
48
+ else:
49
+ deepspeed.init_distributed(dist_backend=args.dist_backend)
50
+ return world_size, local_rank, rank
51
+
52
+
53
+ def init_dataset_and_dataloader(args, configs, gan, dpo):
54
+ data_pipeline = configs['data_pipeline_gan'] if gan is True else configs['data_pipeline']
55
+ train_dataset = Dataset(args.train_data, data_pipeline=data_pipeline, mode='train', gan=gan, dpo=dpo, shuffle=True, partition=True)
56
+ cv_dataset = Dataset(args.cv_data, data_pipeline=data_pipeline, mode='train', gan=gan, dpo=dpo, shuffle=False, partition=False)
57
+
58
+ # do not use persistent_workers=True, as whisper tokenizer opens tiktoken file each time when the for loop starts
59
+ train_data_loader = DataLoader(train_dataset,
60
+ batch_size=None,
61
+ pin_memory=args.pin_memory,
62
+ num_workers=args.num_workers,
63
+ prefetch_factor=args.prefetch)
64
+ cv_data_loader = DataLoader(cv_dataset,
65
+ batch_size=None,
66
+ pin_memory=args.pin_memory,
67
+ num_workers=args.num_workers,
68
+ prefetch_factor=args.prefetch)
69
+ return train_dataset, cv_dataset, train_data_loader, cv_data_loader
70
+
71
+
72
+ def check_modify_and_save_config(args, configs):
73
+ if args.train_engine == "torch_ddp":
74
+ configs['train_conf']["dtype"] = 'fp32'
75
+ else:
76
+ with open(args.deepspeed_config, 'r') as fin:
77
+ ds_configs = json.load(fin)
78
+ if "fp16" in ds_configs and ds_configs["fp16"]["enabled"]:
79
+ configs['train_conf']["dtype"] = "fp16"
80
+ elif "bf16" in ds_configs and ds_configs["bf16"]["enabled"]:
81
+ configs['train_conf']["dtype"] = "bf16"
82
+ else:
83
+ configs['train_conf']["dtype"] = "fp32"
84
+ assert ds_configs["train_micro_batch_size_per_gpu"] == 1
85
+ # if use deepspeed, override ddp config
86
+ configs['train_conf']['save_per_step'] = int(configs['train_conf']['save_per_step'] *
87
+ configs['train_conf']['accum_grad'] / ds_configs["gradient_accumulation_steps"])
88
+ configs['train_conf']['accum_grad'] = ds_configs["gradient_accumulation_steps"]
89
+ configs['train_conf']['grad_clip'] = ds_configs["gradient_clipping"]
90
+ configs['train_conf']['log_interval'] = ds_configs["steps_per_print"]
91
+ return configs
92
+
93
+
94
+ def wrap_cuda_model(args, model):
95
+ local_world_size = int(os.environ.get('LOCAL_WORLD_SIZE', 1))
96
+ world_size = int(os.environ.get('WORLD_SIZE', 1))
97
+ if args.train_engine == "torch_ddp": # native pytorch ddp
98
+ assert (torch.cuda.is_available())
99
+ model.cuda()
100
+ model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
101
+ else:
102
+ if int(os.environ.get('RANK', 0)) == 0:
103
+ logging.info("Estimating model states memory needs (zero2)...")
104
+ estimate_zero2_model_states_mem_needs_all_live(
105
+ model,
106
+ num_gpus_per_node=local_world_size,
107
+ num_nodes=world_size // local_world_size)
108
+ return model
109
+
110
+
111
+ def init_optimizer_and_scheduler(args, configs, model, gan):
112
+ if gan is False:
113
+ if configs['train_conf']['optim'] == 'adam':
114
+ optimizer = optim.Adam(model.parameters(), **configs['train_conf']['optim_conf'])
115
+ elif configs['train_conf']['optim'] == 'adamw':
116
+ optimizer = optim.AdamW(model.parameters(), **configs['train_conf']['optim_conf'])
117
+ else:
118
+ raise ValueError("unknown optimizer: " + configs['train_conf'])
119
+
120
+ if configs['train_conf']['scheduler'] == 'warmuplr':
121
+ scheduler_type = WarmupLR
122
+ scheduler = WarmupLR(optimizer, **configs['train_conf']['scheduler_conf'])
123
+ elif configs['train_conf']['scheduler'] == 'NoamHoldAnnealing':
124
+ scheduler_type = NoamHoldAnnealing
125
+ scheduler = NoamHoldAnnealing(optimizer, **configs['train_conf']['scheduler_conf'])
126
+ elif configs['train_conf']['scheduler'] == 'constantlr':
127
+ scheduler_type = ConstantLR
128
+ scheduler = ConstantLR(optimizer)
129
+ else:
130
+ raise ValueError("unknown scheduler: " + configs['train_conf'])
131
+
132
+ # use deepspeed optimizer for speedup
133
+ if args.train_engine == "deepspeed":
134
+ def scheduler(opt):
135
+ return scheduler_type(opt, **configs['train_conf']['scheduler_conf'])
136
+ model, optimizer, _, scheduler = deepspeed.initialize(
137
+ args=args,
138
+ model=model,
139
+ optimizer=None,
140
+ lr_scheduler=scheduler,
141
+ model_parameters=model.parameters())
142
+
143
+ optimizer_d, scheduler_d = None, None
144
+
145
+ else:
146
+ # currently we wrap generator and discriminator in one model, so we cannot use deepspeed
147
+ if configs['train_conf']['optim'] == 'adam':
148
+ optimizer = optim.Adam(model.module.generator.parameters(), **configs['train_conf']['optim_conf'])
149
+ elif configs['train_conf']['optim'] == 'adamw':
150
+ optimizer = optim.AdamW(model.module.generator.parameters(), **configs['train_conf']['optim_conf'])
151
+ else:
152
+ raise ValueError("unknown optimizer: " + configs['train_conf'])
153
+
154
+ if configs['train_conf']['scheduler'] == 'warmuplr':
155
+ scheduler_type = WarmupLR
156
+ scheduler = WarmupLR(optimizer, **configs['train_conf']['scheduler_conf'])
157
+ elif configs['train_conf']['scheduler'] == 'NoamHoldAnnealing':
158
+ scheduler_type = NoamHoldAnnealing
159
+ scheduler = NoamHoldAnnealing(optimizer, **configs['train_conf']['scheduler_conf'])
160
+ elif configs['train_conf']['scheduler'] == 'constantlr':
161
+ scheduler_type = ConstantLR
162
+ scheduler = ConstantLR(optimizer)
163
+ else:
164
+ raise ValueError("unknown scheduler: " + configs['train_conf'])
165
+
166
+ if configs['train_conf']['optim_d'] == 'adam':
167
+ optimizer_d = optim.Adam(model.module.discriminator.parameters(), **configs['train_conf']['optim_conf'])
168
+ elif configs['train_conf']['optim_d'] == 'adamw':
169
+ optimizer_d = optim.AdamW(model.module.discriminator.parameters(), **configs['train_conf']['optim_conf'])
170
+ else:
171
+ raise ValueError("unknown optimizer: " + configs['train_conf'])
172
+
173
+ if configs['train_conf']['scheduler_d'] == 'warmuplr':
174
+ scheduler_type = WarmupLR
175
+ scheduler_d = WarmupLR(optimizer_d, **configs['train_conf']['scheduler_conf'])
176
+ elif configs['train_conf']['scheduler_d'] == 'NoamHoldAnnealing':
177
+ scheduler_type = NoamHoldAnnealing
178
+ scheduler_d = NoamHoldAnnealing(optimizer_d, **configs['train_conf']['scheduler_conf'])
179
+ elif configs['train_conf']['scheduler_d'] == 'constantlr':
180
+ scheduler_type = ConstantLR
181
+ scheduler_d = ConstantLR(optimizer_d)
182
+ else:
183
+ raise ValueError("unknown scheduler: " + configs['train_conf'])
184
+ return model, optimizer, scheduler, optimizer_d, scheduler_d
185
+
186
+
187
+ def init_summarywriter(args):
188
+ writer = None
189
+ if int(os.environ.get('RANK', 0)) == 0:
190
+ os.makedirs(args.model_dir, exist_ok=True)
191
+ writer = SummaryWriter(args.tensorboard_dir)
192
+ return writer
193
+
194
+
195
+ def save_model(model, model_name, info_dict):
196
+ rank = int(os.environ.get('RANK', 0))
197
+ model_dir = info_dict["model_dir"]
198
+ save_model_path = os.path.join(model_dir, '{}.pt'.format(model_name))
199
+
200
+ if info_dict["train_engine"] == "torch_ddp":
201
+ if rank == 0:
202
+ torch.save({**model.module.state_dict(), 'epoch': info_dict['epoch'], 'step': info_dict['step']}, save_model_path)
203
+ else:
204
+ with torch.no_grad():
205
+ model.save_checkpoint(save_dir=model_dir,
206
+ tag=model_name,
207
+ client_state=info_dict)
208
+ if rank == 0:
209
+ info_path = re.sub('.pt$', '.yaml', save_model_path)
210
+ info_dict['save_time'] = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')
211
+ with open(info_path, 'w') as fout:
212
+ data = yaml.dump(info_dict)
213
+ fout.write(data)
214
+ logging.info('[Rank {}] Checkpoint: save to checkpoint {}'.format(rank, save_model_path))
215
+
216
+
217
+ def cosyvoice_join(group_join, info_dict):
218
+ world_size = int(os.environ.get('WORLD_SIZE', 1))
219
+ local_rank = int(os.environ.get('LOCAL_RANK', 0))
220
+ rank = int(os.environ.get('RANK', 0))
221
+
222
+ if info_dict["batch_idx"] != 0:
223
+ # we try to join all rank in both ddp and deepspeed mode, in case different rank has different lr
224
+ try:
225
+ dist.monitored_barrier(group=group_join,
226
+ timeout=group_join.options._timeout)
227
+ return False
228
+ except RuntimeError as e:
229
+ logging.info("Detected uneven workload distribution: {}\n".format(e) +
230
+ "Break current worker to manually join all workers, " +
231
+ "world_size {}, current rank {}, current local_rank {}\n".
232
+ format(world_size, rank, local_rank))
233
+ return True
234
+ else:
235
+ return False
236
+
237
+
238
+ def batch_forward(model, batch, scaler, info_dict, ref_model=None, dpo_loss=None):
239
+ device = int(os.environ.get('LOCAL_RANK', 0))
240
+
241
+ dtype = info_dict["dtype"]
242
+ if dtype == "fp16":
243
+ dtype = torch.float16
244
+ elif dtype == "bf16":
245
+ dtype = torch.bfloat16
246
+ else: # fp32
247
+ dtype = torch.float32
248
+
249
+ if info_dict['train_engine'] == 'torch_ddp':
250
+ autocast = torch.cuda.amp.autocast(enabled=scaler is not None)
251
+ else:
252
+ autocast = torch.cuda.amp.autocast(enabled=True, dtype=dtype, cache_enabled=False)
253
+
254
+ with autocast:
255
+ info_dict['loss_dict'] = model(batch, device)
256
+ if ref_model is not None and dpo_loss is not None:
257
+ chosen_logps = info_dict['loss_dict']["chosen_logps"]
258
+ rejected_logps = info_dict['loss_dict']["rejected_logps"]
259
+ sft_loss = info_dict['loss_dict']['loss']
260
+ with torch.no_grad():
261
+ ref_loss_dict = ref_model(batch, device)
262
+ reference_chosen_logps = ref_loss_dict["chosen_logps"]
263
+ reference_rejected_logps = ref_loss_dict["rejected_logps"]
264
+ preference_loss, chosen_reward, reject_reward = dpo_loss(
265
+ chosen_logps, rejected_logps, reference_chosen_logps, reference_rejected_logps
266
+ )
267
+ dpo_acc = (chosen_reward > reject_reward).float().mean()
268
+ info_dict['loss_dict']["loss"] = preference_loss + sft_loss
269
+ info_dict['loss_dict']["sft_loss"] = sft_loss
270
+ info_dict['loss_dict']["dpo_loss"] = preference_loss
271
+ info_dict['loss_dict']["dpo_acc"] = dpo_acc
272
+ info_dict['loss_dict']["chosen_reward"] = chosen_reward.mean()
273
+ info_dict['loss_dict']["reject_reward"] = reject_reward.mean()
274
+ return info_dict
275
+
276
+
277
+ def batch_backward(model, scaler, info_dict):
278
+ if info_dict["train_engine"] == "deepspeed":
279
+ scaled_loss = model.backward(info_dict['loss_dict']['loss'])
280
+ else:
281
+ scaled_loss = info_dict['loss_dict']['loss'] / info_dict['accum_grad']
282
+ if scaler is not None:
283
+ scaler.scale(scaled_loss).backward()
284
+ else:
285
+ scaled_loss.backward()
286
+
287
+ info_dict['loss_dict']['loss'] = scaled_loss
288
+ return info_dict
289
+
290
+
291
+ def update_parameter_and_lr(model, optimizer, scheduler, scaler, info_dict):
292
+ grad_norm = 0.0
293
+ if info_dict['train_engine'] == "deepspeed":
294
+ info_dict["is_gradient_accumulation_boundary"] = model.is_gradient_accumulation_boundary()
295
+ model.step()
296
+ grad_norm = model.get_global_grad_norm()
297
+ elif (info_dict['batch_idx'] + 1) % info_dict["accum_grad"] == 0:
298
+ # Use mixed precision training
299
+ if scaler is not None:
300
+ scaler.unscale_(optimizer)
301
+ grad_norm = clip_grad_norm_(model.parameters(), info_dict['grad_clip'])
302
+ # We don't check grad here since that if the gradient
303
+ # has inf/nan values, scaler.step will skip
304
+ # optimizer.step().
305
+ if torch.isfinite(grad_norm):
306
+ scaler.step(optimizer)
307
+ else:
308
+ logging.warning('get infinite grad_norm, check your code/data if it appears frequently')
309
+ scaler.update()
310
+ else:
311
+ grad_norm = clip_grad_norm_(model.parameters(), info_dict['grad_clip'])
312
+ if torch.isfinite(grad_norm):
313
+ optimizer.step()
314
+ else:
315
+ logging.warning('get infinite grad_norm, check your code/data if it appears frequently')
316
+ optimizer.zero_grad()
317
+ scheduler.step()
318
+ info_dict["lr"] = optimizer.param_groups[0]['lr']
319
+ info_dict["grad_norm"] = grad_norm
320
+ return info_dict
321
+
322
+
323
+ def log_per_step(writer, info_dict):
324
+ tag = info_dict["tag"]
325
+ epoch = info_dict.get('epoch', 0)
326
+ step = info_dict["step"]
327
+ batch_idx = info_dict["batch_idx"]
328
+ loss_dict = info_dict['loss_dict']
329
+ rank = int(os.environ.get('RANK', 0))
330
+
331
+ # only rank 0 write to tensorboard to avoid multi-process write
332
+ if writer is not None:
333
+ if (info_dict['train_engine'] == 'deepspeed' and info_dict['is_gradient_accumulation_boundary'] is True) or \
334
+ (info_dict['train_engine'] == 'torch_ddp' and (info_dict['batch_idx'] + 1) % info_dict['accum_grad'] == 0):
335
+ for k in ['epoch', 'lr', 'grad_norm']:
336
+ writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1)
337
+ for k, v in loss_dict.items():
338
+ writer.add_scalar('{}/{}'.format(tag, k), v, step + 1)
339
+
340
+ # TRAIN & CV, Shell log (stdout)
341
+ if (info_dict['batch_idx'] + 1) % info_dict['log_interval'] == 0:
342
+ log_str = '{} Batch {}/{} '.format(tag, epoch, batch_idx + 1)
343
+ for name, value in loss_dict.items():
344
+ log_str += '{} {:.6f} '.format(name, value)
345
+ if tag == "TRAIN":
346
+ log_str += 'lr {:.8f} grad_norm {:.6f}'.format(
347
+ info_dict["lr"], info_dict['grad_norm'])
348
+ log_str += ' rank {}'.format(rank)
349
+ logging.debug(log_str)
350
+
351
+
352
+ def log_per_save(writer, info_dict):
353
+ tag = info_dict["tag"]
354
+ epoch = info_dict["epoch"]
355
+ step = info_dict["step"]
356
+ loss_dict = info_dict["loss_dict"]
357
+ lr = info_dict['lr']
358
+ rank = int(os.environ.get('RANK', 0))
359
+ logging.info(
360
+ 'Epoch {} Step {} CV info lr {} {} rank {}'.format(
361
+ epoch, step + 1, lr, ' '.join(['{} {}'.format(k, v) for k, v in loss_dict.items()]), rank))
362
+
363
+ if writer is not None:
364
+ for k in ['epoch', 'lr']:
365
+ writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1)
366
+ for k, v in loss_dict.items():
367
+ writer.add_scalar('{}/{}'.format(tag, k), v, step + 1)
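
A minimal sketch of the gradient-accumulation pattern that batch_backward / update_parameter_and_lr implement for the torch_ddp path (illustrative only, not part of this commit; the toy model, accum_grad and grad_clip values are placeholders):

    import torch
    from torch.nn.utils import clip_grad_norm_

    accum_grad, grad_clip = 2, 5.0
    model = torch.nn.Linear(8, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    for batch_idx in range(10):
        x, y = torch.randn(4, 8), torch.randn(4, 1)
        loss = torch.nn.functional.mse_loss(model(x), y) / accum_grad  # scale like batch_backward
        loss.backward()
        if (batch_idx + 1) % accum_grad == 0:  # accumulation boundary, like update_parameter_and_lr
            grad_norm = clip_grad_norm_(model.parameters(), grad_clip)
            if torch.isfinite(grad_norm):
                optimizer.step()
            optimizer.zero_grad()
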
speech/examples/magicdata-read/cosyvoice/conf ADDED
@@ -0,0 +1 @@
1
+ ../../libritts/cosyvoice/conf
speech/examples/magicdata-read/cosyvoice/cosyvoice ADDED
@@ -0,0 +1 @@
1
+ ../../../cosyvoice
speech/examples/magicdata-read/cosyvoice/local/prepare_data.py ADDED
@@ -0,0 +1,52 @@
1
+ import argparse
2
+ import logging
3
+ import os
4
+ from tqdm import tqdm
5
+
6
+
7
+ logger = logging.getLogger()
8
+
9
+
10
+ def main():
11
+ utt2wav, utt2text, utt2spk, spk2utt = {}, {}, {}, {}
12
+ with open(os.path.join(args.src_dir, "TRANS.txt"), "r") as f:
13
+ lines = f.readlines()[1:]
14
+ lines = [l.split('\t') for l in lines]
15
+ for wav, spk, content in tqdm(lines):
16
+ wav, spk, content = wav.strip(), spk.strip(), content.strip()
17
+ content = content.replace('[FIL]', '')
18
+ content = content.replace('[SPK]', '')
19
+ wav = os.path.join(args.src_dir, spk, wav)
20
+ if not os.path.exists(wav):
21
+ continue
22
+ utt = os.path.basename(wav).replace('.wav', '')
23
+ utt2wav[utt] = wav
24
+ utt2text[utt] = content
25
+ utt2spk[utt] = spk
26
+ if spk not in spk2utt:
27
+ spk2utt[spk] = []
28
+ spk2utt[spk].append(utt)
29
+
30
+ with open('{}/wav.scp'.format(args.des_dir), 'w') as f:
31
+ for k, v in utt2wav.items():
32
+ f.write('{} {}\n'.format(k, v))
33
+ with open('{}/text'.format(args.des_dir), 'w') as f:
34
+ for k, v in utt2text.items():
35
+ f.write('{} {}\n'.format(k, v))
36
+ with open('{}/utt2spk'.format(args.des_dir), 'w') as f:
37
+ for k, v in utt2spk.items():
38
+ f.write('{} {}\n'.format(k, v))
39
+ with open('{}/spk2utt'.format(args.des_dir), 'w') as f:
40
+ for k, v in spk2utt.items():
41
+ f.write('{} {}\n'.format(k, ' '.join(v)))
42
+ return
43
+
44
+
45
+ if __name__ == "__main__":
46
+ parser = argparse.ArgumentParser()
47
+ parser.add_argument('--src_dir',
48
+ type=str)
49
+ parser.add_argument('--des_dir',
50
+ type=str)
51
+ args = parser.parse_args()
52
+ main()
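
A minimal invocation sketch (illustrative only, not part of this commit; the MagicData paths are placeholders). The script expects <src_dir>/TRANS.txt with tab-separated wav name, speaker and transcript, plus <src_dir>/<spk>/<wav>, and writes Kaldi-style wav.scp / text / utt2spk / spk2utt into --des_dir:

    import os
    import subprocess

    os.makedirs("data/train", exist_ok=True)
    subprocess.run(["python", "local/prepare_data.py",
                    "--src_dir", "/path/to/magicdata/train",
                    "--des_dir", "data/train"], check=True)
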