Huakang Chen committed
Commit 1ec923d · 1 Parent(s): 43dae2f

Add application file

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. app.py +519 -0
  2. requirements.txt +14 -0
  3. tools/__pycache__/wer.cpython-310.pyc +0 -0
  4. tools/wer.py +59 -0
  5. xcodec2/.gitattributes +35 -0
  6. xcodec2/.vscode/settings.json +5 -0
  7. xcodec2/README.md +69 -0
  8. xcodec2/__init__.py +0 -0
  9. xcodec2/__pycache__/__init__.cpython-310.pyc +0 -0
  10. xcodec2/__pycache__/configuration_bigcodec.cpython-310.pyc +0 -0
  11. xcodec2/__pycache__/configuration_bigcodec.cpython-38.pyc +0 -0
  12. xcodec2/__pycache__/modeling_xcodec2.cpython-310.pyc +0 -0
  13. xcodec2/__pycache__/modeling_xcodec2.cpython-38.pyc +0 -0
  14. xcodec2/config.json +11 -0
  15. xcodec2/configuration_bigcodec.py +19 -0
  16. xcodec2/modeling_xcodec2.py +164 -0
  17. xcodec2/module.py +0 -0
  18. xcodec2/vq/__init__.py +4 -0
  19. xcodec2/vq/__pycache__/__init__.cpython-310.pyc +0 -0
  20. xcodec2/vq/__pycache__/__init__.cpython-311.pyc +0 -0
  21. xcodec2/vq/__pycache__/__init__.cpython-312.pyc +0 -0
  22. xcodec2/vq/__pycache__/__init__.cpython-38.pyc +0 -0
  23. xcodec2/vq/__pycache__/__init__.cpython-39.pyc +0 -0
  24. xcodec2/vq/__pycache__/activations.cpython-310.pyc +0 -0
  25. xcodec2/vq/__pycache__/activations.cpython-311.pyc +0 -0
  26. xcodec2/vq/__pycache__/activations.cpython-312.pyc +0 -0
  27. xcodec2/vq/__pycache__/activations.cpython-38.pyc +0 -0
  28. xcodec2/vq/__pycache__/activations.cpython-39.pyc +0 -0
  29. xcodec2/vq/__pycache__/blocks.cpython-310.pyc +0 -0
  30. xcodec2/vq/__pycache__/blocks.cpython-38.pyc +0 -0
  31. xcodec2/vq/__pycache__/blocks.cpython-39.pyc +0 -0
  32. xcodec2/vq/__pycache__/bs_roformer5.cpython-310.pyc +0 -0
  33. xcodec2/vq/__pycache__/bs_roformer5.cpython-38.pyc +0 -0
  34. xcodec2/vq/__pycache__/bs_roformer5.cpython-39.pyc +0 -0
  35. xcodec2/vq/__pycache__/codec_decoder.cpython-310.pyc +0 -0
  36. xcodec2/vq/__pycache__/codec_decoder.cpython-311.pyc +0 -0
  37. xcodec2/vq/__pycache__/codec_decoder.cpython-312.pyc +0 -0
  38. xcodec2/vq/__pycache__/codec_decoder.cpython-38.pyc +0 -0
  39. xcodec2/vq/__pycache__/codec_decoder.cpython-39.pyc +0 -0
  40. xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-310.pyc +0 -0
  41. xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-311.pyc +0 -0
  42. xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-312.pyc +0 -0
  43. xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-38.pyc +0 -0
  44. xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-39.pyc +0 -0
  45. xcodec2/vq/__pycache__/codec_encoder.cpython-310.pyc +0 -0
  46. xcodec2/vq/__pycache__/codec_encoder.cpython-311.pyc +0 -0
  47. xcodec2/vq/__pycache__/codec_encoder.cpython-312.pyc +0 -0
  48. xcodec2/vq/__pycache__/codec_encoder.cpython-38.pyc +0 -0
  49. xcodec2/vq/__pycache__/codec_encoder.cpython-39.pyc +0 -0
  50. xcodec2/vq/__pycache__/factorized_vector_quantize.cpython-310.pyc +0 -0
app.py ADDED
@@ -0,0 +1,519 @@
+import os
+import traceback
+
+import gradio as gr
+import numpy as np
+import pyrootutils
+import torch
+from loguru import logger
+from transformers import AutoTokenizer
+from vllm import LLM, SamplingParams, TokensPrompt
+from funasr_onnx import Paraformer
+from huggingface_hub import snapshot_download
+
+from tools.wer import compute_wers
+
+os.environ["EINX_FILTER_TRACEBACK"] = "false"
+os.environ["VLLM_USE_V1"] = "0"
+
+pyrootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
+from i18n import i18n
+from text.chn_text_norm.text import Text as ChnNormedText
+from xcodec2.modeling_xcodec2 import XCodec2Model
+
+
+TEXTBOX_PLACEHOLDER = i18n("Put your text here.")
+
+# ===== Hugging Face Model IDs =====
+LLASA_MODEL_ID = "ASLP-lab/VoiceSculptor"
+LLASA_SUBFOLDER = "LLaSA-Instruct-3B"
+XCODEC_MODEL_ID = "HKUSTAudio/xcodec2"
+PARAFORMER_REPO_ID = "funasr/Paraformer-large"
+
+# logo
+LOGO_URL = "https://raw.githubusercontent.com/ASLP-lab/VoiceSculptor/main/assets/logo.png"
+
+
+def normalize_text_final(user_input: str) -> str:
+    return ChnNormedText(raw_text=user_input).normalize()
+
+
+def extract_speech_ids(speech_tokens_str):
+    """Parse tokens of the form <|s_N|> into integer speech-code ids."""
+    speech_ids = []
+    for token_str in speech_tokens_str:
+        if token_str.startswith("<|s_") and token_str.endswith("|>"):
+            num_str = token_str[4:-2]
+            speech_ids.append(int(num_str))
+        else:
+            logger.warning(f"Unexpected token: {token_str}")
+    return speech_ids
+
+
+def get_asr(asr_model: Paraformer, wav_list: list[np.ndarray]) -> list[str]:
+    """wav_list: list of 1D numpy waveforms (16 kHz)."""
+    try:
+        result = asr_model(wav_list)
+        if isinstance(result, dict):
+            result = [result]
+
+        texts = []
+        for res in result:
+            preds = res.get("preds", None)
+            if preds is None:
+                texts.append(res.get("text", ""))
+            else:
+                texts.append(preds[0] if len(preds) > 0 else "")
+
+        # Fault tolerance: the batch call returned a mismatched count -> fall back to per-item ASR.
+        if len(texts) != len(wav_list):
+            logger.warning(f"[ASR] batch count mismatch: got {len(texts)}, expected {len(wav_list)}; falling back to per-item ASR")
+            texts = []
+            for w in wav_list:
+                try:
+                    r = asr_model(w)
+                    if isinstance(r, list) and len(r) > 0:
+                        r0 = r[0]
+                        preds = r0.get("preds", None)
+                        texts.append(preds[0] if preds else r0.get("text", ""))
+                    elif isinstance(r, dict):
+                        preds = r.get("preds", None)
+                        texts.append(preds[0] if preds else r.get("text", ""))
+                    else:
+                        texts.append("")
+                except Exception:
+                    texts.append("")
+        return texts
+
+    except Exception as e:
+        logger.warning(f"[ASR] batch inference failed, falling back to per-item ASR: {e}")
+        texts = []
+        for w in wav_list:
+            try:
+                r = asr_model(w)
+                if isinstance(r, list) and len(r) > 0:
+                    r0 = r[0]
+                    preds = r0.get("preds", None)
+                    texts.append(preds[0] if preds else r0.get("text", ""))
+                elif isinstance(r, dict):
+                    preds = r.get("preds", None)
+                    texts.append(preds[0] if preds else r.get("text", ""))
+                else:
+                    texts.append("")
+            except Exception:
+                texts.append("")
+        return texts
+
+
+def inference_batch(
+    model: LLM,
+    codec_model: XCodec2Model,
+    device: str,
+    tokenizer: AutoTokenizer,
+    refined_text: str,
+    instruct_text: str,
+    control_tags: str,
+    batch_size: int = 5,
+) -> list[tuple[int, np.ndarray]]:
+    refined_text_norm = normalize_text_final(refined_text)
+    instruct_text_norm = normalize_text_final(instruct_text)
+
+    if len(refined_text_norm) < 5:
+        raise ValueError("Input text must be at least 5 characters long")
+    if len(refined_text_norm) > 150:
+        raise ValueError("Input text must not exceed 150 characters")
+
+    target_text = instruct_text_norm + "<|endofprompt|>" + control_tags + refined_text_norm
+    formatted_text = f"<|TEXT_UNDERSTANDING_START|>{target_text}<|TEXT_UNDERSTANDING_END|>"
+    chat = [
+        {"role": "user", "content": "Convert the text to speech:" + formatted_text},
+        {"role": "assistant", "content": "<|SPEECH_GENERATION_START|>"},
+    ]
+
+    with torch.no_grad():
+        input_ids = tokenizer.apply_chat_template(
+            chat,
+            tokenize=True,
+            return_tensors="pt",
+            continue_final_message=True,
+        ).to(device)
+
+        speech_end_id = tokenizer.convert_tokens_to_ids("<|SPEECH_GENERATION_END|>")
+        prompt_ids = input_ids.squeeze(0).tolist()
+        prompts = [TokensPrompt(prompt_token_ids=prompt_ids) for _ in range(batch_size)]
+
+        base_seed = int.from_bytes(os.urandom(4), "little")
+
+        try:
+            sampling_params_list = [
+                SamplingParams(
+                    temperature=0.9,
+                    top_p=0.95,
+                    top_k=15,
+                    max_tokens=2048,
+                    repetition_penalty=1.05,
+                    stop_token_ids=[speech_end_id],
+                    seed=base_seed + i,
+                )
+                for i in range(batch_size)
+            ]
+            outputs = model.generate(prompts=prompts, sampling_params=sampling_params_list)
+        except TypeError:
+            logger.warning("[vLLM] This version does not support SamplingParams(seed=...); generating without a seed")
+            sampling_params = SamplingParams(
+                temperature=0.9,
+                top_p=0.95,
+                top_k=15,
+                max_tokens=2048,
+                repetition_penalty=1.05,
+                stop_token_ids=[speech_end_id],
+            )
+            outputs = model.generate(prompts=prompts, sampling_params=sampling_params)
+
+        audios: list[tuple[int, np.ndarray]] = []
+        for out in outputs:
+            token_ids = out.outputs[0].token_ids
+            if len(token_ids) > 0 and token_ids[-1] == speech_end_id:
+                token_ids = token_ids[:-1]
+
+            speech_tokens = tokenizer.batch_decode(token_ids, skip_special_tokens=True)
+            speech_tokens = extract_speech_ids(speech_tokens)
+
+            speech_tokens_t = torch.tensor(speech_tokens, device=device).unsqueeze(0).unsqueeze(0)
+            wav = codec_model.decode_code(speech_tokens_t)
+            wav = wav.squeeze(0).squeeze(0).detach().cpu().numpy().astype(np.float32)
+            audios.append((16000, wav))
+
+    return audios
+
+
+def build_app():
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    logger.info(f"✅ Loading models on device={device}")
+
+    # ===== LLaSA =====
+    tokenizer = AutoTokenizer.from_pretrained(LLASA_MODEL_ID, subfolder=LLASA_SUBFOLDER, trust_remote_code=True)
+
+    model = LLM(
+        model=LLASA_MODEL_ID,
+        gpu_memory_utilization=0.90,
+        max_model_len=2048,
+        enable_prefix_caching=True,
+        dtype="auto",
+        quantization=None,
+        enforce_eager=False,
+        kv_cache_dtype="auto",
+        trust_remote_code=True,
+        hf_model_subfolder=LLASA_SUBFOLDER,
+    )
+
+    # ===== XCodec2 =====
+    codec_model = XCodec2Model.from_pretrained(XCODEC_MODEL_ID).eval().to(device)
+
+    # ===== Paraformer =====
+    paraformer_dir = snapshot_download(
+        repo_id=PARAFORMER_REPO_ID,
+        local_dir="checkpoints/Paraformer-large",
+        local_dir_use_symlinks=False,
+    )
+    asr_model = Paraformer(paraformer_dir, batch_size=5, quantize=True)
+
+    logger.info("✅ Models loaded: VoiceSculptor + xcodec2 + Paraformer")
+
+    INSTRUCT_TEMPLATES = {
+        "自定义": "",
+        "default": "这是一位男性评书表演者,用传统说唱腔调,以变速节奏和韵律感极强的语速讲述江湖故事,音量时高时低,充满江湖气。",
+        "幼儿园女教师-温柔甜美": "这是一位幼儿园女教师,用甜美明亮的嗓音,以极慢且富有耐心的语速,带着温柔鼓励的情感,用标准普通话给小朋友讲睡前故事,音量轻柔适中,咬字格外清晰。",
+        "电台主播-平静温柔": "深夜电台主播,男性、音调偏低、语速偏慢、音量小;情绪平静带点忧伤,语气温柔;音色微哑",
+        "成熟御姐-冷静坚定": "成熟御姐风格,音调偏低、语速正常、音量中等;情绪冷静,语气不容置疑的坚定;音色偏磁性,吐字清晰",
+        "年轻妈妈-温暖安抚": "年轻妈妈哄孩子入睡,女性、音调柔和偏低、语速偏慢、音量偏小但清晰;情绪温暖安抚、充满耐心与爱意,语气轻柔哄劝、像贴近耳边低声说话;音色软糯,吐字清晰、节奏舒缓。",
+        "小女孩-尖锐清脆": "一位7岁的小女孩,用天真高亢的童声,以不稳定的快节奏,充满兴奋和炫耀地背诵乘法口诀,音调忽高忽低,带着儿童特有的尖锐清脆。",
+        "老奶奶-沙哑低沉": "一位慈祥的老奶奶,用沙哑低沉的嗓音,以极慢而温暖的语速讲述民间传说,音量微弱但清晰,带着怀旧和神秘的情感。",
+        "诗歌朗诵-雄浑有力": "一位男性现代诗朗诵者,用深沉磁性的低音,以顿挫有力的节奏演绎艾青诗歌,音量洪亮,情感激昂澎湃。",
+        "童话风格-甜美夸张": "这是一位女性童话旁白朗诵者,用甜美夸张的童声,以跳跃变化的语速讲述《安徒生童话》,音调偏高,充满奇幻色彩。",
+        "评书风格-抑扬顿挫": "这是一位男性评书表演者,用传统说唱腔调,以变速节奏和韵律感极强的语速讲述江湖故事,音量时高时低,充满江湖气。",
+        "新闻风格-平静专业": "这是一位女性新闻主播,用标准普通话以清晰明亮的中高音,以平稳专业的语速播报时事新闻,音量洪亮,情感客观中立。",
+        "相声风格-夸张幽默": "这是一位男性相声表演者,用夸张幽默的嗓音,以时快时慢的节奏抖包袱,音调起伏大,充满喜感和节奏感。",
+        "游戏直播-亢奋激昂": "这是一位男性游戏解说,用亢奋激昂的嗓音,以极快且情绪化的语速直播电竞比赛,音量突然爆发,充满悬念和热血。",
+        "悬疑小说-低沉神秘": "一位男性悬疑小说演播者,用低沉神秘的嗓音,以时快时慢的变速节奏营造紧张氛围,音量忽高忽低,充满悬念感。",
+        "戏剧表演-夸张戏剧": "这是一位男性戏剧表演者,用夸张戏剧化的嗓音,以忽高忽低的音调和时快时慢的语速表演独白,充满张力。",
+        "法治节目-庄严庄重": "这是一位男性法治节目主持人,用严肃庄重的嗓音,以平稳有力的语速讲述案件,音量适中,体现法律的威严。",
+        "纪录片旁白-低沉磁性": "这是一位男性纪录片旁白,用深沉磁性的嗓音,以缓慢而富有画面感的语速讲述自然奇观,音量适中,充满敬畏和诗意。",
+        "广告配音-沧桑浑厚": "这是一位男性白酒品牌广告配音,用沧桑浑厚的嗓音,以缓慢而豪迈的语速,音量洪亮,传递历史底蕴和男人情怀。",
+        "冥想引导师-空灵悠长": "一位女性冥想引导师,用空灵悠长的气声,以极慢而飘渺的语速,配合环境音效,音量轻柔,营造禅意空间。",
+        "ASMR-气声耳语": "一位女性ASMR主播,用气声耳语,以极慢而细腻的语速,配合唇舌音,音量极轻,营造极度放松的氛围。",
+    }
+
+    TEXT_REQUIREMENTS = {
+        "自定义": "",
+        "default": "话说那武松,提着哨棒,直奔景阳冈。天色将晚,酒劲上头,只听一阵狂风,老虎来啦!",
+        "幼儿园女教师-温柔甜美": "月亮婆婆升上天空啦,星星宝宝都困啦。小白兔躺在床上,盖好小被子,闭上眼睛。兔妈妈轻轻地唱着摇篮曲:睡吧睡吧,我亲爱的宝贝。",
+        "电台主播-平静温柔": "大家好,欢迎收听你的月亮我的心,好男人就是我,我就是:曾小贤。",
+        "成熟御姐-冷静坚定": "别担心,我不会让你输,把那些乱七八糟的念头先收起来,姐姐带你赢。",
+        "年轻妈妈-温暖安抚": "从前有座山,山里有座庙,庙里面有个小和尚,小和尚在给老和尚讲故事,他说:从前有座山,山里有座庙,庙里面有个小和尚。",
+        "小女孩-尖锐清脆": "一一得一!一二得二!一三得三!我会背乘法口诀啦!老师今天表扬我啦!妈妈说我最棒!",
+        "老奶奶-沙哑低沉": "很久很久以前,在山的那边,住着一只会说话的狐狸。它常常在月圆之夜,变成美丽的姑娘,来到村子里。",
+        "诗歌朗诵-雄浑有力": "为什么我的眼里常含泪水?因为我对这土地爱得深沉。这土地,这河流,这吹刮着的暴风。",
+        "童话风格-甜美夸张": "在一个很冷很冷的夜晚,小女孩擦亮了一根火柴。突然,温暖的火炉出现了!她觉得自己好像坐在火炉旁。",
+        "评书风格-抑扬顿挫": "话说那武松,提着哨棒,直奔景阳冈。天色将晚,酒劲上头,只听一阵狂风,老虎来啦!",
+        "新闻风格-平静专业": "本台讯,今日凌晨,我国成功发射新一代载人飞船试验船。此次任务验证了多项关键技术,为后续空间站建设奠定基础。",
+        "相声风格-夸张幽默": "我这个人啊,最大的优点就是太谦虚。谦虚到什么程度?连谦虚本身都觉得我太谦虚了!",
+        "游戏直播-亢奋激昂": "大招!大招好了!开团了!ACE!团灭!这波操作神了!冠军相尽显无疑!",
+        "悬疑小说-低沉神秘": "深夜,他独自走在空无一人的小巷。脚步声,回声,还有……另一个人的呼吸声。他猛地回头——什么也没有。",
+        "戏剧表演-夸张戏剧": "我疯了!彻底疯了!你们都说我疯了!可疯的是这个世界!清醒的人反而被当成疯子!",
+        "法治节目-庄严庄重": "天网恢恢,疏而不漏。任何触犯法律的行为,终将受到公正的审判。正义或许会迟到,但绝不会缺席。",
+        "纪录片旁白-低沉磁性": "在这片广袤的非洲草原上,生命与死亡每天都在上演。猎豹的速度,羚羊的敏捷,都是生存的代价。",
+        "广告配音-沧桑浑厚": "一杯敬过往,一杯敬远方。传承千年的酿造工艺,只在每一滴醇香。老朋友,值得好酒。",
+        "冥想引导师-空灵悠长": "想象你是一片叶子,随风飘落。没有牵挂,没有重量。只有呼吸,只有当下,只有宁静。",
+        "ASMR-气声耳语": "现在,让我在你耳边轻声细语。听到我的声音了吗?放松你的头皮,感受每一个毛孔都在呼吸。",
+    }
+
+    def build_control_tags(age, gender, pitch, pitch_var, volume, speed, emo):
+        tag_map = {
+            "小孩": "<|小孩|>", "青年": "<|青年|>", "中年": "<|中年|>", "老年": "<|老年|>",
+            "男性": "<|男性|>", "女性": "<|女性|>",
+            "音调很高": "<|音调很高|>", "音调较高": "<|音调较高|>", "音调中等": "<|音调中等|>",
+            "音调较低": "<|音调较低|>", "音调很低": "<|音调很低|>",
+            "音调变化很强": "<|音调变化很强|>", "音调变化较强": "<|音调变化较强|>", "音调变化一般": "<|音调变化一般|>",
+            "音调变化较弱": "<|音调变化较弱|>", "音调变化很弱": "<|音调变化很弱|>",
+            "音量很大": "<|音量很大|>", "音量较大": "<|音量较大|>", "音量中等": "<|音量中等|>",
+            "音量较小": "<|音量较小|>", "音量很小": "<|音量很小|>",
+            "语速很快": "<|语速很快|>", "语速较快": "<|语速较快|>", "语速中等": "<|语速中等|>",
+            "语速较慢": "<|语速较慢|>", "语速很慢": "<|语速很慢|>",
+            "开心": "<|开心|>", "生气": "<|生气|>", "难过": "<|难过|>", "惊讶": "<|惊讶|>", "厌恶": "<|厌恶|>", "害怕": "<|害怕|>",
+        }
+        tags = []
+        for v in [gender, age, speed, volume, pitch, pitch_var, emo]:
+            if v != "不指定":
+                tags.append(tag_map[v])
+        return "".join(tags)
+
+    def inference_select_best3(refined_text, instruct_text, age, gender, pitch, pitch_var, volume, speed, emo):
+        control_tags = build_control_tags(age, gender, pitch, pitch_var, volume, speed, emo)
+        try:
+            audios5 = inference_batch(
+                model=model,
+                codec_model=codec_model,
+                device=device,
+                tokenizer=tokenizer,
+                refined_text=refined_text,
+                instruct_text=instruct_text,
+                control_tags=control_tags,
+                batch_size=5,
+            )
+            wav_list = [wav for (_, wav) in audios5]
+            asr_texts = get_asr(asr_model, wav_list)
+
+            refined_text_norm = normalize_text_final(refined_text)
+            gt_texts = [refined_text_norm] * len(asr_texts)
+            wers = compute_wers(gt_texts, asr_texts, lang="zh")
+
+            for i, (hyp, w) in enumerate(zip(asr_texts, wers)):
+                logger.info(f"[ASR/WER] idx={i} wer={w:.4f} gt='{refined_text_norm}' asr='{hyp}'")
+
+            best_idx = np.argsort(np.array(wers))[:3].tolist()
+            logger.info(f"[ASR/WER] best_idx={best_idx} best_wers={[float(wers[i]) for i in best_idx]}")
+            best3 = [audios5[i] for i in best_idx]
+            return best3[0], best3[1], best3[2]
+        except Exception as e:
+            logger.error(f"Inference/ASR/WER failed: {e}")
+            logger.error("Traceback:\n" + traceback.format_exc())
+            return None, None, None
+
+    THEME = gr.themes.Soft(
+        primary_hue="orange",
+        secondary_hue="cyan",
+        neutral_hue="slate",
+    )
+
+    CUSTOM_CSS = """
+/* layout */
+#vs-root {max-width: 1180px; margin: 0 auto;}
+#vs-header {padding: 14px 14px 4px 14px;}
+#vs-card {border-radius: 14px; padding: 14px; border: 1px solid rgba(0,0,0,0.08);}
+
+/* ===== VoiceSculptor palette (from logo) ===== */
+:root, .gradio-container {
+    --vs-orange: #FF6A00;
+    --vs-orange2: #FFB000;
+    --vs-teal: #00A6C6;
+    --vs-blue: #0B2E8A;
+    --vs-teal-a: rgba(0,166,198,.18);
+}
+
+/* primary button */
+.gr-button-primary, button.primary {
+    background: linear-gradient(90deg, var(--vs-orange), var(--vs-orange2)) !important;
+    border: none !important;
+    color: white !important;
+}
+.gr-button-primary:hover, button.primary:hover {
+    filter: brightness(1.03);
+}
+.gr-button-primary:active, button.primary:active {
+    filter: brightness(0.98);
+}
+
+/* links */
+.gradio-container a {
+    color: var(--vs-teal) !important;
+}
+.gradio-container a:hover {
+    text-decoration: underline;
+}
+
+/* focus ring / active border for inputs */
+textarea:focus, input:focus {
+    border-color: var(--vs-teal) !important;
+    box-shadow: 0 0 0 3px var(--vs-teal-a) !important;
+    outline: none !important;
+}
+/* some gradio versions wrap inputs in these */
+.gr-input:focus-within, .gr-text-input:focus-within, .gr-box:focus-within {
+    border-color: var(--vs-teal) !important;
+    box-shadow: 0 0 0 3px var(--vs-teal-a) !important;
+}
+
+/* accordion highlight */
+.gr-accordion .label, .gr-accordion summary {
+    color: var(--vs-blue) !important;
+}
+"""
+
+    DEFAULT_STYLE = "评书风格-抑扬顿挫"
+    template_choices = [k for k in INSTRUCT_TEMPLATES.keys() if k not in ("default",)]
+
+    BEST_PRACTICE_MD = """
+## Best Practice Guide(音色设计)
+
+完整指南请见:Voice Design README
+https://github.com/ASLP-lab/VoiceSculptor/blob/main/docs/voice_design.md
+
+### 关键约束
+- **voice_prompt ≤ 200 字**
+- **当前仅支持中文**
+- **待合成文本长度 ≥ 5 个字**
+
+### 写法建议
+- **具体**:用可感知特质词(低沉/清脆/沙哑/明亮、语速快慢、音量大小等),避免“好听/不错”。
+- **完整**:建议覆盖 **3–4 个维度**(人设/场景 + 性别/年龄 + 音调/语速 + 音质/情绪)。
+- **客观**:描述声音特征与表达方式,避免“我喜欢/很棒”。
+- **不做模仿**:禁止“像某明星/某演员”,只描述声音特质本身。
+- **尽量精炼**:每个词都承载信息,避免重复强调(如“非常非常”)。
+
+### 参考模板
+> - 这是一位男性评书表演者,用传统说唱腔调,以变速节奏和韵律感极强的语速讲述江湖故事,音量时高时低,充满江湖气。
+> - 深夜电台主播,男性、音调偏低、语速偏慢、音量小;情绪平静带点忧伤,语气温柔;音色微哑。
+> - 成熟御姐风格,音调偏低、语速正常、音量中等;情绪冷静,语气不容置疑的坚定;音色偏磁性,吐字清晰。
+
+### 细粒度控制提示
+- 细粒度控制(年龄/性别/音调/语速/音量/情感等)**建议与指令描述保持一致**,尽量避免相互矛盾(如指令写“低沉慢速”,细粒度却选“音调很高/语速很快”)。
+"""
+
+    with gr.Blocks(theme=THEME, css=CUSTOM_CSS) as app:
+        with gr.Column(elem_id="vs-root"):
+            with gr.Row(elem_id="vs-header"):
+                gr.HTML(f"""
+                <div style="display:flex; align-items:center; gap:16px;">
+                    <img src="{LOGO_URL}"
+                         alt="Voice Sculptor Logo"
+                         style="width:360px; max-height:130px; object-fit:contain; display:block;" />
+                    <div>
+                        <div style="font-size:32px; font-weight:700; line-height:1;">Voice Sculptor</div>
+                        <div style="opacity:.85; margin-top:6px;">
+                            {i18n('An instruct text-to-speech solution based on LLaSA and CosyVoice2 developed by the ASLP lab and collaborators.')}
+                        </div>
+                    </div>
+                </div>
+                """)
+
+            with gr.Row():
+                # Left: Controls + Guide
+                with gr.Column(scale=5, elem_id="vs-card"):
+                    gr.Markdown("### 🪄 Voice Design(捏音色)")
+
+                    with gr.Accordion("🎭 风格与文本", open=True):
+                        instruct_template = gr.Dropdown(
+                            choices=template_choices,
+                            value=DEFAULT_STYLE,
+                            label=i18n("指令风格(必选)"),
+                            interactive=True,
+                        )
+
+                        instruct_text = gr.Textbox(
+                            label=i18n("指令文本"),
+                            placeholder=TEXTBOX_PLACEHOLDER,
+                            lines=4,
+                            value=INSTRUCT_TEMPLATES.get(DEFAULT_STYLE, INSTRUCT_TEMPLATES["default"]),
+                        )
+
+                        text = gr.Textbox(
+                            label=i18n("待合成文本"),
+                            placeholder=TEXTBOX_PLACEHOLDER,
+                            lines=4,
+                            value=TEXT_REQUIREMENTS.get(DEFAULT_STYLE, TEXT_REQUIREMENTS["default"]),
+                        )
+
+                    with gr.Accordion("🎛️ 细粒度声音控制(可选)", open=False):
+                        with gr.Row():
+                            age_ctrl = gr.Dropdown(label="年龄", choices=["不指定", "小孩", "青年", "中年", "老年"], value="不指定")
+                            gender_ctrl = gr.Dropdown(label="性别", choices=["不指定", "男性", "女性"], value="不指定")
+
+                        with gr.Row():
+                            pitch_ctrl = gr.Dropdown(
+                                label="音调高度",
+                                choices=["不指定", "音调很高", "音调较高", "音调中等", "音调较低", "音调很低"],
+                                value="不指定",
+                            )
+                            pitch_var_ctrl = gr.Dropdown(
+                                label="音调变化",
+                                choices=["不指定", "音调变化很强", "音调变化较强", "音调变化一般", "音调变化较弱", "音调变化很弱"],
+                                value="不指定",
+                            )
+
+                        with gr.Row():
+                            volume_ctrl = gr.Dropdown(
+                                label="音量",
+                                choices=["不指定", "音量很大", "音量较大", "音量中等", "音量较小", "音量很小"],
+                                value="不指定",
+                            )
+                            speed_ctrl = gr.Dropdown(
+                                label="语速",
+                                choices=["不指定", "语速很快", "语速较快", "语速中等", "语速较慢", "语速很慢"],
+                                value="不指定",
+                            )
+
+                        emo_ctrl = gr.Dropdown(
+                            label="情感",
+                            choices=["不指定", "开心", "生气", "难过", "惊讶", "厌恶", "害怕"],
+                            value="不指定",
+                        )
+
+                    with gr.Accordion("📚 Best Practice Guide", open=False):
+                        gr.Markdown(BEST_PRACTICE_MD)
+
+                    def apply_template(tpl_name):
+                        return INSTRUCT_TEMPLATES.get(tpl_name, ""), TEXT_REQUIREMENTS.get(tpl_name, "")
+
+                    instruct_template.change(apply_template, inputs=[instruct_template], outputs=[instruct_text, text])
+
+                # Right: Results + Generate
+                with gr.Column(scale=5, elem_id="vs-card"):
+                    gr.Markdown("### 🎵 Results")
+                    generate = gr.Button("🎧 Generate", variant="primary")
+                    audio_output1 = gr.Audio(label=i18n("Generated Audio 1"), type="numpy", interactive=False)
+                    audio_output2 = gr.Audio(label=i18n("Generated Audio 2"), type="numpy", interactive=False)
+                    audio_output3 = gr.Audio(label=i18n("Generated Audio 3"), type="numpy", interactive=False)
+
+                    generate.click(
+                        fn=inference_select_best3,
+                        inputs=[text, instruct_text, age_ctrl, gender_ctrl, pitch_ctrl, pitch_var_ctrl, volume_ctrl, speed_ctrl, emo_ctrl],
+                        outputs=[audio_output1, audio_output2, audio_output3],
+                    )
+
+    return app
+
+
+if __name__ == "__main__":
+    demo = build_app()
+    demo.launch()
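
For reference, a minimal, self-contained sanity check of the `<|s_N|>` speech-token convention that `extract_speech_ids` parses above; the id values here are arbitrary illustrative numbers, not real model output:

```python
# Round-trip the <|s_N|> speech-token convention used by extract_speech_ids.
# The ids are made-up illustrative values.
tokens = [f"<|s_{i}|>" for i in (12, 345, 6789)]
ids = [int(t[4:-2]) for t in tokens if t.startswith("<|s_") and t.endswith("|>")]
assert ids == [12, 345, 6789]
print(ids)
```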
requirements.txt ADDED
@@ -0,0 +1,14 @@
+gradio
+torch
+transformers
+vllm
+funasr-onnx
+huggingface_hub
+jiwer
+zhon
+loguru
+pyrootutils
+jieba
+torchtune
+torchao
+vector_quantize_pytorch
tools/__pycache__/wer.cpython-310.pyc ADDED
Binary file (1.96 kB).
tools/wer.py ADDED
@@ -0,0 +1,59 @@
+# tools/wer.py
+from __future__ import annotations
+
+from typing import List, Tuple
+import string
+
+from jiwer import process_words
+from zhon.hanzi import punctuation as zh_punctuation
+
+# Chinese punctuation + English punctuation + '-'
+_PUNCTUATION_ALL = zh_punctuation + string.punctuation + "-"
+
+
+def _normalize_pair(gt: str, gen: str, lang: str) -> Tuple[str, str]:
+    gt = "" if gt is None else str(gt)
+    gen = "" if gen is None else str(gen)
+
+    # Strip punctuation (keep "'")
+    for x in _PUNCTUATION_ALL:
+        if x == "'":
+            continue
+        gt = gt.replace(x, "")
+        gen = gen.replace(x, "")
+
+    # Normalize spaces and hyphens
+    gt = gt.replace(" ", " ").replace("-", " ")
+    gen = gen.replace(" ", " ").replace("-", " ")
+
+    if lang == "zh":
+        # Treat each character as a token
+        gt = " ".join([ch for ch in gt])
+        gen = " ".join([ch for ch in gen])
+    elif lang == "en":
+        gt = gt.lower()
+        gen = gen.lower()
+    else:
+        raise NotImplementedError("lang must be 'zh' or 'en'")
+
+    return gt, gen
+
+
+def compute_wers(gt_texts: List[str], gen_texts: List[str], lang: str = "zh") -> List[float]:
+    if len(gt_texts) != len(gen_texts):
+        raise ValueError(f"Length mismatch: {len(gt_texts)} != {len(gen_texts)}")
+
+    wers: List[float] = []
+    for gt_raw, gen_raw in zip(gt_texts, gen_texts):
+        gt_norm, gen_norm = _normalize_pair(gt_raw, gen_raw, lang=lang)
+        measures = process_words(reference=gt_norm, hypothesis=gen_norm)
+        wers.append(float(measures.wer))
+    return wers
+
+
+if __name__ == "__main__":
+    gt = ["你好世界啊", "今天天气不对", "abc-def"]
+    gen = ["你好,世界!", "今天 天气 不错", "abc def"]
+    print(compute_wers(gt, gen, lang="zh"))
+    print(compute_wers(["Hello World"], ["hello, world!"], lang="en"))
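
As a hand-computed check of the character-level convention above: for Chinese, `compute_wers` splits both strings into single-character tokens before `jiwer` aligns them, so one dropped character out of five reference characters gives WER = 1/5. A minimal sketch (assumes only that `jiwer` is installed; the strings are arbitrary):

```python
from jiwer import process_words

# "你好世界啊" -> 5 character tokens; the hypothesis drops the last one,
# so the alignment has 1 deletion over 5 reference tokens: WER = 0.2.
ref = " ".join("你好世界啊")
hyp = " ".join("你好世界")
print(process_words(reference=ref, hypothesis=hyp).wer)  # 0.2
```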
xcodec2/.gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
xcodec2/.vscode/settings.json ADDED
@@ -0,0 +1,5 @@
+{
+    "python-envs.defaultEnvManager": "ms-python.python:conda",
+    "python-envs.defaultPackageManager": "ms-python.python:conda",
+    "python-envs.pythonProjects": []
+}
xcodec2/README.md ADDED
@@ -0,0 +1,69 @@
+---
+license: cc-by-nc-4.0
+tags:
+- audio-to-audio
+pipeline_tag: audio-to-audio
+---
+
+[![arXiv](https://img.shields.io/badge/arXiv-Paper-<COLOR>.svg)](https://arxiv.org/abs/2502.04128)
+
+**Update (2025-02-13):** Add [Llasa finetune instruction](https://github.com/zhenye234/LLaSA_training/tree/main/finetune).
+
+**Update (2025-02-07):** Our paper has been released!
+
+## Paper
+
+LLaSA: Scaling Train Time and Inference Time Compute for LLaMA based Speech Synthesis
+
+Codec Does Matter: Exploring the Semantic Shortcoming of Codec for Audio Language Model (AAAI 2025, xcodec 1.0)
+
+# Getting Started with XCodec2 on Hugging Face
+
+XCodec2 is a speech tokenizer that offers the following key features:
+
+1. **Single Vector Quantization**
+2. **50 Tokens per Second**
+3. **Multilingual Speech Semantic Support and High-Quality Speech Reconstruction**
+
+To use `xcodec2`, make sure it is installed:
+
+```bash
+conda create -n xcodec2 python=3.9
+conda activate xcodec2
+pip install xcodec2
+```
+
+Use `xcodec2==0.1.5` for codec inference and Llasa fine-tuning. Unnecessary dependencies have been removed and it works fine in our testing, though other problems may still arise; if you prefer more stability, use `xcodec2==0.1.3`, which matches the environment used during codec training.
+
+Then:
+
+```python
+import torch
+import soundfile as sf
+
+from xcodec2.modeling_xcodec2 import XCodec2Model
+
+model_path = "HKUSTAudio/xcodec2"
+
+model = XCodec2Model.from_pretrained(model_path)
+model.eval().cuda()
+
+wav, sr = sf.read("test.wav")
+wav_tensor = torch.from_numpy(wav).float().unsqueeze(0)  # Shape: (1, T)
+
+with torch.no_grad():
+    # Only 16 kHz speech is supported.
+    # Only single inputs are supported; for batch inference, see the link below.
+    vq_code = model.encode_code(input_waveform=wav_tensor)
+    print("Code:", vq_code)
+
+    recon_wav = model.decode_code(vq_code).cpu()  # Shape: (1, 1, T')
+
+sf.write("reconstructed.wav", recon_wav[0, 0, :].numpy(), sr)
+print("Done! Check reconstructed.wav")
+```
+
+If you want to train your own xcodec2, run batch inference, or do large-scale code extraction, the code is released [here](https://github.com/zhenye234/X-Codec-2.0).
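
The README snippet above assumes the input file is already 16 kHz. A minimal sketch of resampling arbitrary-rate audio before encoding, assuming `torchaudio` is available (it is not listed in this commit's requirements.txt, and `test.wav` is a hypothetical input):

```python
import torch
import torchaudio
import soundfile as sf

wav, sr = sf.read("test.wav")  # hypothetical mono input file
wav_tensor = torch.from_numpy(wav).float().unsqueeze(0)  # (1, T)
if sr != 16000:
    # xcodec2 expects 16 kHz speech, per the README above.
    wav_tensor = torchaudio.functional.resample(wav_tensor, orig_freq=sr, new_freq=16000)
```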
xcodec2/__init__.py ADDED
File without changes
xcodec2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (156 Bytes).
xcodec2/__pycache__/configuration_bigcodec.cpython-310.pyc ADDED
Binary file (791 Bytes).
xcodec2/__pycache__/configuration_bigcodec.cpython-38.pyc ADDED
Binary file (774 Bytes).
xcodec2/__pycache__/modeling_xcodec2.cpython-310.pyc ADDED
Binary file (4.13 kB).
xcodec2/__pycache__/modeling_xcodec2.cpython-38.pyc ADDED
Binary file (4.11 kB).
xcodec2/config.json ADDED
@@ -0,0 +1,11 @@
+{
+    "model_type": "xcodec2",
+    "semantic_hidden_size": 1024,
+    "codec_encoder_hidden_size": 1024,
+    "codec_decoder_hidden_size": 1024,
+    "use_vocos": true,
+    "architectures": [
+        "XCodec2Model"
+    ]
+}
xcodec2/configuration_bigcodec.py ADDED
@@ -0,0 +1,19 @@
+from transformers import PretrainedConfig
+
+
+class BigCodecConfig(PretrainedConfig):
+    model_type = "bigcodec"
+
+    def __init__(
+        self,
+        # These are just example hyperparameters.
+        semantic_hidden_size=1024,
+        codec_encoder_hidden_size=1024,
+        codec_decoder_hidden_size=1024,
+        use_vocos=True,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.semantic_hidden_size = semantic_hidden_size
+        self.codec_encoder_hidden_size = codec_encoder_hidden_size
+        self.codec_decoder_hidden_size = codec_decoder_hidden_size
+        self.use_vocos = use_vocos
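
Since `BigCodecConfig` is a standard `PretrainedConfig` subclass, it supports the usual save/load round-trip. A minimal sketch (the directory name is arbitrary):

```python
from xcodec2.configuration_bigcodec import BigCodecConfig

cfg = BigCodecConfig(semantic_hidden_size=1024, use_vocos=True)
cfg.save_pretrained("bigcodec_config_demo")  # writes config.json into the directory
cfg2 = BigCodecConfig.from_pretrained("bigcodec_config_demo")
assert cfg2.semantic_hidden_size == 1024 and cfg2.use_vocos
```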
xcodec2/modeling_xcodec2.py ADDED
@@ -0,0 +1,164 @@
+import torch
+import torch.nn as nn
+from transformers import PreTrainedModel
+from xcodec2.configuration_bigcodec import BigCodecConfig
+
+from xcodec2.vq.codec_encoder import CodecEncoder_Transformer
+from xcodec2.vq.codec_decoder_vocos import CodecDecoderVocos
+from xcodec2.vq.module import SemanticEncoder
+from transformers import AutoFeatureExtractor, Wav2Vec2BertModel
+
+
+class XCodec2Model(PreTrainedModel):
+    config_class = BigCodecConfig
+
+    def __init__(self, config: BigCodecConfig):
+        super().__init__(config)
+
+        # 1) Semantic model
+        self.semantic_model = Wav2Vec2BertModel.from_pretrained(
+            "facebook/w2v-bert-2.0",
+            output_hidden_states=True,
+        )
+        self.semantic_model.eval()
+
+        self.SemanticEncoder_module = SemanticEncoder(
+            config.semantic_hidden_size,
+            config.semantic_hidden_size,
+            config.semantic_hidden_size,
+        )
+
+        # 2) Codec encoder
+        self.CodecEnc = CodecEncoder_Transformer()
+
+        # 3) Codec decoder
+        self.generator = CodecDecoderVocos()
+
+        # 4) Two fully connected layers
+        self.fc_prior = nn.Linear(2048, 2048)
+        self.fc_post_a = nn.Linear(2048, 1024)
+        feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/w2v-bert-2.0")
+        self.feature_extractor = feature_extractor
+
+    def forward(self, input_waveform, sample_rate=16000):
+        """
+        This logic does not have to live in `forward` and could be split into
+        other methods, but pipeline compatibility requires the core logic here.
+
+        Args:
+            input_waveform: [batch_size, waveform_length]
+            sample_rate: defaults to 16000
+        Returns:
+            the reconstructed audio (Tensor)
+        """
+        # 1) Feature extraction
+        # Padding, if needed, can be done here.
+        input_features = self.feature_extractor(
+            input_waveform,
+            sampling_rate=sample_rate,
+            return_tensors="pt",
+        ).input_features.to(self.device)  # [batch, frames, feat_dim]
+
+        # 2) Semantic layer
+        semantic_output = self.semantic_model(input_features)
+        semantic_hidden_16 = semantic_output.hidden_states[16]  # take layer 16
+        semantic_hidden_16 = semantic_hidden_16.transpose(1, 2)  # [batch, hidden_dim, frames]
+        semantic_encoded = self.SemanticEncoder_module(semantic_hidden_16)
+
+        # 3) Codec encoder
+        wav = input_waveform.unsqueeze(1).to(self.device)  # shape: [batch, 1, time]
+        vq_emb = self.CodecEnc(wav)  # [batch, time//down, 1024] (illustrative shape)
+        vq_emb = vq_emb.transpose(1, 2)  # -> [batch, 1024, frames]
+
+        # Align the number of time frames with the semantic features.
+        # Simple truncation (or zero-padding) is only example handling;
+        # a real implementation may need proper alignment.
+        if vq_emb.shape[-1] != semantic_encoded.shape[-1]:
+            min_len = min(vq_emb.shape[-1], semantic_encoded.shape[-1])
+            vq_emb = vq_emb[:, :, :min_len]
+            semantic_encoded = semantic_encoded[:, :, :min_len]
+
+        # 4) Concatenate
+        concat_emb = torch.cat([semantic_encoded, vq_emb], dim=1)  # [batch, 1024 + 1024, frames]
+
+        # 5) fc_prior
+        concat_emb = self.fc_prior(concat_emb.transpose(1, 2)).transpose(1, 2)
+
+        # 6) Quantization step of the decoder
+        _, vq_code, _ = self.generator(concat_emb, vq=True)
+        vq_post_emb = self.generator.quantizer.get_output_from_indices(vq_code.transpose(1, 2))
+        vq_post_emb = vq_post_emb.transpose(1, 2)
+
+        # 7) fc_post_a
+        vq_post_emb = self.fc_post_a(vq_post_emb.transpose(1, 2)).transpose(1, 2)
+
+        # 8) Finally decode back to a waveform
+        recon_audio = self.generator(vq_post_emb.transpose(1, 2), vq=False)[0]
+        # recon_audio: [batch, time]
+        return recon_audio
+
+    def encode_code(self, input_waveform, sample_rate=16000):
+        """
+        Encode input audio into a code representation.
+
+        Args:
+            input_waveform: [batch_size, waveform_length]
+            sample_rate: defaults to 16000
+        Returns:
+            the encoded codes (Tensor)
+        """
+        with torch.no_grad():
+            # 1) Feature extraction
+            input_features = self.feature_extractor(
+                input_waveform,
+                sampling_rate=sample_rate,
+                return_tensors="pt",
+            ).input_features.to(self.device)  # [batch, frames, feat_dim]
+
+            # 2) Semantic layer
+            semantic_output = self.semantic_model(input_features)
+            semantic_hidden_16 = semantic_output.hidden_states[16]  # take layer 16
+            semantic_hidden_16 = semantic_hidden_16.transpose(1, 2)  # [batch, hidden_dim, frames]
+            semantic_encoded = self.SemanticEncoder_module(semantic_hidden_16)
+
+            # 3) Codec encoder
+            wav = input_waveform.unsqueeze(1).to(self.device)  # shape: [batch, 1, time]
+            vq_emb = self.CodecEnc(wav)  # [batch, time//down, 1024] (illustrative shape)
+            vq_emb = vq_emb.transpose(1, 2)  # -> [batch, 1024, frames]
+
+            # Align the number of time frames with the semantic features (example handling).
+            if vq_emb.shape[-1] != semantic_encoded.shape[-1]:
+                min_len = min(vq_emb.shape[-1], semantic_encoded.shape[-1])
+                vq_emb = vq_emb[:, :, :min_len]
+                semantic_encoded = semantic_encoded[:, :, :min_len]
+
+            # 4) Concatenate
+            concat_emb = torch.cat([semantic_encoded, vq_emb], dim=1)  # [batch, 2048, frames]
+
+            # 5) fc_prior
+            concat_emb = self.fc_prior(concat_emb.transpose(1, 2)).transpose(1, 2)
+
+            # 6) Quantization step of the decoder: extract the codes
+            _, vq_code, _ = self.generator(concat_emb, vq=True)
+            # vq_code: [batch, frames]
+            return vq_code
+
+    def decode_code(self, vq_code):
+        """
+        Decode codes back into audio.
+
+        Args:
+            vq_code: encoded codes (Tensor) [batch, frames]
+        Returns:
+            the decoded audio (Tensor) [batch, waveform_length]
+        """
+        with torch.no_grad():
+            # Get the quantized embeddings
+            vq_post_emb = self.generator.quantizer.get_output_from_indices(vq_code.transpose(1, 2))
+            vq_post_emb = vq_post_emb.transpose(1, 2)  # [batch, 1024, frames]
+
+            # fc_post_a
+            vq_post_emb = self.fc_post_a(vq_post_emb.transpose(1, 2)).transpose(1, 2)  # [batch, 1024, frames]
+
+            # Finally decode back to a waveform
+            recon_audio = self.generator(vq_post_emb.transpose(1, 2), vq=False)[0]  # [batch, time]
+            return recon_audio
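
Because `encode_code` yields one code per frame and the xcodec2 README above quotes roughly 50 tokens per second, the length of a `vq_code` sequence maps directly to audio duration. A trivial illustration (the frame count is a made-up value):

```python
# Estimate audio duration from a code sequence, assuming the ~50 tokens/second
# rate quoted in xcodec2/README.md. num_frames is illustrative, not measured.
num_frames = 250
approx_seconds = num_frames / 50
print(f"~{approx_seconds:.1f} s of audio")  # ~5.0 s
```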
xcodec2/module.py ADDED
File without changes
xcodec2/vq/__init__.py ADDED
@@ -0,0 +1,4 @@
+from xcodec2.vq.codec_encoder import CodecEncoder
+from xcodec2.vq.codec_decoder import CodecDecoder
+from xcodec2.vq.codec_decoder_vocos import CodecDecoderVocos
+from xcodec2.vq.codec_encoder import CodecEncoder_Transformer, CodecEncoder_only_Transformer
xcodec2/vq/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (440 Bytes).
xcodec2/vq/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (371 Bytes).
xcodec2/vq/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (318 Bytes).
xcodec2/vq/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (418 Bytes).
xcodec2/vq/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (383 Bytes).
xcodec2/vq/__pycache__/activations.cpython-310.pyc ADDED
Binary file (4.03 kB).
xcodec2/vq/__pycache__/activations.cpython-311.pyc ADDED
Binary file (6.07 kB).
xcodec2/vq/__pycache__/activations.cpython-312.pyc ADDED
Binary file (5.65 kB).
xcodec2/vq/__pycache__/activations.cpython-38.pyc ADDED
Binary file (4.07 kB).
xcodec2/vq/__pycache__/activations.cpython-39.pyc ADDED
Binary file (4.04 kB).
xcodec2/vq/__pycache__/blocks.cpython-310.pyc ADDED
Binary file (6 kB).
xcodec2/vq/__pycache__/blocks.cpython-38.pyc ADDED
Binary file (6.34 kB).
xcodec2/vq/__pycache__/blocks.cpython-39.pyc ADDED
Binary file (6.29 kB).
xcodec2/vq/__pycache__/bs_roformer5.cpython-310.pyc ADDED
Binary file (3.94 kB).
xcodec2/vq/__pycache__/bs_roformer5.cpython-38.pyc ADDED
Binary file (3.91 kB).
xcodec2/vq/__pycache__/bs_roformer5.cpython-39.pyc ADDED
Binary file (3.86 kB).
xcodec2/vq/__pycache__/codec_decoder.cpython-310.pyc ADDED
Binary file (9 kB).
xcodec2/vq/__pycache__/codec_decoder.cpython-311.pyc ADDED
Binary file (8.78 kB).
xcodec2/vq/__pycache__/codec_decoder.cpython-312.pyc ADDED
Binary file (7.76 kB).
xcodec2/vq/__pycache__/codec_decoder.cpython-38.pyc ADDED
Binary file (9.26 kB).
xcodec2/vq/__pycache__/codec_decoder.cpython-39.pyc ADDED
Binary file (9.22 kB).
xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-310.pyc ADDED
Binary file (18.2 kB).
xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-311.pyc ADDED
Binary file (27.7 kB).
xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-312.pyc ADDED
Binary file (25.2 kB).
xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-38.pyc ADDED
Binary file (18.4 kB).
xcodec2/vq/__pycache__/codec_decoder_vocos.cpython-39.pyc ADDED
Binary file (18.4 kB).
xcodec2/vq/__pycache__/codec_encoder.cpython-310.pyc ADDED
Binary file (9.95 kB).
xcodec2/vq/__pycache__/codec_encoder.cpython-311.pyc ADDED
Binary file (4.91 kB).
xcodec2/vq/__pycache__/codec_encoder.cpython-312.pyc ADDED
Binary file (4.39 kB).
xcodec2/vq/__pycache__/codec_encoder.cpython-38.pyc ADDED
Binary file (10.3 kB).
xcodec2/vq/__pycache__/codec_encoder.cpython-39.pyc ADDED
Binary file (10.2 kB).
xcodec2/vq/__pycache__/factorized_vector_quantize.cpython-310.pyc ADDED
Binary file (3.68 kB).