vasugo05 committed
Commit
4029aa1
verified · 1 Parent(s): fac6851

Delete webui.py

Files changed (1)
  1. webui.py +0 -392
webui.py DELETED
@@ -1,392 +0,0 @@
- import spaces
- import json
- import os
- import sys
- import threading
- import time
-
- import warnings
-
- import numpy as np
-
- warnings.filterwarnings("ignore", category=FutureWarning)
- warnings.filterwarnings("ignore", category=UserWarning)
-
- import pandas as pd
-
- current_dir = os.path.dirname(os.path.abspath(__file__))
- sys.path.append(current_dir)
- sys.path.append(os.path.join(current_dir, "indextts"))
-
- import argparse
- parser = argparse.ArgumentParser(
-     description="IndexTTS WebUI",
-     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- )
- parser.add_argument("--verbose", action="store_true", default=False, help="Enable verbose mode")
- parser.add_argument("--port", type=int, default=7860, help="Port to run the web UI on")
- parser.add_argument("--host", type=str, default="0.0.0.0", help="Host to run the web UI on")
- parser.add_argument("--model_dir", type=str, default="./checkpoints", help="Model checkpoints directory")
- parser.add_argument("--fp16", action="store_true", default=False, help="Use FP16 for inference if available")
- parser.add_argument("--deepspeed", action="store_true", default=False, help="Use DeepSpeed to accelerate if available")
- parser.add_argument("--cuda_kernel", action="store_true", default=False, help="Use CUDA kernel for inference if available")
- parser.add_argument("--gui_seg_tokens", type=int, default=120, help="GUI: Max tokens per generation segment")
- cmd_args = parser.parse_args()
-
- from tools.download_files import download_model_from_huggingface
- download_model_from_huggingface(os.path.join(current_dir, "checkpoints"),
-                                 os.path.join(current_dir, "checkpoints", "hf_cache"))
-
- import gradio as gr
- from indextts.infer_v2 import IndexTTS2
- from tools.i18n.i18n import I18nAuto
-
- i18n = I18nAuto(language="Auto")
- MODE = 'local'
- tts = IndexTTS2(model_dir=cmd_args.model_dir,
-                 cfg_path=os.path.join(cmd_args.model_dir, "config.yaml"),
-                 use_fp16=cmd_args.fp16,
-                 use_deepspeed=cmd_args.deepspeed,
-                 use_cuda_kernel=cmd_args.cuda_kernel,
-                 )
- # Supported UI languages
- LANGUAGES = {
-     "中文": "zh_CN",
-     "English": "en_US"
- }
- EMO_CHOICES = [i18n("与音色参考音频相同"),
-                i18n("使用情感参考音频"),
-                i18n("使用情感向量控制"),
-                i18n("使用情感描述文本控制")]
- EMO_CHOICES_BASE = EMO_CHOICES[:3]  # basic options
- EMO_CHOICES_EXPERIMENTAL = EMO_CHOICES  # all options (including the text-description mode)
-
- os.makedirs("outputs/tasks", exist_ok=True)
- os.makedirs("prompts", exist_ok=True)
-
- MAX_LENGTH_TO_USE_SPEED = 70
- with open("examples/cases.jsonl", "r", encoding="utf-8") as f:
-     example_cases = []
-     for line in f:
-         line = line.strip()
-         if not line:
-             continue
-         example = json.loads(line)
-         if example.get("emo_audio", None):
-             emo_audio_path = os.path.join("examples", example["emo_audio"])
-         else:
-             emo_audio_path = None
-         # Each example row matches the input components wired to gr.Examples below.
-         example_cases.append([os.path.join("examples", example.get("prompt_audio", "sample_prompt.wav")),
-                               EMO_CHOICES[example.get("emo_mode", 0)],
-                               example.get("text"),
-                               emo_audio_path,
-                               example.get("emo_weight", 1.0),
-                               example.get("emo_text", ""),
-                               example.get("emo_vec_1", 0),
-                               example.get("emo_vec_2", 0),
-                               example.get("emo_vec_3", 0),
-                               example.get("emo_vec_4", 0),
-                               example.get("emo_vec_5", 0),
-                               example.get("emo_vec_6", 0),
-                               example.get("emo_vec_7", 0),
-                               example.get("emo_vec_8", 0),
-                               example.get("emo_text") is not None])
-
- def normalize_emo_vec(emo_vec):
-     # Per-emotion scaling factors, tuned for a better user experience
-     k_vec = [0.75, 0.70, 0.80, 0.80, 0.75, 0.75, 0.55, 0.45]
-     tmp = np.array(k_vec) * np.array(emo_vec)
-     # Cap the total emotion weight at 0.8 to avoid over-saturated output
-     if np.sum(tmp) > 0.8:
-         tmp = tmp * 0.8 / np.sum(tmp)
-     return tmp.tolist()
-
- @spaces.GPU
- def gen_single(emo_control_method, prompt, text,
-                emo_ref_path, emo_weight,
-                vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8,
-                emo_text, emo_random,
-                max_text_tokens_per_segment=120,
-                *args, progress=gr.Progress()):
-     output_path = os.path.join("outputs", f"spk_{int(time.time())}.wav")
-     # Forward progress updates to the Gradio progress bar
-     tts.gr_progress = progress
-     do_sample, top_p, top_k, temperature, \
-         length_penalty, num_beams, repetition_penalty, max_mel_tokens = args
-     kwargs = {
-         "do_sample": bool(do_sample),
-         "top_p": float(top_p),
-         "top_k": int(top_k) if int(top_k) > 0 else None,
-         "temperature": float(temperature),
-         "length_penalty": float(length_penalty),
-         "num_beams": num_beams,
-         "repetition_penalty": float(repetition_penalty),
-         "max_mel_tokens": int(max_mel_tokens),
-         # "typical_sampling": bool(typical_sampling),
-         # "typical_mass": float(typical_mass),
-     }
-     if not isinstance(emo_control_method, int):
-         emo_control_method = emo_control_method.value
-     if emo_control_method == 0:  # emotion from the speaker reference
-         emo_ref_path = None  # remove external reference audio
-     if emo_control_method == 1:  # emotion from reference audio
-         # scale down emo_alpha for a better user experience
-         emo_weight = emo_weight * 0.8
-     if emo_control_method == 2:  # emotion from custom vectors
-         vec = [vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8]
-         vec = normalize_emo_vec(vec)
-     else:
-         # don't use the emotion vector inputs for the other modes
-         vec = None
-
-     if emo_text == "":
-         # erase empty emotion descriptions; `infer()` will then automatically use the main prompt
-         emo_text = None
-
-     print(f"Emo control mode: {emo_control_method}, weight: {emo_weight}, vec: {vec}")
-     output = tts.infer(spk_audio_prompt=prompt, text=text,
-                        output_path=output_path,
-                        emo_audio_prompt=emo_ref_path, emo_alpha=emo_weight,
-                        emo_vector=vec,
-                        use_emo_text=(emo_control_method == 3), emo_text=emo_text, use_random=emo_random,
-                        verbose=cmd_args.verbose,
-                        max_text_tokens_per_segment=int(max_text_tokens_per_segment),
-                        **kwargs)
-     return gr.update(value=output, visible=True)
-
- def update_prompt_audio():
-     # Re-enable the generate button once a new prompt audio is available
-     return gr.update(interactive=True)
-
- with gr.Blocks(title="IndexTTS Demo") as demo:
-     mutex = threading.Lock()
-     gr.HTML('''
-     <h2><center>IndexTTS2: A Breakthrough in Emotionally Expressive and Duration-Controlled Auto-Regressive Zero-Shot Text-to-Speech</h2>
-     <p align="center">
-     <a href='https://arxiv.org/abs/2506.21619'><img src='https://img.shields.io/badge/ArXiv-2506.21619-red'></a>
-     </p>
-     ''')
-
-     with gr.Tab(i18n("音频生成")):
-         with gr.Row():
-             os.makedirs("prompts", exist_ok=True)
-             prompt_audio = gr.Audio(label=i18n("音色参考音频"), key="prompt_audio",
-                                     sources=["upload", "microphone"], type="filepath")
-             prompt_list = os.listdir("prompts")
-             default = ''
-             if prompt_list:
-                 default = prompt_list[0]
-             with gr.Column():
-                 input_text_single = gr.TextArea(label=i18n("文本"), key="input_text_single", placeholder=i18n("请输入目标文本"), info=f"{i18n('当前模型版本')}{tts.model_version or '1.0'}")
-                 gen_button = gr.Button(i18n("生成语音"), key="gen_button", interactive=True)
-             output_audio = gr.Audio(label=i18n("生成结果"), visible=True, key="output_audio")
-         experimental_checkbox = gr.Checkbox(label=i18n("显示实验功能"), value=False)
-         with gr.Accordion(i18n("功能设置")):
-             # Emotion control options
-             with gr.Row():
-                 emo_control_method = gr.Radio(
-                     choices=EMO_CHOICES_BASE,
-                     type="index",
-                     value=EMO_CHOICES_BASE[0], label=i18n("情感控制方式"))
-             # Emotion reference audio
-             with gr.Group(visible=False) as emotion_reference_group:
-                 with gr.Row():
-                     emo_upload = gr.Audio(label=i18n("上传情感参考音频"), type="filepath")
-
-             # Random emotion sampling
-             with gr.Row(visible=False) as emotion_randomize_group:
-                 emo_random = gr.Checkbox(label=i18n("情感随机采样"), value=False)
-
-             # Emotion vector controls
-             with gr.Group(visible=False) as emotion_vector_group:
-                 with gr.Row():
-                     with gr.Column():
-                         vec1 = gr.Slider(label=i18n("喜"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-                         vec2 = gr.Slider(label=i18n("怒"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-                         vec3 = gr.Slider(label=i18n("哀"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-                         vec4 = gr.Slider(label=i18n("惧"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-                     with gr.Column():
-                         vec5 = gr.Slider(label=i18n("厌恶"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-                         vec6 = gr.Slider(label=i18n("低落"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-                         vec7 = gr.Slider(label=i18n("惊喜"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-                         vec8 = gr.Slider(label=i18n("平静"), minimum=0.0, maximum=1.0, value=0.0, step=0.05)
-
-             # Emotion description text
-             with gr.Group(visible=False) as emo_text_group:
-                 with gr.Row():
-                     emo_text = gr.Textbox(label=i18n("情感描述文本"),
-                                           placeholder=i18n("请输入情绪描述(或留空以自动使用目标文本作为情绪描述)"),
-                                           value="",
-                                           info=i18n("例如:委屈巴巴、危险在悄悄逼近"))
-
-             with gr.Row(visible=False) as emo_weight_group:
-                 emo_weight = gr.Slider(label=i18n("情感权重"), minimum=0.0, maximum=1.0, value=0.8, step=0.01)
-
-         with gr.Accordion(i18n("高级生成参数设置"), open=False, visible=False) as advanced_settings_group:
-             with gr.Row():
-                 with gr.Column(scale=1):
-                     gr.Markdown(f"**{i18n('GPT2 采样设置')}** _{i18n('参数会影响音频多样性和生成速度详见')} [Generation strategies](https://huggingface.co/docs/transformers/main/en/generation_strategies)._")
-                     with gr.Row():
-                         do_sample = gr.Checkbox(label="do_sample", value=True, info=i18n("是否进行采样"))
-                         temperature = gr.Slider(label="temperature", minimum=0.1, maximum=2.0, value=0.8, step=0.1)
-                     with gr.Row():
-                         top_p = gr.Slider(label="top_p", minimum=0.0, maximum=1.0, value=0.8, step=0.01)
-                         top_k = gr.Slider(label="top_k", minimum=0, maximum=100, value=30, step=1)
-                         num_beams = gr.Slider(label="num_beams", value=3, minimum=1, maximum=10, step=1)
-                     with gr.Row():
-                         repetition_penalty = gr.Number(label="repetition_penalty", precision=None, value=10.0, minimum=0.1, maximum=20.0, step=0.1)
-                         length_penalty = gr.Number(label="length_penalty", precision=None, value=0.0, minimum=-2.0, maximum=2.0, step=0.1)
-                     max_mel_tokens = gr.Slider(label="max_mel_tokens", value=1500, minimum=50, maximum=tts.cfg.gpt.max_mel_tokens, step=10, info=i18n("生成Token最大数量,过小导致音频被截断"), key="max_mel_tokens")
-                     # with gr.Row():
-                     #     typical_sampling = gr.Checkbox(label="typical_sampling", value=False, info="不建议使用")
-                     #     typical_mass = gr.Slider(label="typical_mass", value=0.9, minimum=0.0, maximum=1.0, step=0.1)
-                 with gr.Column(scale=2):
-                     gr.Markdown(f'**{i18n("分句设置")}** _{i18n("参数会影响音频质量和生成速度")}_')
-                     with gr.Row():
-                         initial_value = max(20, min(tts.cfg.gpt.max_text_tokens, cmd_args.gui_seg_tokens))
-                         max_text_tokens_per_segment = gr.Slider(
-                             label=i18n("分句最大Token数"), value=initial_value, minimum=20, maximum=tts.cfg.gpt.max_text_tokens, step=2, key="max_text_tokens_per_segment",
-                             info=i18n("建议80~200之间,值越大,分句越长;值越小,分句越碎;过小过大都可能导致音频质量不高"),
-                         )
-                     with gr.Accordion(i18n("预览分句结果"), open=True) as segments_settings:
-                         segments_preview = gr.Dataframe(
-                             headers=[i18n("序号"), i18n("分句内容"), i18n("Token数")],
-                             key="segments_preview",
-                             wrap=True,
-                         )
-         advanced_params = [
-             do_sample, top_p, top_k, temperature,
-             length_penalty, num_beams, repetition_penalty, max_mel_tokens,
-             # typical_sampling, typical_mass,
-         ]
-
-     if len(example_cases) > 2:
-         # Hide the last two (experimental text-description) examples by default
-         example_table = gr.Examples(
-             examples=example_cases[:-2],
-             examples_per_page=20,
-             inputs=[prompt_audio,
-                     emo_control_method,
-                     input_text_single,
-                     emo_upload,
-                     emo_weight,
-                     emo_text,
-                     vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, experimental_checkbox]
-         )
-     elif len(example_cases) > 0:
-         example_table = gr.Examples(
-             examples=example_cases,
-             examples_per_page=20,
-             inputs=[prompt_audio,
-                     emo_control_method,
-                     input_text_single,
-                     emo_upload,
-                     emo_weight,
-                     emo_text,
-                     vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, experimental_checkbox]
-         )
-
-     def on_input_text_change(text, max_text_tokens_per_segment):
-         if text and len(text) > 0:
-             text_tokens_list = tts.tokenizer.tokenize(text)
-             segments = tts.tokenizer.split_segments(text_tokens_list, max_text_tokens_per_segment=int(max_text_tokens_per_segment))
-             data = []
-             for i, s in enumerate(segments):
-                 segment_str = ''.join(s)
-                 tokens_count = len(s)
-                 data.append([i, segment_str, tokens_count])
-             return {
-                 segments_preview: gr.update(value=data, visible=True, type="array"),
-             }
-         else:
-             df = pd.DataFrame([], columns=[i18n("序号"), i18n("分句内容"), i18n("Token数")])
-             return {
-                 segments_preview: gr.update(value=df),
-             }
-
-     def on_method_select(emo_control_method):
-         # Toggle visibility of the (reference audio, randomize, vectors, text, weight) groups
-         if emo_control_method == 1:  # emotion reference audio
-             return (gr.update(visible=True),
-                     gr.update(visible=False),
-                     gr.update(visible=False),
-                     gr.update(visible=False),
-                     gr.update(visible=True))
-         elif emo_control_method == 2:  # emotion vectors
-             return (gr.update(visible=False),
-                     gr.update(visible=True),
-                     gr.update(visible=True),
-                     gr.update(visible=False),
-                     gr.update(visible=False))
-         elif emo_control_method == 3:  # emotion text description
-             return (gr.update(visible=False),
-                     gr.update(visible=True),
-                     gr.update(visible=False),
-                     gr.update(visible=True),
-                     gr.update(visible=True))
-         else:  # 0: same as the speaker reference
-             return (gr.update(visible=False),
-                     gr.update(visible=False),
-                     gr.update(visible=False),
-                     gr.update(visible=False),
-                     gr.update(visible=False))
-
-     def on_experimental_change(is_exp):
-         # Toggle the set of emotion-control options.
-         # Note: the third return value has no actual effect.
-         if is_exp:
-             return gr.update(choices=EMO_CHOICES_EXPERIMENTAL, value=EMO_CHOICES_EXPERIMENTAL[0]), gr.update(visible=True), gr.update(value=example_cases)
-         else:
-             return gr.update(choices=EMO_CHOICES_BASE, value=EMO_CHOICES_BASE[0]), gr.update(visible=False), gr.update(value=example_cases[:-2])
-
-     emo_control_method.select(on_method_select,
-                               inputs=[emo_control_method],
-                               outputs=[emotion_reference_group,
-                                        emotion_randomize_group,
-                                        emotion_vector_group,
-                                        emo_text_group,
-                                        emo_weight_group])
-
-     input_text_single.change(on_input_text_change,
-                              inputs=[input_text_single, max_text_tokens_per_segment],
-                              outputs=[segments_preview])
-
-     experimental_checkbox.change(on_experimental_change,
-                                  inputs=[experimental_checkbox],
-                                  outputs=[emo_control_method, advanced_settings_group, example_table.dataset])  # advanced-params accordion
-
-     max_text_tokens_per_segment.change(on_input_text_change,
-                                        inputs=[input_text_single, max_text_tokens_per_segment],
-                                        outputs=[segments_preview])
-
-     prompt_audio.upload(update_prompt_audio,
-                         inputs=[],
-                         outputs=[gen_button])
-
-     gen_button.click(gen_single,
-                      inputs=[emo_control_method, prompt_audio, input_text_single, emo_upload, emo_weight,
-                              vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8,
-                              emo_text, emo_random,
-                              max_text_tokens_per_segment,
-                              *advanced_params,
-                              ],
-                      outputs=[output_audio])
-
-
- if __name__ == "__main__":
-     demo.queue(20)
-     demo.launch(server_name=cmd_args.host, server_port=cmd_args.port)
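
For reference, the emotion-vector handling removed above can be exercised on its own: each of the eight sliders is scaled by a per-emotion factor, and the weighted sum is capped at 0.8 before being passed to tts.infer. A minimal standalone sketch (numpy only; the sample input is illustrative, not from the repository):

import numpy as np

def normalize_emo_vec(emo_vec):
    # per-emotion scaling factors, as in the deleted webui.py
    k_vec = [0.75, 0.70, 0.80, 0.80, 0.75, 0.75, 0.55, 0.45]
    tmp = np.array(k_vec) * np.array(emo_vec)
    # cap the weighted sum at 0.8
    if np.sum(tmp) > 0.8:
        tmp = tmp * 0.8 / np.sum(tmp)
    return tmp.tolist()

# joy=1.0 and surprise=0.5 give weighted values 0.75 and 0.275;
# their sum (1.025) exceeds 0.8, so both are rescaled by 0.8/1.025.
print(normalize_emo_vec([1.0, 0, 0, 0, 0, 0, 0.5, 0]))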