Approximetal committed on
Commit
4fee97c
·
verified ·
1 Parent(s): fe9a2a5

Delete inference_gradio.py

Browse files
Files changed (1)
  1. inference_gradio.py +0 -1225
inference_gradio.py DELETED
@@ -1,1225 +0,0 @@
1
- import os, gc
2
- import re, time
3
- import logging
4
- from num2words import num2words
5
- import gradio as gr
6
- import torch, torchaudio
7
- import numpy as np
8
- import random
9
- from scipy.io import wavfile
10
- import onnx
11
- import onnxruntime as ort
12
- import copy
13
- import uroman as ur
14
- import jieba, zhconv
15
- from pypinyin.core import Pinyin
16
- from pypinyin import Style
17
-
18
- from lemas_tts.api import TTS, PRETRAINED_ROOT, CKPTS_ROOT
19
- from lemas_tts.infer.edit_multilingual import gen_wav_multilingual
20
- from lemas_tts.infer.text_norm.txt2pinyin import (
21
- MyConverter,
22
- _PAUSE_SYMBOL,
23
- change_tone_in_bu_or_yi,
24
- get_phoneme_from_char_and_pinyin,
25
- )
26
- from lemas_tts.infer.text_norm.cn_tn import NSWNormalizer
27
- # import io
28
- # import uuid
29
- _JIEBA_DICT = os.path.join(
30
- os.path.dirname(__file__),
31
- "lemas_tts",
32
- "infer",
33
- "text_norm",
34
- "jieba_dict.txt",
35
- )
36
- if os.path.isfile(_JIEBA_DICT):
37
- jieba.set_dictionary(_JIEBA_DICT)
38
-
39
- # from inference_tts_scale import inference_one_sample as inference_tts
40
- import langid
41
- langid.set_languages(['es','pt','zh','en','de','fr','it', 'ru', 'id', 'vi'])
42
-
43
-
44
- os.environ['CURL_CA_BUNDLE'] = ''
45
- DEMO_PATH = os.getenv("DEMO_PATH", "./pretrained_models/demo")
46
- TMP_PATH = os.getenv("TMP_PATH", "./pretrained_models/demo/temp")
47
- MODELS_PATH = os.getenv("MODELS_PATH", "./pretrained_models")
48
-
49
- device = "cuda" if torch.cuda.is_available() else "cpu"
50
- ASR_DEVICE = "cpu" # force whisperx/pyannote to CPU to avoid cuDNN issues
51
- whisper_model, align_model = None, None
52
- tts_edit_model = None
53
-
54
- _whitespace_re = re.compile(r"\s+")
55
- alpha_pattern = re.compile(r"[a-zA-Z]")
56
-
57
- formatter = ("%(asctime)s [%(levelname)s] %(filename)s:%(lineno)d || %(message)s")
58
- logging.basicConfig(format=formatter, level=logging.INFO)
59
-
60
- # def get_random_string():
61
- # return "".join(str(uuid.uuid4()).split("-"))
62
-
63
- def seed_everything(seed):
64
- if seed != -1:
65
- os.environ['PYTHONHASHSEED'] = str(seed)
66
- random.seed(seed)
67
- np.random.seed(seed)
68
- torch.manual_seed(seed)
69
- torch.cuda.manual_seed(seed)
70
- torch.backends.cudnn.benchmark = False
71
- torch.backends.cudnn.deterministic = True
72
-
73
-
74
- class UVR5:
75
- """Small wrapper around the bundled uvr5 implementation for denoising."""
76
-
77
- def __init__(self, model_dir):
78
- # Code directory is always the local `uvr5` folder in this repo
79
- code_dir = os.path.join(os.path.dirname(__file__), "uvr5")
80
- self.model = self.load_model(model_dir, code_dir)
81
-
82
- def load_model(self, model_dir, code_dir):
83
- import sys, json, os
84
- if code_dir not in sys.path:
85
- sys.path.append(code_dir)
86
- from multiprocess_cuda_infer import ModelData, Inference
87
- # In the minimal LEMAS-TTS layout, UVR5 weights live under:
88
- # <pretrained_models>/uvr5/models/MDX_Net_Models/model_data/
89
- # Here `model_dir` points to that `model_data` directory.
90
- model_path = os.path.join(model_dir, "Kim_Vocal_1.onnx")
91
- config_path = os.path.join(model_dir, "MDX-Net-Kim-Vocal1.json")
92
- with open(config_path, "r", encoding="utf-8") as f:
93
- configs = json.load(f)
94
- model_data = ModelData(
95
- model_path=model_path,
96
- audio_path = model_dir,
97
- result_path = model_dir,
98
- device = 'cpu',
99
- process_method = "MDX-Net",
100
- base_dir=code_dir,
101
- **configs
102
- )
103
-
104
- uvr5_model = Inference(model_data, 'cpu')
105
- uvr5_model.load_model(model_path, 1)
106
- return uvr5_model
107
-
108
- def denoise(self, audio_info):
109
- input_audio = load_wav(audio_info, sr=44100, channel=2)
110
- output_audio = self.model.demix_base({0:input_audio.squeeze()}, is_match_mix=False)
111
- # transform = torchaudio.transforms.Resample(44100, 16000)
112
- # output_audio = transform(output_audio)
113
- return output_audio.squeeze().T.numpy(), 44100
114
-
115
-
116
- class DeepFilterNet:
117
- def __init__(self, model_path):
118
- self.hop_size = 480
119
- self.fft_size = 960
120
- self.model = self.load_model(model_path)
121
-
122
-
123
- def load_model(self, model_path, threads=1):
124
- sess_options = ort.SessionOptions()
125
- sess_options.intra_op_num_threads = threads
126
- sess_options.graph_optimization_level = (ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
127
- sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
128
-
129
- model = onnx.load_model(model_path)
130
- ort_session = ort.InferenceSession(
131
- model.SerializeToString(),
132
- sess_options,
133
- providers=["CPUExecutionProvider"], # ["CUDAExecutionProvider"], #
134
- )
135
-
136
- input_names = ["input_frame", "states", "atten_lim_db"]
137
- output_names = ["enhanced_audio_frame", "new_states", "lsnr"]
138
- return ort_session
139
-
140
-
141
- def denoise(self, audio_info):
142
- wav = load_wav(audio_info, 48000)
143
- orig_len = wav.shape[-1]
144
- hop_size_divisible_padding_size = (self.hop_size - orig_len % self.hop_size) % self.hop_size
145
- orig_len += hop_size_divisible_padding_size
146
- wav = torch.nn.functional.pad(
147
- wav, (0, self.fft_size + hop_size_divisible_padding_size)
148
- )
149
- chunked_audio = torch.split(wav, self.hop_size)
150
- # chunked_audio = torch.split(wav, int(wav.shape[-1]/2))
151
-
152
- state = np.zeros(45304,dtype=np.float32)
153
- atten_lim_db = np.zeros(1,dtype=np.float32)
154
- enhanced = []
155
- for frame in chunked_audio:
156
- out = self.model.run(None,input_feed={"input_frame":frame.numpy(),"states":state,"atten_lim_db":atten_lim_db})
157
- enhanced.append(torch.tensor(out[0]))
158
- state = out[1]
159
-
160
- enhanced_audio = torch.cat(enhanced).unsqueeze(0) # [t] -> [1, t] typical mono format
161
-
162
- d = self.fft_size - self.hop_size
163
- enhanced_audio = enhanced_audio[:, d: orig_len + d]
164
-
165
- return enhanced_audio.squeeze().numpy(), 48000
166
-
167
-
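# Note on DeepFilterNet.denoise() above (editorial summary, not original code):
# the ONNX model is run frame by frame over hop_size (480-sample) chunks of
# 48 kHz audio, carrying a 45304-float recurrent state between frames; the
# extra fft_size padding is trimmed at the end so the output length matches
# the hop-aligned input.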
168
- class TextNorm():
169
- def __init__(self):
170
- my_pinyin = Pinyin(MyConverter())
171
- self.pinyin_parser = my_pinyin.pinyin
172
-
173
- def sil_type(self, time_s):
174
- if round(time_s) < 0.4:
175
- return ""
176
- elif round(time_s) >= 0.4 and round(time_s) < 0.8:
177
- return "#1"
178
- elif round(time_s) >= 0.8 and round(time_s) < 1.5:
179
- return "#2"
180
- elif round(time_s) >= 1.5 and round(time_s) < 3.0:
181
- return "#3"
182
- elif round(time_s) >= 3.0:
183
- return "#4"
184
-
185
-
186
- def add_sil_raw(self, sub_list, start_time, end_time, target_transcript):
187
- txt = []
188
- txt_list = [x["word"] for x in sub_list]
189
- sil = self.sil_type(sub_list[0]["start"])
190
- if len(sil) > 0:
191
- txt.append(sil)
192
- txt.append(txt_list[0])
193
- for i in range(1, len(sub_list)):
194
- if sub_list[i]["start"] >= start_time and sub_list[i]["end"] <= end_time:
195
- txt.append(target_transcript)
196
- target_transcript = ""
197
- else:
198
- sil = self.sil_type(sub_list[i]["start"] - sub_list[i-1]["end"])
199
- if len(sil) > 0:
200
- txt.append(sil)
201
- txt.append(txt_list[i])
202
- return ' '.join(txt)
203
-
204
- def add_sil(self, sub_list, start_time, end_time, target_transcript, src_lang, tar_lang):
205
- txts = []
206
- txt_list = [x["word"] for x in sub_list]
207
- sil = self.sil_type(sub_list[0]["start"])
208
- if len(sil) > 0:
209
- txts.append([src_lang, sil])
210
-
211
- if sub_list[0]["start"] < start_time:
212
- txts.append([src_lang, txt_list[0]])
213
- for i in range(1, len(sub_list)):
214
- if sub_list[i]["start"] >= start_time and sub_list[i]["end"] <= end_time:
215
- txts.append([tar_lang, target_transcript])
216
- target_transcript = ""
217
- else:
218
- sil = self.sil_type(sub_list[i]["start"] - sub_list[i-1]["end"])
219
- if len(sil) > 0:
220
- txts.append([src_lang, sil])
221
- txts.append([src_lang, txt_list[i]])
222
-
223
- target_txt = [txts[0]]
224
- for txt in txts[1:]:
225
- if txt[1] == "":
226
- continue
227
- if txt[0] != target_txt[-1][0]:
228
- target_txt.append([txt[0], ""])
229
- target_txt[-1][-1] += " " + txt[1]
230
-
231
- return target_txt
232
-
233
-
234
- def get_prompt(self, sub_list, start_time, end_time, src_lang):
235
- txts = []
236
- txt_list = [x["word"] for x in sub_list]
237
-
238
- if start_time <= sub_list[0]["start"]:
239
- sil = self.sil_type(sub_list[0]["start"])
240
- if len(sil) > 0:
241
- txts.append([src_lang, sil])
242
- txts.append([src_lang, txt_list[0]])
243
-
244
- for i in range(1, len(sub_list)):
245
- # if sub_list[i]["start"] <= start_time and sub_list[i]["end"] <= end_time:
246
- # txts.append([tar_lang, target_transcript])
247
- # target_transcript = ""
248
- if sub_list[i]["start"] >= start_time and sub_list[i]["end"] <= end_time:
249
- sil = self.sil_type(sub_list[i]["start"] - sub_list[i-1]["end"])
250
- if len(sil) > 0:
251
- txts.append([src_lang, sil])
252
- txts.append([src_lang, txt_list[i]])
253
-
254
- target_txt = [txts[0]]
255
- for txt in txts[1:]:
256
- if txt[1] == "":
257
- continue
258
- if txt[0] != target_txt[-1][0]:
259
- target_txt.append([txt[0], ""])
260
- target_txt[-1][-1] += " " + txt[1]
261
- return target_txt
262
-
263
-
264
- def txt2pinyin(self, text):
265
- txts, phonemes = [], []
266
- texts = re.split(r"(#\d)", text)
267
- print("before norm: ", texts)
268
- for text in texts:
269
- if text in {'#1', '#2', '#3', '#4'}:
270
- txts.append(text)
271
- phonemes.append(text)
272
- continue
273
- text = NSWNormalizer(text.strip()).normalize()
274
-
275
- text_list = list(jieba.cut(text))
276
- print("jieba cut: ", text, text_list)
277
- for words in text_list:
278
- if words in _PAUSE_SYMBOL:
279
- # phonemes.append('#2')
280
- phonemes[-1] += _PAUSE_SYMBOL[words]
281
- txts[-1] += words
282
- elif re.search("[\u4e00-\u9fa5]+", words):
283
- pinyin = self.pinyin_parser(words, style=Style.TONE3, errors="ignore")
284
- new_pinyin = []
285
- for x in pinyin:
286
- x = "".join(x)
287
- if "#" not in x:
288
- new_pinyin.append(x)
289
- else:
290
- phonemes.append(words)
291
- continue
292
- new_pinyin = change_tone_in_bu_or_yi(words, new_pinyin) if len(words)>1 and words[-1] not in {"一","不"} else new_pinyin
293
- phoneme = get_phoneme_from_char_and_pinyin(words, new_pinyin)
294
- phonemes += phoneme
295
- txts += list(words)
296
- elif re.search(r"[a-zA-Z]", words) or re.search(r"#[1-4]", words):
297
- phonemes.append(words)
298
- txts.append(words)
299
- # phonemes.append("#1")
300
- # phones = " ".join(phonemes)
301
- return txts, phonemes
302
-
303
-
304
-
305
- def chunk_text(text, max_chars=135):
306
- """
307
- Splits the input text into chunks, each with a maximum number of characters.
308
-
309
- Args:
310
- text (str): The text to be split.
311
- max_chars (int): The maximum number of characters per chunk.
312
-
313
- Returns:
314
- List[str]: A list of text chunks.
315
- """
316
- chunks = []
317
- current_chunk = ""
318
- # Split the text into sentences based on punctuation followed by whitespace
319
- sentences = re.split(r"(?<=[;:,.!?])\s+|(?<=[;:,。!?])", text)
320
-
321
- for sentence in sentences:
322
- if len(current_chunk.encode("utf-8")) + len(sentence.encode("utf-8")) <= max_chars:
323
- current_chunk += sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence
324
- else:
325
- if current_chunk:
326
- chunks.append(current_chunk.strip())
327
- current_chunk = sentence + " " if sentence and len(sentence[-1].encode("utf-8")) == 1 else sentence
328
-
329
- if current_chunk:
330
- chunks.append(current_chunk.strip())
331
-
332
- return chunks
333
-
334
-
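# --- Illustrative sketch (editorial addition, not part of the deleted file) ---
# chunk_text() splits on sentence-final punctuation and packs sentences into
# chunks whose UTF-8 byte length stays within max_chars. A hypothetical check:
def _demo_chunk_text():
    sample = "First sentence. Second one! A final, slightly longer sentence?"
    for i, chunk in enumerate(chunk_text(sample, max_chars=30)):
        print(i, chunk)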
335
- class MMSAlignModel:
336
- def __init__(self):
337
- from torchaudio.pipelines import MMS_FA as bundle
338
- self.mms_model = bundle.get_model()
339
- self.mms_model.to(device)
340
- self.mms_tokenizer = bundle.get_tokenizer()
341
- self.mms_aligner = bundle.get_aligner()
342
- self.text_normalizer = ur.Uroman()
343
-
344
-
345
- def text_normalization(self, text_list):
346
- text_normalized = []
347
- for word in text_list:
348
- text_char = ''
349
- for c in word:
350
- if c.isalpha() or c=="'":
351
- text_char += c.lower()
352
- elif c == "-":
353
- text_char += '*'
354
- text_char = text_char if len(text_char) > 0 else "*"
355
- text_normalized.append(text_char)
356
- assert len(text_normalized) == len(text_list), f"normalized text len != raw text len: {len(text_normalized)} != {len(text_list)}"
357
- return text_normalized
358
-
359
- def compute_alignments(self, waveform: torch.Tensor, tokens):
360
- with torch.inference_mode():
361
- emission, _ = self.mms_model(waveform.to(device))
362
- token_spans = self.mms_aligner(emission[0], tokens)
363
- return emission, token_spans
364
-
365
-
366
- def align(self, data, wav):
367
- waveform = load_wav(wav, 16000).unsqueeze(0)
368
- raw_text = data['text'][0]
369
- text = " ".join(data['text'][1]).replace("-", " ")
370
- text = re.sub("\s+", " ", text)
371
- text_normed = self.text_normalizer.romanize_string(text, lcode=data["lang"])
372
- # text_normed = re.sub("[\d_.,!$£%?#−/]", '', text_normed)
373
- filter_re = re.compile("[^a-z^*^'^ ]")
374
- text_normed = filter_re.sub('', text_normed.lower())
375
- text_normed = re.sub("\s+", " ", text_normed)
376
- text_normed = text_normed.split()
377
- assert len(text_normed) == len(raw_text), f"normalized text len != raw text len: {len(text_normed)} != {len(raw_text)}"
378
- tokens = self.mms_tokenizer(text_normed)
379
- with torch.inference_mode():
380
- emission, _ = self.mms_model(waveform.to(device))
381
- token_spans = self.mms_aligner(emission[0], tokens)
382
- num_frames = emission.size(1)
383
- ratio = waveform.size(1) / num_frames
384
- res = []
385
- for i in range(len(token_spans)):
386
- score = round(sum([x.score for x in token_spans[i]]) / len(token_spans[i]), ndigits=3)
387
- start = round(waveform.size(-1) * token_spans[i][0].start / num_frames / 16000, ndigits=3)
388
- end = round(waveform.size(-1) * token_spans[i][-1].end / num_frames / 16000, ndigits=3)
389
- res.append({"word": raw_text[i], "start": start, "end": end, "score": score})
390
-
391
- res = {"lang":data["lang"], "start": 0, "end": round(waveform.shape[-1]/16000, ndigits=3), "text_raw":data["text_raw"], "text": text, "words": res}
392
- return res
393
-
394
-
395
- class WhisperxModel:
396
- def __init__(self, model_name):
397
- from pathlib import Path
398
- import whisperx.vad as wx_vad
399
- import whisperx.asr as wx_asr
400
- from whisperx import load_model
401
- prompt = None # "This might be a blend of Simplified Chinese and English speech, do not translate, only transcription be allowed."
402
-
403
- # Prefer a local VAD model (to avoid network download / checksum issues)
404
- vad_fp = Path(MODELS_PATH) / "whisperx-vad-segmentation.bin"
405
- if not vad_fp.is_file():
406
- logging.warning(
407
- "Local whisperx VAD not found at %s, falling back to default download path.",
408
- vad_fp,
409
- )
410
- vad_fp = None
411
- else:
412
- # Monkey-patch whisperx *before* constructing the pipeline so it
413
- # loads our local segmentation model without enforcing the
414
- # baked-in SHA256 checksum.
415
- def _patched_load_vad_model(device, vad_onset=0.500, vad_offset=0.363, use_auth_token=None, model_fp=None):
416
- import torch
417
- from pyannote.audio import Model
418
- from pyannote.audio.pipelines import VoiceActivitySegmentation
419
-
420
- model_path = str(model_fp) if model_fp is not None else str(vad_fp)
421
- model = Model.from_pretrained(model_path, use_auth_token=use_auth_token)
422
- hyperparameters = {
423
- "onset": vad_onset,
424
- "offset": vad_offset,
425
- "min_duration_on": 0.1,
426
- "min_duration_off": 0.1,
427
- }
428
- vad_pipeline = VoiceActivitySegmentation(
429
- segmentation=model,
430
- device=torch.device(device),
431
- )
432
- vad_pipeline.instantiate(hyperparameters)
433
- return vad_pipeline
434
-
435
- # asr.py does `from .vad import load_vad_model`, so we must patch
436
- # both the `vad` module and the alias in `asr`.
437
- wx_vad.load_vad_model = _patched_load_vad_model
438
- wx_asr.load_vad_model = _patched_load_vad_model
439
-
440
- self.model = load_model(
441
- model_name,
442
- ASR_DEVICE,
443
- compute_type="float32",
444
- asr_options={
445
- "suppress_numerals": False,
446
- "max_new_tokens": None,
447
- "clip_timestamps": None,
448
- "initial_prompt": prompt,
449
- "append_punctuations": ".。,,!!??::、",
450
- "hallucination_silence_threshold": None,
451
- "multilingual": True,
452
- "hotwords": None
453
- },
454
- vad_model_fp=str(vad_fp) if vad_fp is not None else None,
455
- )
456
-
457
- def transcribe(self, audio_info, lang=None):
458
- audio = load_wav(audio_info).numpy()
459
- if lang is None:
460
- lang = self.model.detect_language(audio)
461
-
462
- segments = self.model.transcribe(audio, batch_size=8, language=lang)["segments"]
463
- transcript = " ".join([segment["text"] for segment in segments])
464
-
465
- if lang not in {'es','pt','zh','en','de','fr','it', 'ar', 'ru', 'ja', 'ko', 'hi', 'th', 'id', 'vi'}:
466
- lang = langid.classify(transcript)[0]
467
- segments = self.model.transcribe(audio, batch_size=8, language=lang)["segments"]
468
- transcript = " ".join([segment["text"] for segment in segments])
469
- logging.debug(f"whisperx: {segments}")
470
-
471
- transcript = zhconv.convert(transcript, 'zh-hans')
472
- transcript = transcript.replace("-", " ")
473
- transcript = re.sub(_whitespace_re, " ", transcript)
474
- transcript = transcript[1:] if transcript[0] == " " else transcript
475
- segments = {'lang':lang, 'text_raw':transcript}
476
- if lang == "zh":
477
- segments["text"] = text_norm.txt2pinyin(transcript)
478
- else:
479
- transcript = replace_numbers_with_words(transcript, lang=lang).split(' ')
480
- segments["text"] = (transcript, transcript)
481
-
482
- return align_model.align(segments, audio_info)
483
-
484
-
485
- def load_wav(audio_info, sr=16000, channel=1):
486
- raw_sr, audio = audio_info
487
- audio = audio.T if len(audio.shape) > 1 and audio.shape[1] == 2 else audio
488
- audio = audio / np.max(np.abs(audio))
489
- audio = torch.from_numpy(audio).squeeze().float()
490
- if channel == 1 and len(audio.shape) == 2: # stereo to mono
491
- audio = audio.mean(dim=0, keepdim=True)
492
- elif channel == 2 and len(audio.shape) == 1:
493
- audio = torch.stack((audio, audio)) # mono to stereo
494
- if raw_sr != sr:
495
- audio = torchaudio.functional.resample(audio.squeeze(), raw_sr, sr)
496
- audio = torch.clip(audio, -0.999, 0.999).squeeze()
497
- return audio
498
-
499
-
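# --- Illustrative sketch (editorial addition, not part of the deleted file) ---
# load_wav() takes a Gradio-style (sample_rate, numpy_array) tuple, peak-normalizes,
# converts channels, resamples, and returns a clipped torch tensor.
def _demo_load_wav():
    fake_sr = 44100
    fake_wav = np.random.uniform(-1, 1, size=fake_sr).astype(np.float32)  # 1 s of noise
    mono_16k = load_wav((fake_sr, fake_wav), sr=16000, channel=1)
    print(mono_16k.shape, float(mono_16k.abs().max()))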
500
- def update_word_time(lst, cut_time, edit_start, edit_end):
501
- for i in range(len(lst)):
502
- lst[i]["start"] = round(lst[i]["start"] - cut_time, ndigits=3)
503
- lst[i]["end"] = round(lst[i]["end"] - cut_time, ndigits=3)
504
- edit_start = max(round(edit_start - cut_time, ndigits=3), 0)
505
- edit_end = round(edit_end - cut_time, ndigits=3)
506
- return lst, edit_start, edit_end
507
-
508
-
509
- # def update_word_time2(lst, cut_time, edit_start, edit_end):
510
- # for i in range(len(lst)):
511
- # lst[i]["start"] = round(lst[i]["start"] + cut_time, ndigits=3)
512
- # return lst, edit_start, edit_end
513
-
514
-
515
- def get_audio_slice(audio, words_info, start_time, end_time, max_len=10, sr=16000, code_sr=50):
516
- audio_dur = audio.shape[-1] / sr
517
- sub_list = []
518
- # If the tail after the edit span is shorter than 5 s, keep everything after it and trim the front of the audio
519
- if audio_dur - end_time <= max_len/2:
520
- for word in reversed(words_info):
521
- if word['start'] > start_time or audio_dur - word['start'] < max_len:
522
- sub_list = [word] + sub_list
523
-
524
- # If the head before the edit span is shorter than 5 s, keep everything before it and trim the back of the audio
525
- elif start_time <=max_len/2:
526
- for word in words_info:
527
- if word['end'] < max(end_time, max_len):
528
- sub_list += [word]
529
-
530
- # If both sides are longer than 5 s, keep 5 s of context on each side
531
- else:
532
- for word in words_info:
533
- if word['start'] > start_time - max_len/2 and word['end'] < end_time + max_len/2:
534
- sub_list += [word]
535
- audio = audio.squeeze()
536
-
537
- start = int(sub_list[0]['start']*sr)
538
- end = int(sub_list[-1]['end']*sr)
539
- # print("wav cuts:", start, end, (end-start) % int(sr/code_sr))
540
- end -= (end-start) % int(sr/code_sr) # chunk取整
541
-
542
- sub_list, start_time, end_time = update_word_time(sub_list, sub_list[0]['start'], start_time, end_time)
543
- audio = audio.squeeze()
544
- # print("after update_word_time:", sub_list, start_time, end_time, (end-start)/sr)
545
-
546
- return (audio[:start], audio[start:end], audio[end:]), (sub_list, start_time, end_time)
547
-
548
-
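# Note on get_audio_slice() above (editorial summary, not original code):
# it keeps a window of roughly max_len seconds of context around the edit span:
# if the tail after end_time is short it keeps the tail and trims the head, if
# the head before start_time is short it keeps the head and trims the tail, and
# otherwise it keeps about max_len/2 seconds on each side. Word timestamps and
# the edit span are then shifted so they are relative to the trimmed audio.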
549
- def load_models(lemas_model_name, whisper_model_name, alignment_model_name, denoise_model_name): # , audiosr_name):
550
-
551
- global transcribe_model, align_model, denoise_model, text_norm, tts_edit_model
552
- torch.cuda.empty_cache()
553
- gc.collect()
554
-
555
- if denoise_model_name == "UVR5":
556
- # Follow LEMAS-TTS layout but resolve from MODELS_PATH (./pretrained_models by default),
557
- # so that only the main TTS checkpoints can live in hf:// mounts while all
558
- # auxiliary models (UVR5, vocoder, prosody encoder, etc.) are loaded from
559
- # the local `pretrained_models` folder.
560
- from pathlib import Path
561
- uv_root = Path(MODELS_PATH) / "uvr5" / "models" / "MDX_Net_Models" / "model_data"
562
- denoise_model = UVR5(str(uv_root))
563
- elif denoise_model_name == "DeepFilterNet":
564
- denoise_model = DeepFilterNet("./pretrained_models/denoiser_model.onnx")
565
-
566
- if alignment_model_name == "MMS":
567
- align_model = MMSAlignModel()
568
- else:
569
- align_model = WhisperxAlignModel()
570
-
571
- text_norm = TextNorm()
572
-
573
- transcribe_model = WhisperxModel(whisper_model_name)
574
-
575
- # Load LEMAS-TTS editing model (selected multilingual variant)
576
- from pathlib import Path
577
-
578
- ckpt_dir = Path(CKPTS_ROOT) / lemas_model_name
579
- ckpt_candidates = sorted(
580
- list(ckpt_dir.glob("*.safetensors")) + list(ckpt_dir.glob("*.pt"))
581
- )
582
- if not ckpt_candidates:
583
- raise gr.Error(f"No LEMAS-TTS ckpt found under {ckpt_dir}")
584
- ckpt_file = str(ckpt_candidates[-1])
585
-
586
- vocab_file = Path(PRETRAINED_ROOT) / "data" / lemas_model_name / "vocab.txt"
587
- if not vocab_file.is_file():
588
- raise gr.Error(f"Vocab file not found: {vocab_file}")
589
-
590
- prosody_cfg = Path(CKPTS_ROOT) / "prosody_encoder" / "pretssel_cfg.json"
591
- prosody_ckpt = Path(CKPTS_ROOT) / "prosody_encoder" / "prosody_encoder_UnitY2.pt"
592
-
593
- # Decide whether to enable the prosody encoder:
594
- # - multilingual_prosody: True (if assets exist)
595
- # - multilingual_grl: False (GRL-only variant)
596
- # - others: fall back to presence of assets.
597
- if lemas_model_name.endswith("prosody"):
598
- use_prosody = prosody_cfg.is_file() and prosody_ckpt.is_file()
599
- elif lemas_model_name.endswith("grl"):
600
- use_prosody = False
601
- else:
602
- use_prosody = prosody_cfg.is_file() and prosody_ckpt.is_file()
603
-
604
- tts_edit_model = TTS(
605
- model=lemas_model_name,
606
- ckpt_file=ckpt_file,
607
- vocab_file=str(vocab_file),
608
- device=device,
609
- use_prosody_encoder=use_prosody,
610
- prosody_cfg_path=str(prosody_cfg) if use_prosody else "",
611
- prosody_ckpt_path=str(prosody_ckpt) if use_prosody else "",
612
- ode_method="euler",
613
- use_ema=True,
614
- frontend="phone",
615
- )
616
- logging.info(f"Loaded LEMAS-TTS edit model from {ckpt_file}")
617
-
618
- return gr.Accordion()
619
-
620
-
621
- def get_transcribe_state(segments):
622
- logging.info("===========After Align===========")
623
- logging.info(segments)
624
- return {
625
- "segments": segments,
626
- "transcript": segments["text_raw"],
627
- "words_info": segments["words"],
628
- "transcript_with_start_time": " ".join([f"{word['start']} {word['word']}" for word in segments["words"]]),
629
- "transcript_with_end_time": " ".join([f"{word['word']} {word['end']}" for word in segments["words"]]),
630
- "word_bounds": [f"{word['start']} {word['word']} {word['end']}" for word in segments["words"]]
631
- }
632
-
633
-
634
- def transcribe(seed, audio_info):
635
- if transcribe_model is None:
636
- raise gr.Error("Transcription model not loaded")
637
- seed_everything(seed)
638
-
639
- segments = transcribe_model.transcribe(audio_info)
640
- state = get_transcribe_state(segments)
641
-
642
- return [
643
- state["transcript"], state["transcript_with_start_time"], state["transcript_with_end_time"],
644
- # gr.Dropdown(value=state["word_bounds"][-1], choices=state["word_bounds"], interactive=True), # prompt_to_word
645
- gr.Dropdown(value=state["word_bounds"][0], choices=state["word_bounds"], interactive=True), # edit_from_word
646
- gr.Dropdown(value=state["word_bounds"][-1], choices=state["word_bounds"], interactive=True), # edit_to_word
647
- state
648
- ]
649
-
650
- def align(transcript, audio_info, state):
651
- lang = state["segments"]["lang"]
652
- # print("realign: ", transcript, state)
653
- transcript = re.sub(_whitespace_re, " ", transcript)
654
- transcript = transcript[1:] if transcript[0] == " " else transcript
655
- segments = {'lang':lang, 'text':transcript, 'text_raw':transcript}
656
- if lang == "zh":
657
- segments["text"] = text_norm.txt2pinyin(transcript)
658
- else:
659
- transcript = replace_numbers_with_words(transcript)
660
- segments["text"] = (transcript.split(' '), transcript.split(' '))
661
- # print("text:", segments["text"])
662
- segments = align_model.align(segments, audio_info)
663
-
664
- state = get_transcribe_state(segments)
665
-
666
- return [
667
- state["transcript"], state["transcript_with_start_time"], state["transcript_with_end_time"],
668
- # gr.Dropdown(value=state["word_bounds"][-1], choices=state["word_bounds"], interactive=True), # prompt_to_word
669
- gr.Dropdown(value=state["word_bounds"][0], choices=state["word_bounds"], interactive=True), # edit_from_word
670
- gr.Dropdown(value=state["word_bounds"][-1], choices=state["word_bounds"], interactive=True), # edit_to_word
671
- state
672
- ]
673
-
674
-
675
- def denoise(audio_info):
676
- denoised_audio, sr = denoise_model.denoise(audio_info)
677
- denoised_audio = denoised_audio # .squeeze().numpy()
678
- return (sr, denoised_audio)
679
-
680
- def cancel_denoise(audio_info):
681
- return audio_info
682
-
683
- def get_output_audio(audio_tensors, sr):
684
- result = torch.cat(audio_tensors, -1)
685
- result = result.squeeze().cpu().numpy()
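# Illustrative mapping for sil_type() above (editorial note, not original code):
# 0.2 s -> "" (no pause), 0.5 s -> "#1", 1.0 s -> "#2", 2.0 s -> "#3", 4.0 s -> "#4".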
686
- result = (result * np.iinfo(np.int16).max).astype(np.int16)
687
- print("save result:", result.shape)
688
- # wavfile.write(os.path.join(TMP_PATH, "output.wav"), sr, result)
689
- return (int(sr), result)
690
-
691
-
692
- def get_edit_audio_part(audio_info, edit_start, edit_end):
693
- sr, raw_wav = audio_info
694
- raw_wav = raw_wav[int(edit_start*sr):int(edit_end*sr)]
695
- return (sr, raw_wav)
696
-
697
-
698
- def crossfade_concat(chunk1, chunk2, overlap):
699
- # Compute the fade-in and fade-out coefficients
700
- fade_out = torch.cos(torch.linspace(0, torch.pi / 2, overlap)) ** 2
701
- fade_in = torch.cos(torch.linspace(torch.pi / 2, 0, overlap)) ** 2
702
- chunk2[:overlap] = chunk1[-overlap:] * fade_out + chunk2[:overlap] * fade_in
703
- chunk = torch.cat((chunk1[:-overlap], chunk2), dim=0)
704
- return chunk
705
-
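# --- Illustrative sketch (editorial addition, not part of the deleted file) ---
# crossfade_concat() joins two chunks with a cos^2 fade pair that sums to one
# (constant-gain crossfade) over `overlap` samples. A hypothetical smoke test:
def _demo_crossfade_concat():
    a = torch.ones(1000)
    b = torch.ones(1000) * 0.5
    joined = crossfade_concat(a.clone(), b.clone(), overlap=200)
    print(joined.shape)  # length == len(a) + len(b) - overlap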
706
- def replace_numbers_with_words(sentence, lang="en"):
707
- sentence = re.sub(r'(\d+)', r' \1 ', sentence) # add spaces around numbers
708
- def replace_with_words(match):
709
- num = match.group(0)
710
- try:
711
- return num2words(num, lang=lang) # Convert numbers to words
712
- except:
713
- return num # In case num2words fails (unlikely with digits but just to be safe)
714
- return re.sub(r'\b\d+\b', replace_with_words, sentence) # Regular expression that matches numbers
715
-
716
-
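# --- Illustrative sketch (editorial addition, not part of the deleted file) ---
# replace_numbers_with_words() pads digits with spaces and expands each standalone
# number via num2words in the requested language, keeping the digits on failure.
def _demo_replace_numbers():
    print(replace_numbers_with_words("room 42", lang="en"))
    print(replace_numbers_with_words("Zimmer 3", lang="de"))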
717
- def run(seed, nfe_step, speed, cfg_strength, sway_sampling_coef, ref_ratio,
718
- audio_info, denoised_audio, transcribe_state, transcript, smart_transcript,
719
- mode, start_time, end_time,
720
- split_text, selected_sentence, audio_tensors):
721
- if tts_edit_model is None:
722
- raise gr.Error("LEMAS-TTS edit model not loaded")
723
- if smart_transcript and (transcribe_state is None):
724
- raise gr.Error("Can't use smart transcript: whisper transcript not found")
725
-
726
- # if mode == "Rerun":
727
- # colon_position = selected_sentence.find(':')
728
- # selected_sentence_idx = int(selected_sentence[:colon_position])
729
- # sentences = [selected_sentence[colon_position + 1:]]
730
-
731
- # Choose base audio (denoised if duration matches)
732
- audio_base = audio_info
733
- audio_dur = round(audio_info[1].shape[0] / audio_info[0], ndigits=3)
734
- if denoised_audio is not None:
735
- denoised_dur = round(denoised_audio[1].shape[0] / denoised_audio[0], ndigits=3)
736
- if audio_dur == denoised_dur or (
737
- denoised_audio[0] != audio_info[0] and abs(audio_dur - denoised_dur) < 0.1
738
- ):
739
- audio_base = denoised_audio
740
- logging.info("use denoised audio")
741
-
742
- raw_sr, raw_wav = audio_base
743
- print("audio_dur: ", audio_dur, raw_sr, raw_wav.shape, start_time, end_time)
744
-
745
- # Build target text by replacing the selected span with `transcript`
746
- words = transcribe_state["words_info"]
747
- if not words:
748
- raise gr.Error("No word-level alignment found; please run Transcribe first.")
749
-
750
- start_time = float(start_time)
751
- end_time = float(end_time)
752
- if end_time <= start_time:
753
- raise gr.Error("Edit end time must be greater than start time.")
754
-
755
- # Find word indices covering the selected region
756
- start_idx = 0
757
- for i, w in enumerate(words):
758
- if w["end"] > start_time:
759
- start_idx = i
760
- break
761
-
762
- end_idx = len(words)
763
- for i in range(len(words) - 1, -1, -1):
764
- if words[i]["start"] < end_time:
765
- end_idx = i + 1
766
- break
767
- if end_idx <= start_idx:
768
- end_idx = min(start_idx + 1, len(words))
769
-
770
- word_start_sec = float(words[start_idx]["start"])
771
- word_end_sec = float(words[end_idx - 1]["end"])
772
-
773
- # Edit span in seconds (relative to full utterance)
774
- edit_start = max(0.0, word_start_sec - 0.1)
775
- edit_end = min(word_end_sec + 0.1, audio_dur)
776
- parts_to_edit = [(edit_start, edit_end)]
777
-
778
- display_text = transcribe_state["segments"]["text_raw"].strip()
779
- txt_list = display_text.split(" ") if display_text else [w["word"] for w in words]
780
-
781
- prefix = " ".join(txt_list[:start_idx]).strip()
782
- suffix = " ".join(txt_list[end_idx:]).strip()
783
- new_phrase = transcript.strip()
784
-
785
- pieces = []
786
- if prefix:
787
- pieces.append(prefix)
788
- if new_phrase:
789
- pieces.append(new_phrase)
790
- if suffix:
791
- pieces.append(suffix)
792
- target_text = " ".join(pieces)
793
-
794
- logging.info(
795
- "target_text: %s (start_idx=%d, end_idx=%d, parts_to_edit=%s)",
796
- target_text,
797
- start_idx,
798
- end_idx,
799
- parts_to_edit,
800
- )
801
-
802
- # Prepare audio for LEMAS-TTS editing (mono, target SR)
803
- segment_audio = load_wav(audio_base, sr=tts_edit_model.target_sample_rate)
804
-
805
- seed_val = None if seed == -1 else int(seed)
806
-
807
- # Decide whether to use prosody encoder at inference based on how TTS was built
808
- use_prosody_flag = bool(getattr(tts_edit_model, "use_prosody_encoder", False))
809
-
810
- wav_out, _ = gen_wav_multilingual(
811
- tts_edit_model,
812
- segment_audio,
813
- tts_edit_model.target_sample_rate,
814
- target_text,
815
- parts_to_edit,
816
- speed=float(speed),
817
- nfe_step=int(nfe_step),
818
- cfg_strength=float(cfg_strength),
819
- sway_sampling_coef=float(sway_sampling_coef),
820
- ref_ratio=float(ref_ratio),
821
- no_ref_audio=False,
822
- use_acc_grl=False,
823
- use_prosody_encoder_flag=use_prosody_flag,
824
- seed=seed_val,
825
- )
826
-
827
- wav_np = wav_out.cpu().numpy()
828
- wav_np = np.clip(wav_np, -0.999, 0.999)
829
- wav_int16 = (wav_np * np.iinfo(np.int16).max).astype(np.int16)
830
- out_sr = int(tts_edit_model.target_sample_rate)
831
-
832
- output_audio = (out_sr, wav_int16)
833
- sentences = [f"0: {target_text}"]
834
- audio_tensors = [torch.from_numpy(wav_np)]
835
-
836
- component = gr.Dropdown(choices=sentences, value=sentences[0])
837
- return output_audio, target_text, component, audio_tensors
838
-
839
-
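# Worked example for the word-span selection inside run() above (editorial note):
# with word alignments [0.0-0.5, 0.6-1.0, 1.1-1.5] and an edit span of 0.55-1.05 s,
# start_idx becomes 1 (first word whose end is after 0.55) and end_idx becomes 2
# (one past the last word whose start is before 1.05), so only the middle word is
# replaced and, assuming the clip is long enough, parts_to_edit expands to roughly
# (0.5, 1.1) after the 0.1 s padding.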
840
- def update_input_audio(audio_info):
841
- if audio_info is None:
842
- return 0, 0
843
- elif type(audio_info) is str:
844
- info = torchaudio.info(audio_info)
845
- max_time = round(info.num_frames / info.sample_rate, 2)
846
- elif type(audio_info) is tuple:
847
- max_time = round(audio_info[1].shape[0] / audio_info[0], 2)
848
- return [
849
- # gr.Slider(maximum=max_time, value=max_time),
850
- gr.Slider(maximum=max_time, value=0),
851
- gr.Slider(maximum=max_time, value=max_time),
852
- ]
853
-
854
-
855
- def change_mode(mode):
856
- # tts_mode_controls, edit_mode_controls, edit_word_mode, split_text, long_tts_sentence_editor
857
- return [
858
- gr.Group(visible=mode != "Edit"),
859
- gr.Group(visible=mode == "Edit"),
860
- gr.Radio(visible=mode == "Edit"),
861
- gr.Radio(visible=mode == "Long TTS"),
862
- gr.Group(visible=mode == "Long TTS"),
863
- ]
864
-
865
-
866
- def load_sentence(selected_sentence, audio_tensors):
867
- if selected_sentence is None:
868
- return None
869
- colon_position = selected_sentence.find(':')
870
- selected_sentence_idx = int(selected_sentence[:colon_position])
871
- # Use LEMAS-TTS target sample rate if available, otherwise default to 16000
872
- sr = getattr(tts_edit_model, "target_sample_rate", 16000)
873
- return get_output_audio([audio_tensors[selected_sentence_idx]], sr)
874
-
875
-
876
- def update_bound_word(is_first_word, selected_word, edit_word_mode):
877
- if selected_word is None:
878
- return None
879
-
880
- word_start_time = float(selected_word.split(' ')[0])
881
- word_end_time = float(selected_word.split(' ')[-1])
882
- if edit_word_mode == "Replace half":
883
- bound_time = (word_start_time + word_end_time) / 2
884
- elif is_first_word:
885
- bound_time = word_start_time
886
- else:
887
- bound_time = word_end_time
888
-
889
- return bound_time
890
-
891
-
892
- def update_bound_words(from_selected_word, to_selected_word, edit_word_mode):
893
- return [
894
- update_bound_word(True, from_selected_word, edit_word_mode),
895
- update_bound_word(False, to_selected_word, edit_word_mode),
896
- ]
897
-
898
-
899
- smart_transcript_info = """
900
- If enabled, the target transcript will be constructed for you:<br/>
901
- - In TTS and Long TTS mode, just write the text you want to synthesize.<br/>
902
- - In Edit mode, just write the text that should replace the selected editing segment.<br/>
903
- If disabled, you should write the target transcript yourself:<br/>
904
- - In TTS mode, write the prompt transcript followed by the generation transcript.<br/>
905
- - In Long TTS mode, select split by newline (<b>SENTENCE SPLIT WON'T WORK</b>) and start each line with a prompt transcript.<br/>
906
- - In Edit mode, write the full prompt.<br/>
907
- """
908
-
909
- demo_original_transcript = ""
910
-
911
- demo_text = {
912
- "TTS": {
913
- "smart": "take over the stage for half an hour,",
914
- "regular": "Gwynplaine had, besides, for his work and for his feats of strength, take over the stage for half an hour."
915
- },
916
- "Edit": {
917
- "smart": "Just write it line-by-line.",
918
- "regular": "照片、医疗记录、神经重塑的易损性,这是某种数据库啊!还有PRELESS的脑部扫描、生物管型、神经重塑."
919
- },
920
- "Long TTS": {
921
- "smart": "You can run the model on a big text!\n"
922
- "Just write it line-by-line. Or sentence-by-sentence.\n"
923
- "If some sentences sound odd, just rerun the model on them, no need to generate the whole text again!",
924
- "regular": "Gwynplaine had, besides, for his work and for his feats of strength, You can run the model on a big text!\n"
925
- "Gwynplaine had, besides, for his work and for his feats of strength, Just write it line-by-line. Or sentence-by-sentence.\n"
926
- "Gwynplaine had, besides, for his work and for his feats of strength, If some sentences sound odd, just rerun the model on them, no need to generate the whole text again!"
927
- }
928
- }
929
-
930
- all_demo_texts = {vv for k, v in demo_text.items() for kk, vv in v.items()}
931
-
932
- demo_words = ['0.069 Gwynplain 0.611', '0.671 had, 0.912', '0.952 besides, 1.414', '1.494 for 1.634', '1.695 his 1.835', '1.915 work 2.136', '2.196 and 2.297', '2.337 for 2.517', '2.557 his 2.678', '2.758 feats 3.019', '3.079 of 3.139', '3.2 strength, 3.561', '4.022 round 4.263', '4.303 his 4.444', '4.524 neck 4.705', '4.745 and 4.825', '4.905 over 5.086', '5.146 his 5.266', '5.307 shoulders, 5.768', '6.23 an 6.33', '6.531 esclavine 7.133', '7.213 of 7.293', '7.353 leather. 7.614']
933
-
934
- demo_words_info = [{'word': 'Gwynplain', 'start': 0.069, 'end': 0.611, 'score': 0.833}, {'word': 'had,', 'start': 0.671, 'end': 0.912, 'score': 0.879}, {'word': 'besides,', 'start': 0.952, 'end': 1.414, 'score': 0.863}, {'word': 'for', 'start': 1.494, 'end': 1.634, 'score': 0.89}, {'word': 'his', 'start': 1.695, 'end': 1.835, 'score': 0.669}, {'word': 'work', 'start': 1.915, 'end': 2.136, 'score': 0.916}, {'word': 'and', 'start': 2.196, 'end': 2.297, 'score': 0.766}, {'word': 'for', 'start': 2.337, 'end': 2.517, 'score': 0.808}, {'word': 'his', 'start': 2.557, 'end': 2.678, 'score': 0.786}, {'word': 'feats', 'start': 2.758, 'end': 3.019, 'score': 0.97}, {'word': 'of', 'start': 3.079, 'end': 3.139, 'score': 0.752}, {'word': 'strength,', 'start': 3.2, 'end': 3.561, 'score': 0.742}, {'word': 'round', 'start': 4.022, 'end': 4.263, 'score': 0.916}, {'word': 'his', 'start': 4.303, 'end': 4.444, 'score': 0.666}, {'word': 'neck', 'start': 4.524, 'end': 4.705, 'score': 0.908}, {'word': 'and', 'start': 4.745, 'end': 4.825, 'score': 0.882}, {'word': 'over', 'start': 4.905, 'end': 5.086, 'score': 0.847}, {'word': 'his', 'start': 5.146, 'end': 5.266, 'score': 0.791}, {'word': 'shoulders,', 'start': 5.307, 'end': 5.768, 'score': 0.729}, {'word': 'an', 'start': 6.23, 'end': 6.33, 'score': 0.854}, {'word': 'esclavine', 'start': 6.531, 'end': 7.133, 'score': 0.803}, {'word': 'of', 'start': 7.213, 'end': 7.293, 'score': 0.772}, {'word': 'leather.', 'start': 7.353, 'end': 7.614, 'score': 0.896}]
935
-
936
-
937
- def update_demo(mode, smart_transcript, edit_word_mode, transcript, edit_from_word, edit_to_word):
938
- if transcript not in all_demo_texts:
939
- return transcript, edit_from_word, edit_to_word
940
-
941
- replace_half = edit_word_mode == "Replace half"
942
- change_edit_from_word = edit_from_word == demo_words[2] or edit_from_word == demo_words[3]
943
- change_edit_to_word = edit_to_word == demo_words[11] or edit_to_word == demo_words[12]
944
- demo_edit_from_word_value = demo_words[2] if replace_half else demo_words[3]
945
- demo_edit_to_word_value = demo_words[12] if replace_half else demo_words[11]
946
- return [
947
- demo_text[mode]["smart" if smart_transcript else "regular"],
948
- demo_edit_from_word_value if change_edit_from_word else edit_from_word,
949
- demo_edit_to_word_value if change_edit_to_word else edit_to_word,
950
- ]
951
-
952
- def get_app():
953
- with gr.Blocks() as app:
954
- with gr.Row():
955
- with gr.Column(scale=2):
956
- load_models_btn = gr.Button(value="Load models")
957
- with gr.Column(scale=5):
958
- with gr.Accordion("Select models", open=False) as models_selector:
959
- # For LEMAS-TTS editing, we expose a simple model selector
960
- # between the two multilingual variants.
961
- with gr.Row():
962
- lemas_model_choice = gr.Radio(
963
- label="Edit Model",
964
- choices=["multilingual_grl", "multilingual_prosody"],
965
- value="multilingual_grl",
966
- interactive=True,
967
- scale=3,
968
- )
969
- denoise_model_choice = gr.Radio(label="Denoise Model", scale=2, value="UVR5", choices=["UVR5", "DeepFilterNet"]) # "830M", "330M_TTSEnhanced", "830M_TTSEnhanced"])
970
- # whisper_backend_choice = gr.Radio(label="Whisper backend", value="", choices=["whisperX", "whisper"])
971
- whisper_model_choice = gr.Radio(label="Whisper model", scale=3, value="medium", choices=["base", "small", "medium", "large"])
972
- align_model_choice = gr.Radio(label="Forced alignment model", scale=2, value="MMS", choices=["whisperX", "MMS"], visible=False)
973
-
974
- with gr.Row():
975
- with gr.Column(scale=2):
976
- # Use a numpy waveform as default value to avoid Gradio's
977
- # InvalidPathError with local filesystem paths.
978
- _demo_value = None
979
- demo_candidates = [
980
- os.path.join(DEMO_PATH, "test.wav"),
981
- ]
982
- for demo_path in demo_candidates:
983
- try:
984
- if not os.path.isfile(demo_path):
985
- continue
986
- _demo_wav, _demo_sr = torchaudio.load(demo_path)
987
- if _demo_wav.dim() > 1 and _demo_wav.shape[0] > 1:
988
- _demo_wav = _demo_wav.mean(dim=0, keepdim=True)
989
- _demo_value = (_demo_sr, _demo_wav.squeeze(0).numpy())
990
- break
991
- except Exception:
992
- continue
993
-
994
- input_audio = gr.Audio(
995
- value=_demo_value,
996
- label="Input Audio",
997
- interactive=True,
998
- type="numpy",
999
- )
1000
-
1001
- with gr.Row():
1002
- transcribe_btn = gr.Button(value="Transcribe")
1003
- align_btn = gr.Button(value="ReAlign")
1004
- with gr.Group():
1005
- original_transcript = gr.Textbox(label="Original transcript", lines=5, interactive=True, value=demo_original_transcript,
1006
- info="Use whisperx model to get the transcript. Fix and align it if necessary.")
1007
- with gr.Accordion("Word start time", open=False, visible=False):
1008
- transcript_with_start_time = gr.Textbox(label="Start time", lines=5, interactive=False, info="Start time before each word")
1009
- with gr.Accordion("Word end time", open=False, visible=False):
1010
- transcript_with_end_time = gr.Textbox(label="End time", lines=5, interactive=False, info="End time after each word")
1011
-
1012
- with gr.Row():
1013
- denoise_btn = gr.Button(value="Denoise")
1014
- cancel_btn = gr.Button(value="Cancel Denoise")
1015
- denoise_audio = gr.Audio(label="Denoised Audio", value=None, interactive=False, type="numpy")
1016
-
1017
- with gr.Column(scale=3):
1018
- with gr.Group():
1019
- transcript_inbox = gr.Textbox(label="Text", lines=5, value=demo_text["Edit"]["smart"])
1020
- with gr.Row(visible=False):
1021
- smart_transcript = gr.Checkbox(label="Smart transcript", value=True)
1022
- with gr.Accordion(label="?", open=False):
1023
- info = gr.Markdown(value=smart_transcript_info)
1024
-
1025
- mode = gr.Radio(label="Mode", choices=["Edit"], value="Edit", visible=False)
1026
- with gr.Row(visible=False):
1027
- split_text = gr.Radio(label="Split text", choices=["Newline", "Sentence"], value="Newline",
1028
- info="Split text into parts and run TTS for each part.", visible=True)
1029
- edit_word_mode = gr.Radio(label="Edit word mode", choices=["Replace half", "Replace all"], value="Replace all",
1030
- info="What to do with first and last word", visible=False)
1031
-
1032
- # with gr.Group(visible=False) as tts_mode_controls:
1033
- # with gr.Row():
1034
- # edit_from_word = gr.Dropdown(label="First word in prompt", choices=demo_words, value=demo_words[12], interactive=True)
1035
- # edit_to_word = gr.Dropdown(label="Last word in prompt", choices=demo_words, value=demo_words[18], interactive=True)
1036
- # with gr.Row():
1037
- # edit_start_time = gr.Slider(label="Prompt start time", minimum=0, maximum=7.614, step=0.001, value=4.022)
1038
- # edit_end_time = gr.Slider(label="Prompt end time", minimum=0, maximum=7.614, step=0.001, value=5.768)
1039
- # with gr.Row():
1040
- # check_btn = gr.Button(value="Check prompt",scale=1)
1041
- # edit_audio = gr.Audio(label="Prompt Audio", scale=3)
1042
-
1043
- # with gr.Group() as edit_mode_controls:
1044
- with gr.Row():
1045
- edit_from_word = gr.Dropdown(label="First word to edit", choices=demo_words, value=demo_words[12], interactive=True)
1046
- edit_to_word = gr.Dropdown(label="Last word to edit", choices=demo_words, value=demo_words[18], interactive=True)
1047
- with gr.Row():
1048
- edit_start_time = gr.Slider(label="Edit from time", minimum=0, maximum=7.614, step=0.001, value=4.022)
1049
- edit_end_time = gr.Slider(label="Edit to time", minimum=0, maximum=7.614, step=0.001, value=5.768)
1050
- # Put the button and audio in separate columns so that
1051
- # the tall audio widget does not overlap the clickable
1052
- # area of the button.
1053
- with gr.Row():
1054
- with gr.Column(scale=1):
1055
- check_btn = gr.Button(value="Check edit words")
1056
- with gr.Column(scale=3):
1057
- edit_audio = gr.Audio(label="Edit word(s)", scale=3, type="numpy")
1058
-
1059
- run_btn = gr.Button(value="Run", variant="primary")
1060
-
1061
- with gr.Column(scale=2):
1062
- output_audio = gr.Audio(label="Output Audio", type="numpy")
1063
- with gr.Accordion("Inference transcript", open=True):
1064
- inference_transcript = gr.Textbox(label="Inference transcript", lines=5, interactive=False, info="Inference was performed on this transcript.")
1065
- with gr.Group(visible=False) as long_tts_sentence_editor:
1066
- sentence_selector = gr.Dropdown(label="Sentence", value=None,
1067
- info="Select sentence you want to regenerate")
1068
- sentence_audio = gr.Audio(label="Sentence Audio", scale=2, type="numpy")
1069
- rerun_btn = gr.Button(value="Rerun")
1070
-
1071
- with gr.Row():
1072
- with gr.Accordion("Generation Parameters - change these if you are unhappy with the generation", open=False):
1073
- with gr.Row():
1074
- nfe_step = gr.Number(
1075
- label="NFE Step",
1076
- value=64,
1077
- precision=0,
1078
- info="Number of function evaluations (sampling steps).",
1079
- )
1080
- speed = gr.Slider(
1081
- label="Speed",
1082
- minimum=0.5,
1083
- maximum=1.5,
1084
- step=0.1,
1085
- value=1.0,
1086
- info="Placeholder for future use; currently not applied.",
1087
- )
1088
- cfg_strength = gr.Slider(
1089
- label="CFG Strength",
1090
- minimum=0.0,
1091
- maximum=10.0,
1092
- step=0.5,
1093
- value=5.0,
1094
- info="Classifier-free guidance strength.",
1095
- )
1096
-
1097
- with gr.Row():
1098
- sway_sampling_coef = gr.Slider(
1099
- label="Sway",
1100
- minimum=2.0,
1101
- maximum=5.0,
1102
- step=0.1,
1103
- value=3.0,
1104
- info="Sampling sway coefficient.",
1105
- )
1106
- ref_ratio = gr.Slider(
1107
- label="Ref Ratio",
1108
- minimum=0.0,
1109
- maximum=1.0,
1110
- step=0.05,
1111
- value=1.0,
1112
- info="How much to rely on reference audio (if used).",
1113
- )
1114
- seed = gr.Number(
1115
- label="Seed",
1116
- value=-1,
1117
- precision=0,
1118
- info="-1 for random, otherwise fixed seed.",
1119
- )
1120
-
1121
-
1122
- audio_tensors = gr.State()
1123
- transcribe_state = gr.State(value={"words_info": demo_words_info, "lang":"zh"})
1124
-
1125
-
1126
- edit_word_mode.change(fn=update_demo,
1127
- inputs=[mode, smart_transcript, edit_word_mode, transcript_inbox, edit_from_word, edit_to_word],
1128
- outputs=[transcript_inbox, edit_from_word, edit_to_word])
1129
- smart_transcript.change(
1130
- fn=update_demo,
1131
- inputs=[mode, smart_transcript, edit_word_mode, transcript_inbox, edit_from_word, edit_to_word],
1132
- outputs=[transcript_inbox, edit_from_word, edit_to_word],
1133
- )
1134
-
1135
- load_models_btn.click(fn=load_models,
1136
- inputs=[lemas_model_choice, whisper_model_choice, align_model_choice, denoise_model_choice], # audiosr_choice],
1137
- outputs=[models_selector])
1138
-
1139
- input_audio.upload(fn=update_input_audio,
1140
- inputs=[input_audio],
1141
- outputs=[edit_start_time, edit_end_time]) # prompt_end_time
1142
-
1143
- transcribe_btn.click(fn=transcribe,
1144
- inputs=[seed, input_audio],
1145
- outputs=[original_transcript, transcript_with_start_time, transcript_with_end_time,
1146
- edit_from_word, edit_to_word, transcribe_state]) # prompt_to_word
1147
- align_btn.click(fn=align,
1148
- inputs=[original_transcript, input_audio, transcribe_state],
1149
- outputs=[original_transcript, transcript_with_start_time, transcript_with_end_time,
1150
- edit_from_word, edit_to_word, transcribe_state]) # prompt_to_word
1151
-
1152
- denoise_btn.click(fn=denoise,
1153
- inputs=[input_audio],
1154
- outputs=[denoise_audio])
1155
-
1156
- cancel_btn.click(fn=cancel_denoise,
1157
- inputs=[input_audio],
1158
- outputs=[denoise_audio])
1159
-
1160
- # mode.change(fn=change_mode,
1161
- # inputs=[mode],
1162
- # outputs=[tts_mode_controls, edit_mode_controls, edit_word_mode, split_text, long_tts_sentence_editor])
1163
-
1164
- check_btn.click(fn=get_edit_audio_part,
1165
- inputs=[input_audio, edit_start_time, edit_end_time],
1166
- outputs=[edit_audio])
1167
-
1168
- run_btn.click(fn=run,
1169
- inputs=[
1170
- seed, nfe_step, speed, cfg_strength, sway_sampling_coef, ref_ratio,
1171
- input_audio, denoise_audio, transcribe_state, transcript_inbox, smart_transcript,
1172
- mode, edit_start_time, edit_end_time,
1173
- split_text, sentence_selector, audio_tensors
1174
- ],
1175
- outputs=[output_audio, inference_transcript, sentence_selector, audio_tensors])
1176
-
1177
- sentence_selector.change(
1178
- fn=load_sentence,
1179
- inputs=[sentence_selector, audio_tensors],
1180
- outputs=[sentence_audio],
1181
- )
1182
- rerun_btn.click(fn=run,
1183
- inputs=[
1184
- seed, nfe_step, speed, cfg_strength, sway_sampling_coef, ref_ratio,
1185
- input_audio, denoise_audio, transcribe_state, transcript_inbox, smart_transcript,
1186
- gr.State(value="Rerun"), edit_start_time, edit_end_time,
1187
- split_text, sentence_selector, audio_tensors
1188
- ],
1189
- outputs=[output_audio, inference_transcript, sentence_audio, audio_tensors])
1190
-
1191
- # prompt_to_word.change(fn=update_bound_word,
1192
- # inputs=[gr.State(False), prompt_to_word, gr.State("Replace all")],
1193
- # outputs=[prompt_end_time])
1194
- edit_from_word.change(fn=update_bound_word,
1195
- inputs=[gr.State(True), edit_from_word, edit_word_mode],
1196
- outputs=[edit_start_time])
1197
- edit_to_word.change(fn=update_bound_word,
1198
- inputs=[gr.State(False), edit_to_word, edit_word_mode],
1199
- outputs=[edit_end_time])
1200
- edit_word_mode.change(fn=update_bound_words,
1201
- inputs=[edit_from_word, edit_to_word, edit_word_mode],
1202
- outputs=[edit_start_time, edit_end_time])
1203
-
1204
- return app
1205
-
1206
-
1207
- if __name__ == "__main__":
1208
- import argparse
1209
-
1210
- parser = argparse.ArgumentParser(description="LEMAS-Edit gradio app.")
1211
-
1212
- parser.add_argument("--demo-path", default="./pretrained_models/demo", help="Path to demo directory")
1213
- parser.add_argument("--tmp-path", default="./pretrained_models/tmp", help="Path to tmp directory")
1214
- parser.add_argument("--port", default=41020, type=int, help="App port")
1215
- parser.add_argument("--share", action="store_true", help="Launch with public url")
1216
- parser.add_argument("--server_name", default="0.0.0.0", type=str, help="Server name for launching the app. 127.0.0.1 for localhost; 0.0.0.0 to allow access from other machines in the local network. Might also give access to external users depends on the firewall settings.")
1217
-
1218
- os.environ["USER"] = os.getenv("USER", "user")
1219
- args = parser.parse_args()
1220
- DEMO_PATH = args.demo_path
1221
- TMP_PATH = args.tmp_path
1222
- MODELS_PATH = args.models_path
1223
-
1224
- app = get_app()
1225
- app.queue().launch(share=args.share, server_name=args.server_name, server_port=args.port)
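# Illustrative invocation (editorial sketch, assuming the pretrained assets exist locally):
#   python inference_gradio.py --port 41020 --models-path ./pretrained_models --share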