# flake8: noqa: E402
import sys
import os
import logging
import time
import re
import shutil  # used to wipe and recreate the output folder

import numpy as np  # audio buffers are handled as NumPy arrays
from scipy.io import wavfile

logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(
    level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)

import torch
import gradio as gr

import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text

net_g = None
device = "cuda"  # replaced in __main__ based on available hardware
curr_model_name = None  # name of the currently loaded model
hps_ = None  # dict mapping model name -> hyperparameters, filled in __main__
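
# Expected on-disk layout (an assumption inferred from load_model below):
#   logs/<model_name>/config.json   per-model hyperparameters
#   logs/<model_name>/G_<step>.pth  generator checkpoints; the newest is loaded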


def get_text(text, language_str, hps):
    # Normalize the raw text and convert it to phoneme/tone/language-id sequences.
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
    if hps.data.add_blank:
        # Intersperse blank (0) tokens and double word2ph so it still maps
        # words onto the lengthened phoneme sequence.
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1
    bert = get_bert(norm_text, word2ph, language_str, device)
    del word2ph
    assert bert.shape[-1] == len(phone), phone
    if language_str == "ZH":
        # bert already holds the Chinese features; zero-fill the Japanese slot.
        ja_bert = torch.zeros(768, len(phone))
    elif language_str == "JP":
        ja_bert = bert
        bert = torch.zeros(1024, len(phone))
    else:
        bert = torch.zeros(1024, len(phone))
        ja_bert = torch.zeros(768, len(phone))
    assert bert.shape[-1] == len(
        phone
    ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)
    return bert, ja_bert, phone, tone, language
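
# Illustrative call, with `hps` loaded from a model's config.json; the shapes
# follow the assertions above:
#   bert, ja_bert, phone, tone, language = get_text("はりきっていこう!", "JP", hps)
#   # bert: [1024, N], ja_bert: [768, N]; phone/tone/language: LongTensors of length N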


def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
    global net_g
    bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
    with torch.no_grad():
        x_tst = phones.to(device).unsqueeze(0)
        tones = tones.to(device).unsqueeze(0)
        lang_ids = lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        ja_bert = ja_bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        # print(x_tst.type(), tones.type(), lang_ids.type(), bert.type(), ja_bert.type(), x_tst_lengths.type())
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        audio = (
            net_g.infer(
                x_tst,
                x_tst_lengths,
                speakers,
                tones,
                lang_ids,
                bert,
                ja_bert,
                sdp_ratio=sdp_ratio,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
            )[0][0, 0]
            .data.cpu()
            .float()
            .numpy()
        )
        del x_tst, tones, lang_ids, bert, ja_bert, x_tst_lengths, speakers
        torch.cuda.empty_cache()
        return audio
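
# Illustrative usage sketch, assuming a model has been loaded into `net_g` and
# `hps` (e.g. via load_model) and that "SpeakerA" is a hypothetical key in
# hps.data.spk2id:
#   audio = infer(
#       "はりきっていこう!", sdp_ratio=0.2, noise_scale=0.6,
#       noise_scale_w=0.8, length_scale=1.0, sid="SpeakerA", language="JP",
#   )
#   wavfile.write("sample.wav", hps.data.sampling_rate, audio)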

__LOG__ = "./generation_logs.txt"


def tts_fn(text, model_name: str, sdp_ratio, noise_scale, noise_scale_w, length_scale, language):
    global curr_model_name
    if curr_model_name != model_name:
        load_model(model_name)
    # Empty the ./infer_save folder before generating.
    if os.path.exists("./infer_save"):
        shutil.rmtree("./infer_save")
    os.makedirs("./infer_save")
    slices = text.split("\n")
    slices = [slice for slice in slices if slice.strip() != ""]
    audio_list = []
    with torch.no_grad():
        with open(__LOG__, "a", encoding="UTF-8") as f:
            for slice in slices:
                assert len(slice) < 250  # limit the length of each input line
                audio = infer(
                    slice,
                    sdp_ratio=sdp_ratio,
                    noise_scale=noise_scale,
                    noise_scale_w=noise_scale_w,
                    length_scale=length_scale,
                    sid=list(hps_[curr_model_name].data.spk2id.keys())[0],
                    language=language,
                )
                audio_list.append(audio)
                # Build a unique file name from the current millisecond timestamp.
                timestamp = str(int(time.time() * 1000))
                audio_file_path = f"./infer_save/audio_{timestamp}.wav"
                # Save this slice's audio to a .wav file.
                wavfile.write(audio_file_path, hps.data.sampling_rate, audio)
                # Append half a second of silence between slices.
                silence = np.zeros(int(hps.data.sampling_rate / 2), dtype=audio.dtype)
                audio_list.append(silence)
                f.write(f"{slice} | {curr_model_name}\n")
                print(f"{slice} | {curr_model_name}")
    audio_concat = np.concatenate(audio_list)
    return "Success", (hps.data.sampling_rate, audio_concat)


def load_model(model_name: str):
    global net_g, curr_model_name, hps_, hps
    assert os.path.exists(os.path.join("logs", model_name))
    curr_model_name = model_name
    # Reuse the hyperparameters cached at startup.
    hps = hps_[curr_model_name]
    all_files = os.listdir(os.path.join("logs", model_name))
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)
    _ = net_g.eval()
    # Find the generator checkpoint (G_*.pth) with the largest step number.
    g_files = [f for f in all_files if f.startswith("G_") and f.endswith(".pth")]
    max_num = -1
    max_file = None
    for f in g_files:
        num = int(re.search(r"G_(\d+)\.pth", f).group(1))
        if num > max_num:
            max_num = num
            max_file = f
    # Load the newest checkpoint, if one was found.
    if max_file:
        file_path = os.path.join("./logs/", model_name, max_file)
        _ = utils.load_checkpoint(file_path, net_g, None, skip_optimizer=True)
    else:
        print("No suitable checkpoint file found")


if __name__ == "__main__":
    models = os.listdir("./logs")
    # Cache the hyperparameters of every model found under ./logs.
    hps_ = {}
    for i in models:
        hps_[i] = utils.get_hparams_from_file(os.path.join("./logs", i, "config.json"))
    curr_model_name = models[0]
    hps = hps_[curr_model_name]
    # speaker_ids = hps.data.spk2id
    # speakers = list(speaker_ids.keys())
    device = (
        "cuda:0"
        if torch.cuda.is_available()
        else (
            "mps"
            if sys.platform == "darwin" and torch.backends.mps.is_available()
            else "cpu"
        )
    )
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)
    _ = net_g.eval()
    languages = ["JP"]

    with gr.Blocks() as app:
        with gr.Tab(label="umamusume"):
            with gr.Row():
                with gr.Column():
                    text = gr.TextArea(
                        label="Text",
                        placeholder="Input Text Here",
                        value="はりきっていこう!",
                    )
                    speaker = gr.Dropdown(
                        choices=models, value=models[0], label="Models"
                    )
                    with gr.Accordion("Settings", open=False):
                        sdp_ratio = gr.Slider(
                            minimum=0, maximum=1, value=0.2, step=0.1, label="SDP Ratio"
                        )
                        noise_scale = gr.Slider(
                            minimum=0.1, maximum=2, value=0.6, step=0.1, label="Noise Scale"
                        )
                        noise_scale_w = gr.Slider(
                            minimum=0.1, maximum=2, value=0.8, step=0.1, label="Noise Scale W"
                        )
                        length_scale = gr.Slider(
                            minimum=0.1, maximum=2, value=1, step=0.1, label="Length Scale"
                        )
                        language = gr.Dropdown(
                            choices=languages, value=languages[0], label="Language"
                        )
                    btn = gr.Button("Generate!", variant="primary")
                with gr.Column():
                    text_output = gr.Textbox(label="Message")
                    audio_output = gr.Audio(label="Output Audio")
                    gr.Markdown(
                        "# Umamusume Bert-VITS2 Text-to-Speech\n"
                        "Project page: [GitHub](https://github.com/fishaudio/Bert-VITS2)\n"
                        "- Still updating...\n"
                        "- We found that a model trained on a single speaker may produce better audio than a multi-speaker model.\n"
                    )
        btn.click(
            tts_fn,
            inputs=[
                text,
                speaker,
                sdp_ratio,
                noise_scale,
                noise_scale_w,
                length_scale,
                language,
            ],
            outputs=[text_output, audio_output],
        )

    app.launch(server_name="0.0.0.0")
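
    # Note: server_name="0.0.0.0" binds to all interfaces so the UI is reachable
    # from other machines; app.launch(share=True) would additionally create a
    # temporary public Gradio link.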