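"""Gradio inference app for a BangDream Bert-VITS2 text-to-speech model.

Builds one tab per band/troupe with per-character synthesis controls, plus an
audiobook mode that batches a text file into WAV parts with SRT subtitles.
"""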
import argparse
import os
from pathlib import Path
import logging
import re_matching

logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)

logging.basicConfig(
    level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)

logger = logging.getLogger(__name__)

import librosa
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from clap_wrapper import get_clap_audio_feature, get_clap_text_feature
from tools.sentence import (
    extrac,
    is_japanese,
    is_chinese,
    seconds_to_ass_time,
    extract_text_from_file,
    remove_annotations,
    extract_and_convert,
)
import re
import gradio as gr
import utils
from config import config
import commons
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
from scipy.io.wavfile import write
from models import SynthesizerTrn
from text.symbols import symbols
import sys
import shutil

net_g = None

device = (
    "cuda:0"
    if torch.cuda.is_available()
    else (
        "mps"
        if sys.platform == "darwin" and torch.backends.mps.is_available()
        else "cpu"
    )
)

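# Band/troupe -> member names; each name doubles as a speaker id, so every
# entry is expected to match a key of hps.data.spk2id in the model config.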
BandList = {
    "PoppinParty": ["香澄", "有咲", "たえ", "りみ", "沙綾"],
    "Afterglow": ["蘭", "モカ", "ひまり", "巴", "つぐみ"],
    "HelloHappyWorld": ["こころ", "美咲", "薫", "花音", "はぐみ"],
    "PastelPalettes": ["彩", "日菜", "千聖", "イヴ", "麻弥"],
    "Roselia": ["友希那", "紗夜", "リサ", "燐子", "あこ"],
    "RaiseASuilen": ["レイヤ", "ロック", "ますき", "チュチュ", "パレオ"],
    "Morfonica": ["ましろ", "瑠唯", "つくし", "七深", "透子"],
    "MyGo": ["燈", "愛音", "そよ", "立希", "楽奈"],
    "AveMujica": ["祥子", "睦", "海鈴", "にゃむ", "初華"],
    "圣翔音乐学园": ["華戀", "光", "香子", "雙葉", "真晝", "純那", "克洛迪娜", "真矢", "奈奈"],
    "凛明馆女子学校": ["珠緒", "壘", "文", "悠悠子", "一愛"],
    "弗隆提亚艺术学校": ["艾露", "艾露露", "菈樂菲", "司", "靜羽"],
    "西克菲尔特音乐学院": ["晶", "未知留", "八千代", "栞", "美帆"],
}

def get_net_g(model_path: str, device: str, hps):
    # Current-version model: build the synthesizer and load its checkpoint.
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)
    _ = net_g.eval()
    _ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
    return net_g

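# Text front end: returns (bert, phone, tone, language) tensors. With
# hps.data.add_blank, a blank token (0) is interspersed between phonemes and
# word2ph is doubled so BERT features stay aligned with the phoneme sequence.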
def get_text(text, language_str, hps, device, style_text=None, style_weight=0.7):
    style_text = None if style_text == "" else style_text
    # Current-version get_text implementation.
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1
    bert = get_bert(norm_text, word2ph, language_str, device, style_text, style_weight)
    del word2ph
    assert bert.shape[-1] == len(
        phone
    ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"

    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)
    return bert, phone, tone, language

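# Gradio-facing inference: the emotion embedding comes from CLAP features of a
# reference audio clip when one is supplied, otherwise from the emotion text;
# output is (sampling_rate, 16-bit wav) ready for gr.Audio.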
def infer(
    text,
    sdp_ratio,
    noise_scale,
    noise_scale_w,
    length_scale,
    sid,
    emotion,
    reference_audio=None,
    skip_start=False,
    skip_end=False,
    style_text=None,
    style_weight=0.7,
):
    language = "JP"
    if isinstance(reference_audio, np.ndarray):
        emo = get_clap_audio_feature(reference_audio, device)
    else:
        emo = get_clap_text_feature(emotion, device)
    emo = torch.squeeze(emo, dim=1)
    bert, phones, tones, lang_ids = get_text(
        text,
        language,
        hps,
        device,
        style_text=style_text,
        style_weight=style_weight,
    )
    if skip_start:
        # Trim the leading boundary tokens (useful when stitching segments).
        phones = phones[3:]
        tones = tones[3:]
        lang_ids = lang_ids[3:]
        bert = bert[:, 3:]
    if skip_end:
        # Trim the trailing boundary tokens.
        phones = phones[:-2]
        tones = tones[:-2]
        lang_ids = lang_ids[:-2]
        bert = bert[:, :-2]
    with torch.no_grad():
        x_tst = phones.to(device).unsqueeze(0)
        tones = tones.to(device).unsqueeze(0)
        lang_ids = lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        emo = emo.to(device).unsqueeze(0)
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        audio = (
            net_g.infer(
                x_tst,
                x_tst_lengths,
                speakers,
                tones,
                lang_ids,
                bert,
                emo,
                sdp_ratio=sdp_ratio,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
            )[0][0, 0]
            .data.cpu()
            .float()
            .numpy()
        )
        del (
            x_tst,
            tones,
            lang_ids,
            bert,
            x_tst_lengths,
            speakers,
            emo,
        )
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return (hps.data.sampling_rate, gr.processing_utils.convert_to_16_bit_wav(audio))

'''Legacy ASS-format subtitle generator, kept for reference:
def generate_audio_and_srt_for_group(group, outputPath, group_index, sampling_rate, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, spealerList, silenceTime):
    audio_fin = []
    ass_entries = []
    start_time = 0
    # speaker = random.choice(cara_list)
    ass_header = """[Script Info]
; No comment.
Title: Audiobook
ScriptType: v4.00+
WrapStyle: 0
PlayResX: 640
PlayResY: 360
ScaledBorderAndShadow: yes
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
Style: Default,Arial,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,1,1,2,10,10,10,1
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
    for sentence in group:
        try:
            print(sentence)
            FakeSpeaker = sentence.split("|")[0]
            print(FakeSpeaker)
            SpeakersList = re.split('\n', spealerList)
            if FakeSpeaker in list(hps.data.spk2id.keys()):
                speaker = FakeSpeaker
            for i in SpeakersList:
                if FakeSpeaker == i.split("|")[1]:
                    speaker = i.split("|")[0]
            if sentence != '\n':
                audio = infer_simple((remove_annotations(sentence.split("|")[-1]).replace(" ", "") + "。").replace(",。", "。").replace("。。", "。"), sdp_ratio, noise_scale, noise_scale_w, length_scale, speaker)
                silence_frames = int(silenceTime * 44100)
                silence_data = np.zeros((silence_frames,), dtype=audio.dtype)
                audio_fin.append(audio)
                audio_fin.append(silence_data)
                duration = len(audio) / sampling_rate
                print(duration)
                end_time = start_time + duration + silenceTime
                ass_entries.append("Dialogue: 0,{},{},".format(seconds_to_ass_time(start_time), seconds_to_ass_time(end_time)) + "Default,,0,0,0,,{}".format(sentence.replace("|", ":")))
                start_time = end_time
        except:
            pass
    wav_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.wav')
    ass_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.ass')
    write(wav_filename, sampling_rate, np.concatenate(audio_fin))
    with open(ass_filename, 'w', encoding='utf-8') as f:
        f.write(ass_header + '\n'.join(ass_entries))
    return (hps.data.sampling_rate, np.concatenate(audio_fin))
'''

def format_srt_timestamp(seconds):
    # Convert fractional seconds to an SRT timestamp, HH:MM:SS,mmm.
    ms = int((seconds - int(seconds)) * 1000)
    seconds = int(seconds)
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return f"{hours:02}:{minutes:02}:{seconds:02},{ms:03}"

def clean_sentence(sentence):
    # Strip line breaks and spaces before feeding text to the synthesizer.
    return sentence.replace('\n', '').replace('\r', '').replace(' ', '')

def generate_audio_and_srt_for_group(group, outputPath, group_index, sampling_rate, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, spealerList, silenceTime):
    audio_fin = []
    srt_entries = []
    start_time = 0
    for i, sentence in enumerate(group):
        try:
            # Each sentence is "speaker|text"; spealerList maps
            # "config_speaker|text_speaker" pairs, one per line.
            FakeSpeaker = sentence.split("|")[0]
            SpeakersList = re.split('\n', spealerList)
            if FakeSpeaker in list(hps.data.spk2id.keys()):
                speaker = FakeSpeaker
            for s in SpeakersList:
                if FakeSpeaker == s.split("|")[1]:
                    speaker = s.split("|")[0]
            if len(sentence) > 2 and sentence.strip():
                clean_msg = clean_sentence(sentence.split("|")[-1])
                audio = infer_simple((remove_annotations(clean_msg) + "。").replace(",。", "。").replace("。。", "。"), sdp_ratio, noise_scale, noise_scale_w, length_scale, speaker)
                silence_frames = int(silenceTime * 44100)
                silence_data = np.zeros((silence_frames,), dtype=audio.dtype)
                audio_fin.append(audio)
                audio_fin.append(silence_data)
                duration = len(audio) / sampling_rate
                end_time = start_time + duration + silenceTime
                srt_entries.append(f"{i + 1}\n{format_srt_timestamp(start_time)} --> {format_srt_timestamp(end_time)}\n{clean_msg.replace('|', ':')}\n\n")
                start_time = end_time
        except Exception:
            logger.exception("Skipping sentence that failed to synthesize: %r", sentence)
    wav_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.wav')
    srt_filename = os.path.join(outputPath, f'audiobook_part_{group_index}.srt')
    write(wav_filename, sampling_rate, np.concatenate(audio_fin))
    with open(srt_filename, 'w', encoding='utf-8') as f:
        f.writelines(srt_entries)
    return (hps.data.sampling_rate, np.concatenate(audio_fin))

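# Same pipeline as infer(), but returns raw float32 numpy audio (no 16-bit
# conversion) so audiobook groups can be padded with silence and concatenated.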
def infer_simple(
    text,
    sdp_ratio,
    noise_scale,
    noise_scale_w,
    length_scale,
    sid,
    emotion="",
    reference_audio=None,
    skip_start=False,
    skip_end=False,
    style_text=None,
    style_weight=0.7,
):
    language = "JP"
    if isinstance(reference_audio, np.ndarray):
        emo = get_clap_audio_feature(reference_audio, device)
    else:
        emo = get_clap_text_feature(emotion, device)
    emo = torch.squeeze(emo, dim=1)
    bert, phones, tones, lang_ids = get_text(
        text,
        language,
        hps,
        device,
        style_text=style_text,
        style_weight=style_weight,
    )
    if skip_start:
        phones = phones[3:]
        tones = tones[3:]
        lang_ids = lang_ids[3:]
        bert = bert[:, 3:]
    if skip_end:
        phones = phones[:-2]
        tones = tones[:-2]
        lang_ids = lang_ids[:-2]
        bert = bert[:, :-2]
    with torch.no_grad():
        x_tst = phones.to(device).unsqueeze(0)
        tones = tones.to(device).unsqueeze(0)
        lang_ids = lang_ids.to(device).unsqueeze(0)
        bert = bert.to(device).unsqueeze(0)
        x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
        emo = emo.to(device).unsqueeze(0)
        del phones
        speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
        audio = (
            net_g.infer(
                x_tst,
                x_tst_lengths,
                speakers,
                tones,
                lang_ids,
                bert,
                emo,
                sdp_ratio=sdp_ratio,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
            )[0][0, 0]
            .data.cpu()
            .float()
            .numpy()
        )
        del (
            x_tst,
            tones,
            lang_ids,
            bert,
            x_tst_lengths,
            speakers,
            emo,
        )
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return audio

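# Audiobook driver: wipes/creates the output folder, extracts text from the
# uploaded file (or the textbox), splits it into sentence groups, and renders
# each group to audiobook_part_N.wav plus a matching .srt file.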
def audiobook(inputFile, groupsize, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, spealerList, silenceTime, filepath, raw_text):
    directory_path = filepath if torch.cuda.is_available() else "books"
    if os.path.exists(directory_path):
        shutil.rmtree(directory_path)
    os.makedirs(directory_path)
    if inputFile:
        text = extract_text_from_file(inputFile.name)
    else:
        text = raw_text
    sentences = extrac(extract_and_convert(text))
    GROUP_SIZE = groupsize
    for i in range(0, len(sentences), GROUP_SIZE):
        group = sentences[i:i + GROUP_SIZE]
        if spealerList == "":
            spealerList = "无"
        result = generate_audio_and_srt_for_group(group, directory_path, i // GROUP_SIZE + 1, 44100, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, spealerList, silenceTime)
        if not torch.cuda.is_available():
            # Without CUDA, return after synthesizing only the first group.
            return result
    return result

def loadmodel(model):
    # Hot-swap checkpoint weights into the already-constructed net_g.
    _ = net_g.eval()
    _ = utils.load_checkpoint(model, net_g, None, skip_optimizer=True)
    return "success"

if __name__ == "__main__":
    # Collect every checkpoint under the models directory; the last file found
    # is loaded by default.
    modelPaths = []
    for dirpath, dirnames, filenames in os.walk('Data/BangDream/models/'):
        for filename in filenames:
            modelPaths.append(os.path.join(dirpath, filename))
    hps = utils.get_hparams_from_file('Data/BangDream/config.json')
    net_g = get_net_g(
        model_path=modelPaths[-1], device=device, hps=hps
    )
    speaker_ids = hps.data.spk2id
    speakers = list(speaker_ids.keys())

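    # UI: one tab per band, one sub-tab per character, each with its own
    # sliders, emotion textbox, and model-switching accordion.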
    with gr.Blocks() as app:
        for band in BandList:
            with gr.TabItem(band):
                for name in BandList[band]:
                    with gr.TabItem(name):
                        with gr.Row():
                            with gr.Column():
                                with gr.Row():
                                    gr.Markdown(
                                        '<div align="center">'
                                        f'<img style="width:auto;height:400px;" src="https://mahiruoshi-bangdream-bert-vits2.hf.space/file/image/{name}.png">'
                                        '</div>'
                                    )
                                length_scale = gr.Slider(
                                    minimum=0.1, maximum=2, value=1, step=0.01, label="语速调节"
                                )
                                emotion = gr.Textbox(
                                    label="情感标注文本",
                                    value='なんではるひかげやったの?!!',
                                )
                                style_weight = gr.Slider(
                                    minimum=0.1, maximum=2, value=1, step=0.01, label="感情比重"
                                )
                                with gr.Accordion(label="参数设定", open=False):
                                    sdp_ratio = gr.Slider(
                                        minimum=0, maximum=1, value=0.5, step=0.01, label="SDP/DP混合比"
                                    )
                                    noise_scale = gr.Slider(
                                        minimum=0.1, maximum=2, value=0.6, step=0.01, label="感情调节"
                                    )
                                    noise_scale_w = gr.Slider(
                                        minimum=0.1, maximum=2, value=0.8, step=0.01, label="音素长度"
                                    )
                                    speaker = gr.Dropdown(
                                        choices=speakers, value=name, label="说话人"
                                    )
                                    skip_start = gr.Checkbox(label="跳过开头")
                                    skip_end = gr.Checkbox(label="跳过结尾")
                                with gr.Accordion(label="切换模型", open=False):
                                    modelstrs = gr.Dropdown(label="模型", choices=modelPaths, value=modelPaths[0], type="value")
                                    btnMod = gr.Button("载入模型")
                                    statusa = gr.TextArea()
                                    btnMod.click(loadmodel, inputs=[modelstrs], outputs=[statusa])
                            with gr.Column():
                                text = gr.TextArea(
                                    label="输入纯日语",
                                    placeholder="输入纯日语",
                                    value="なんではるひかげやったの?!!",
                                )
                                reference_audio = gr.Audio(label="情感参考音频", type="filepath")
                                btn = gr.Button("点击生成", variant="primary")
                                audio_output = gr.Audio(label="Output Audio")
                                btn.click(
                                    infer,
                                    inputs=[
                                        text,
                                        sdp_ratio,
                                        noise_scale,
                                        noise_scale_w,
                                        length_scale,
                                        speaker,
                                        emotion,
                                        reference_audio,
                                        skip_start,
                                        skip_end,
                                        emotion,  # the emotion text is reused as style_text
                                        style_weight,
                                    ],
                                    outputs=[audio_output],
                                )

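        # Extension tab: batch audiobook synthesis from an uploaded txt file
        # or pasted text.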
        with gr.Tab('拓展功能'):
            with gr.Row():
                with gr.Column():
                    gr.Markdown(
                        "从 <a href='https://nijigaku.top/2023/10/03/BangDreamTTS/'>我的博客站点</a> 查看自制galgame使用说明"
                    )
                    inputFile = gr.UploadButton(label="txt文件输入")
                    raw_text = gr.TextArea(
                        label="文本输入",
                        info="输入纯日语",
                        value="つくし|なんではるひかげやったの?!!",
                    )
                    groupSize = gr.Slider(
                        minimum=10, maximum=1000000 if torch.cuda.is_available() else 50, value=50, step=1, label="单个音频文件包含的最大字数"
                    )
                    silenceTime = gr.Slider(
                        minimum=0, maximum=1, value=0.5, step=0.01, label="句子的间隔"
                    )
                    filepath = gr.TextArea(
                        label="本地合成时的音频存储文件夹(会清空文件夹)",
                        value="D:/audiobook/book1",
                    )
                    spealerList = gr.TextArea(
                        label="角色对应表,左边是你想要在每一句话合成中用到的speaker(见角色清单)右边是你上传文本时分隔符左边设置的说话人:{ChoseSpeakerFromConfigList}|{SpeakerInUploadText}",
                        value="ましろ|真白\n七深|七深\n透子|透子\nつくし|筑紫\n瑠唯|瑠唯\nそよ|素世\n祥子|祥子",
                    )
                    speaker = gr.Dropdown(
                        choices=speakers, value="ましろ", label="选择默认说话人"
                    )
                with gr.Column():
                    sdp_ratio = gr.Slider(
                        minimum=0, maximum=1, value=0.5, step=0.01, label="SDP/DP混合比"
                    )
                    noise_scale = gr.Slider(
                        minimum=0.1, maximum=2, value=0.6, step=0.01, label="感情调节"
                    )
                    noise_scale_w = gr.Slider(
                        minimum=0.1, maximum=2, value=0.667, step=0.01, label="音素长度"
                    )
                    length_scale = gr.Slider(
                        minimum=0.1, maximum=2, value=1, step=0.01, label="生成长度"
                    )
                    LastAudioOutput = gr.Audio(label="当使用cuda时才能在本地文件夹浏览全部文件")
                    btn2 = gr.Button("点击生成", variant="primary")
                    btn2.click(
                        audiobook,
                        inputs=[
                            inputFile,
                            groupSize,
                            speaker,
                            sdp_ratio,
                            noise_scale,
                            noise_scale_w,
                            length_scale,
                            spealerList,
                            silenceTime,
                            filepath,
                            raw_text,
                        ],
                        outputs=[LastAudioOutput],
                    )

    print("推理页面已开启!")
    app.launch()