Dataset columns:
    repo          string (length 7 to 90)
    file_url      string (length 81 to 315)
    file_path     string (length 4 to 228)
    content       string (length 0 to 32.8k)
    language      string (1 distinct value)
    license       string (7 distinct values)
    commit_sha    string (length 40)
    retrieved_at  date (2026-01-04 14:38:15 to 2026-01-05 02:33:18)
    truncated     bool (2 distinct values)
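Before the records themselves, a note on consumption: rows like these can be replayed into a working tree. A minimal sketch, assuming the dump is stored as JSON Lines in a hypothetical `records.jsonl` with exactly the columns above:

```python
# Minimal sketch: materialize dump rows back into files on disk.
# Assumption (not part of the dump): one JSON object per line in "records.jsonl".
import json
import os

with open("records.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        if row["truncated"]:
            # truncated rows hold only a prefix of the file; skip or re-fetch via file_url
            continue
        dest = os.path.join("checkout", row["file_path"])
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with open(dest, "w", encoding="utf-8") as out:
            out.write(row["content"])
```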
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/api.py
api.py
""" # api.py usage ` python api.py -dr "123.wav" -dt "一二三。" -dl "zh" ` ## 执行参数: `-s` - `SoVITS模型路径, 可在 config.py 中指定` `-g` - `GPT模型路径, 可在 config.py 中指定` 调用请求缺少参考音频时使用 `-dr` - `默认参考音频路径` `-dt` - `默认参考音频文本` `-dl` - `默认参考音频语种, "中文","英文","日文","韩文","粤语,"zh","en","ja","ko","yue"` `-d` - `推理设备, "cuda","cpu"` `-a` - `绑定地址, 默认"127.0.0.1"` `-p` - `绑定端口, 默认9880, 可在 config.py 中指定` `-fp` - `覆盖 config.py 使用全精度` `-hp` - `覆盖 config.py 使用半精度` `-sm` - `流式返回模式, 默认不启用, "close","c", "normal","n", "keepalive","k"` ·-mt` - `返回的音频编码格式, 流式默认ogg, 非流式默认wav, "wav", "ogg", "aac"` ·-st` - `返回的音频数据类型, 默认int16, "int16", "int32"` ·-cp` - `文本切分符号设定, 默认为空, 以",.,。"字符串的方式传入` `-hb` - `cnhubert路径` `-b` - `bert路径` ## 调用: ### 推理 endpoint: `/` 使用执行参数指定的参考音频: GET: `http://127.0.0.1:9880?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh` POST: ```json { "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。", "text_language": "zh" } ``` 使用执行参数指定的参考音频并设定分割符号: GET: `http://127.0.0.1:9880?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh&cut_punc=,。` POST: ```json { "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。", "text_language": "zh", "cut_punc": ",。", } ``` 手动指定当次推理所使用的参考音频: GET: `http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh` POST: ```json { "refer_wav_path": "123.wav", "prompt_text": "一二三。", "prompt_language": "zh", "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。", "text_language": "zh" } ``` RESP: 成功: 直接返回 wav 音频流, http code 200 失败: 返回包含错误信息的 json, http code 400 手动指定当次推理所使用的参考音频,并提供参数: GET: `http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh&top_k=20&top_p=0.6&temperature=0.6&speed=1&inp_refs="456.wav"&inp_refs="789.wav"` POST: ```json { "refer_wav_path": "123.wav", "prompt_text": "一二三。", "prompt_language": "zh", "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。", "text_language": "zh", "top_k": 20, "top_p": 0.6, "temperature": 0.6, "speed": 1, "inp_refs": ["456.wav","789.wav"] } ``` RESP: 成功: 直接返回 wav 音频流, http code 200 失败: 返回包含错误信息的 json, http code 400 ### 更换默认参考音频 endpoint: `/change_refer` key与推理端一样 GET: `http://127.0.0.1:9880/change_refer?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh` POST: ```json { "refer_wav_path": "123.wav", "prompt_text": "一二三。", "prompt_language": "zh" } ``` RESP: 成功: json, http code 200 失败: json, 400 ### 命令控制 endpoint: `/control` command: "restart": 重新运行 "exit": 结束运行 GET: `http://127.0.0.1:9880/control?command=restart` POST: ```json { "command": "restart" } ``` RESP: 无 """ import argparse import os import re import sys now_dir = os.getcwd() sys.path.append(now_dir) sys.path.append("%s/GPT_SoVITS" % (now_dir)) import signal from text.LangSegmenter import LangSegmenter from time import time as ttime import torch import torchaudio import librosa import soundfile as sf from fastapi import FastAPI, Request, Query from fastapi.responses import StreamingResponse, JSONResponse import uvicorn from transformers import AutoModelForMaskedLM, AutoTokenizer import numpy as np from feature_extractor import cnhubert from io import BytesIO from module.models import Generator, SynthesizerTrn, SynthesizerTrnV3 from peft import LoraConfig, get_peft_model from AR.models.t2s_lightning_module import Text2SemanticLightningModule from text import cleaned_text_to_sequence from text.cleaner import clean_text from module.mel_processing import spectrogram_torch import config as global_config import logging import subprocess class DefaultRefer: def 
__init__(self, path, text, language): self.path = args.default_refer_path self.text = args.default_refer_text self.language = args.default_refer_language def is_ready(self) -> bool: return is_full(self.path, self.text, self.language) def is_empty(*items): # 任意一项不为空返回False for item in items: if item is not None and item != "": return False return True def is_full(*items): # 任意一项为空返回False for item in items: if item is None or item == "": return False return True bigvgan_model = hifigan_model = sv_cn_model = None def clean_hifigan_model(): global hifigan_model if hifigan_model: hifigan_model = hifigan_model.cpu() hifigan_model = None try: torch.cuda.empty_cache() except: pass def clean_bigvgan_model(): global bigvgan_model if bigvgan_model: bigvgan_model = bigvgan_model.cpu() bigvgan_model = None try: torch.cuda.empty_cache() except: pass def clean_sv_cn_model(): global sv_cn_model if sv_cn_model: sv_cn_model.embedding_model = sv_cn_model.embedding_model.cpu() sv_cn_model = None try: torch.cuda.empty_cache() except: pass def init_bigvgan(): global bigvgan_model, hifigan_model, sv_cn_model from BigVGAN import bigvgan bigvgan_model = bigvgan.BigVGAN.from_pretrained( "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), use_cuda_kernel=False, ) # if True, RuntimeError: Ninja is required to load C++ extensions # remove weight norm in the model and set to eval mode bigvgan_model.remove_weight_norm() bigvgan_model = bigvgan_model.eval() if is_half == True: bigvgan_model = bigvgan_model.half().to(device) else: bigvgan_model = bigvgan_model.to(device) def init_hifigan(): global hifigan_model, bigvgan_model, sv_cn_model hifigan_model = Generator( initial_channel=100, resblock="1", resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], upsample_rates=[10, 6, 2, 2, 2], upsample_initial_channel=512, upsample_kernel_sizes=[20, 12, 4, 4, 4], gin_channels=0, is_bias=True, ) hifigan_model.eval() hifigan_model.remove_weight_norm() state_dict_g = torch.load( "%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,), map_location="cpu", weights_only=False, ) print("loading vocoder", hifigan_model.load_state_dict(state_dict_g)) if is_half == True: hifigan_model = hifigan_model.half().to(device) else: hifigan_model = hifigan_model.to(device) from sv import SV def init_sv_cn(): global hifigan_model, bigvgan_model, sv_cn_model sv_cn_model = SV(device, is_half) resample_transform_dict = {} def resample(audio_tensor, sr0, sr1, device): global resample_transform_dict key = "%s-%s-%s" % (sr0, sr1, str(device)) if key not in resample_transform_dict: resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device) return resample_transform_dict[key](audio_tensor) from module.mel_processing import mel_spectrogram_torch spec_min = -12 spec_max = 2 def norm_spec(x): return (x - spec_min) / (spec_max - spec_min) * 2 - 1 def denorm_spec(x): return (x + 1) / 2 * (spec_max - spec_min) + spec_min mel_fn = lambda x: mel_spectrogram_torch( x, **{ "n_fft": 1024, "win_size": 1024, "hop_size": 256, "num_mels": 100, "sampling_rate": 24000, "fmin": 0, "fmax": None, "center": False, }, ) mel_fn_v4 = lambda x: mel_spectrogram_torch( x, **{ "n_fft": 1280, "win_size": 1280, "hop_size": 320, "num_mels": 100, "sampling_rate": 32000, "fmin": 0, "fmax": None, "center": False, }, ) sr_model = None def audio_sr(audio, sr): global sr_model if sr_model == None: from tools.audio_sr import AP_BWE try: sr_model = AP_BWE(device, 
DictToAttrRecursive) except FileNotFoundError: logger.info("你没有下载超分模型的参数,因此不进行超分。如想超分请先参照教程把文件下载") return audio.cpu().detach().numpy(), sr return sr_model(audio, sr) class Speaker: def __init__(self, name, gpt, sovits, phones=None, bert=None, prompt=None): self.name = name self.sovits = sovits self.gpt = gpt self.phones = phones self.bert = bert self.prompt = prompt speaker_list = {} class Sovits: def __init__(self, vq_model, hps): self.vq_model = vq_model self.hps = hps from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new def get_sovits_weights(sovits_path): from config import pretrained_sovits_name path_sovits_v3 = pretrained_sovits_name["v3"] path_sovits_v4 = pretrained_sovits_name["v4"] is_exist_s2gv3 = os.path.exists(path_sovits_v3) is_exist_s2gv4 = os.path.exists(path_sovits_v4) version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path) is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4 path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 if if_lora_v3 == True and is_exist == False: logger.info("SoVITS %s 底模缺失,无法加载相应 LoRA 权重" % model_version) dict_s2 = load_sovits_new(sovits_path) hps = dict_s2["config"] hps = DictToAttrRecursive(hps) hps.model.semantic_frame_rate = "25hz" if "enc_p.text_embedding.weight" not in dict_s2["weight"]: hps.model.version = "v2" # v3model,v2sybomls elif dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322: hps.model.version = "v1" else: hps.model.version = "v2" model_params_dict = vars(hps.model) if model_version not in {"v3", "v4"}: if "Pro" in model_version: hps.model.version = model_version if sv_cn_model == None: init_sv_cn() vq_model = SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **model_params_dict, ) else: hps.model.version = model_version vq_model = SynthesizerTrnV3( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **model_params_dict, ) if model_version == "v3": init_bigvgan() if model_version == "v4": init_hifigan() model_version = hps.model.version logger.info(f"模型版本: {model_version}") if "pretrained" not in sovits_path: try: del vq_model.enc_q except: pass if is_half == True: vq_model = vq_model.half().to(device) else: vq_model = vq_model.to(device) vq_model.eval() if if_lora_v3 == False: vq_model.load_state_dict(dict_s2["weight"], strict=False) else: path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 vq_model.load_state_dict(load_sovits_new(path_sovits)["weight"], strict=False) lora_rank = dict_s2["lora_rank"] lora_config = LoraConfig( target_modules=["to_k", "to_q", "to_v", "to_out.0"], r=lora_rank, lora_alpha=lora_rank, init_lora_weights=True, ) vq_model.cfm = get_peft_model(vq_model.cfm, lora_config) vq_model.load_state_dict(dict_s2["weight"], strict=False) vq_model.cfm = vq_model.cfm.merge_and_unload() # torch.save(vq_model.state_dict(),"merge_win.pth") vq_model.eval() sovits = Sovits(vq_model, hps) return sovits class Gpt: def __init__(self, max_sec, t2s_model): self.max_sec = max_sec self.t2s_model = t2s_model global hz hz = 50 def get_gpt_weights(gpt_path): dict_s1 = torch.load(gpt_path, map_location="cpu", weights_only=False) config = dict_s1["config"] max_sec = config["data"]["max_sec"] t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) t2s_model.load_state_dict(dict_s1["weight"]) if is_half == True: t2s_model = t2s_model.half() t2s_model = 
t2s_model.to(device) t2s_model.eval() # total = sum([param.nelement() for param in t2s_model.parameters()]) # logger.info("Number of parameter: %.2fM" % (total / 1e6)) gpt = Gpt(max_sec, t2s_model) return gpt def change_gpt_sovits_weights(gpt_path, sovits_path): try: gpt = get_gpt_weights(gpt_path) sovits = get_sovits_weights(sovits_path) except Exception as e: return JSONResponse({"code": 400, "message": str(e)}, status_code=400) speaker_list["default"] = Speaker(name="default", gpt=gpt, sovits=sovits) return JSONResponse({"code": 0, "message": "Success"}, status_code=200) def get_bert_feature(text, word2ph): with torch.no_grad(): inputs = tokenizer(text, return_tensors="pt") for i in inputs: inputs[i] = inputs[i].to(device) #####输入是long不用管精度问题,精度随bert_model res = bert_model(**inputs, output_hidden_states=True) res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] assert len(word2ph) == len(text) phone_level_feature = [] for i in range(len(word2ph)): repeat_feature = res[i].repeat(word2ph[i], 1) phone_level_feature.append(repeat_feature) phone_level_feature = torch.cat(phone_level_feature, dim=0) # if(is_half==True):phone_level_feature=phone_level_feature.half() return phone_level_feature.T def clean_text_inf(text, language, version): language = language.replace("all_", "") phones, word2ph, norm_text = clean_text(text, language, version) phones = cleaned_text_to_sequence(phones, version) return phones, word2ph, norm_text def get_bert_inf(phones, word2ph, norm_text, language): language = language.replace("all_", "") if language == "zh": bert = get_bert_feature(norm_text, word2ph).to(device) # .to(dtype) else: bert = torch.zeros( (1024, len(phones)), dtype=torch.float16 if is_half == True else torch.float32, ).to(device) return bert from text import chinese def get_phones_and_bert(text, language, version, final=False): text = re.sub(r' {2,}', ' ', text) textlist = [] langlist = [] if language == "all_zh": for tmp in LangSegmenter.getTexts(text,"zh"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_yue": for tmp in LangSegmenter.getTexts(text,"zh"): if tmp["lang"] == "zh": tmp["lang"] = "yue" langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ja": for tmp in LangSegmenter.getTexts(text,"ja"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ko": for tmp in LangSegmenter.getTexts(text,"ko"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "en": langlist.append("en") textlist.append(text) elif language == "auto": for tmp in LangSegmenter.getTexts(text): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "auto_yue": for tmp in LangSegmenter.getTexts(text): if tmp["lang"] == "zh": tmp["lang"] = "yue" langlist.append(tmp["lang"]) textlist.append(tmp["text"]) else: for tmp in LangSegmenter.getTexts(text): if langlist: if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"): textlist[-1] += tmp["text"] continue if tmp["lang"] == "en": langlist.append(tmp["lang"]) else: # 因无法区别中日韩文汉字,以用户输入为准 langlist.append(language) textlist.append(tmp["text"]) phones_list = [] bert_list = [] norm_text_list = [] for i in range(len(textlist)): lang = langlist[i] phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version) bert = get_bert_inf(phones, word2ph, norm_text, lang) phones_list.append(phones) norm_text_list.append(norm_text) bert_list.append(bert) bert = torch.cat(bert_list, dim=1) phones = 
sum(phones_list, []) norm_text = "".join(norm_text_list) if not final and len(phones) < 6: return get_phones_and_bert("." + text, language, version, final=True) return phones, bert.to(torch.float16 if is_half == True else torch.float32), norm_text class DictToAttrRecursive(dict): def __init__(self, input_dict): super().__init__(input_dict) for key, value in input_dict.items(): if isinstance(value, dict): value = DictToAttrRecursive(value) self[key] = value setattr(self, key, value) def __getattr__(self, item): try: return self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") def __setattr__(self, key, value): if isinstance(value, dict): value = DictToAttrRecursive(value) super(DictToAttrRecursive, self).__setitem__(key, value) super().__setattr__(key, value) def __delattr__(self, item): try: del self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") def get_spepc(hps, filename, dtype, device, is_v2pro=False): sr1 = int(hps.data.sampling_rate) audio, sr0 = torchaudio.load(filename) if sr0 != sr1: audio = audio.to(device) if audio.shape[0] == 2: audio = audio.mean(0).unsqueeze(0) audio = resample(audio, sr0, sr1, device) else: audio = audio.to(device) if audio.shape[0] == 2: audio = audio.mean(0).unsqueeze(0) maxx = audio.abs().max() if maxx > 1: audio /= min(2, maxx) spec = spectrogram_torch( audio, hps.data.filter_length, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, center=False, ) spec = spec.to(dtype) if is_v2pro == True: audio = resample(audio, sr1, 16000, device).to(dtype) return spec, audio def pack_audio(audio_bytes, data, rate): if media_type == "ogg": audio_bytes = pack_ogg(audio_bytes, data, rate) elif media_type == "aac": audio_bytes = pack_aac(audio_bytes, data, rate) else: # wav无法流式, 先暂存raw audio_bytes = pack_raw(audio_bytes, data, rate) return audio_bytes def pack_ogg(audio_bytes, data, rate): # Author: AkagawaTsurunaki # Issue: # Stack overflow probabilistically occurs # when the function `sf_writef_short` of `libsndfile_64bit.dll` is called # using the Python library `soundfile` # Note: # This is an issue related to `libsndfile`, not this project itself. # It happens when you generate a large audio tensor (about 499804 frames in my PC) # and try to convert it to an ogg file. # Related: # https://github.com/RVC-Boss/GPT-SoVITS/issues/1199 # https://github.com/libsndfile/libsndfile/issues/1023 # https://github.com/bastibe/python-soundfile/issues/396 # Suggestion: # Or split the whole audio data into smaller audio segment to avoid stack overflow? def handle_pack_ogg(): with sf.SoundFile(audio_bytes, mode="w", samplerate=rate, channels=1, format="ogg") as audio_file: audio_file.write(data) import threading # See: https://docs.python.org/3/library/threading.html # The stack size of this thread is at least 32768 # If stack overflow error still occurs, just modify the `stack_size`. # stack_size = n * 4096, where n should be a positive integer. # Here we chose n = 4096. stack_size = 4096 * 4096 try: threading.stack_size(stack_size) pack_ogg_thread = threading.Thread(target=handle_pack_ogg) pack_ogg_thread.start() pack_ogg_thread.join() except RuntimeError as e: # If changing the thread stack size is unsupported, a RuntimeError is raised. print("RuntimeError: {}".format(e)) print("Changing the thread stack size is unsupported.") except ValueError as e: # If the specified stack size is invalid, a ValueError is raised and the stack size is unmodified. 
print("ValueError: {}".format(e)) print("The specified stack size is invalid.") return audio_bytes def pack_raw(audio_bytes, data, rate): audio_bytes.write(data.tobytes()) return audio_bytes def pack_wav(audio_bytes, rate): if is_int32: data = np.frombuffer(audio_bytes.getvalue(), dtype=np.int32) wav_bytes = BytesIO() sf.write(wav_bytes, data, rate, format="WAV", subtype="PCM_32") else: data = np.frombuffer(audio_bytes.getvalue(), dtype=np.int16) wav_bytes = BytesIO() sf.write(wav_bytes, data, rate, format="WAV") return wav_bytes def pack_aac(audio_bytes, data, rate): if is_int32: pcm = "s32le" bit_rate = "256k" else: pcm = "s16le" bit_rate = "128k" process = subprocess.Popen( [ "ffmpeg", "-f", pcm, # 输入16位有符号小端整数PCM "-ar", str(rate), # 设置采样率 "-ac", "1", # 单声道 "-i", "pipe:0", # 从管道读取输入 "-c:a", "aac", # 音频编码器为AAC "-b:a", bit_rate, # 比特率 "-vn", # 不包含视频 "-f", "adts", # 输出AAC数据流格式 "pipe:1", # 将输出写入管道 ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, _ = process.communicate(input=data.tobytes()) audio_bytes.write(out) return audio_bytes def read_clean_buffer(audio_bytes): audio_chunk = audio_bytes.getvalue() audio_bytes.truncate(0) audio_bytes.seek(0) return audio_bytes, audio_chunk def cut_text(text, punc): punc_list = [p for p in punc if p in {",", ".", ";", "?", "!", "、", ",", "。", "?", "!", ";", ":", "…"}] if len(punc_list) > 0: punds = r"[" + "".join(punc_list) + r"]" text = text.strip("\n") items = re.split(f"({punds})", text) mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])] # 在句子不存在符号或句尾无符号的时候保证文本完整 if len(items) % 2 == 1: mergeitems.append(items[-1]) text = "\n".join(mergeitems) while "\n\n" in text: text = text.replace("\n\n", "\n") return text def only_punc(text): return not any(t.isalnum() or t.isalpha() for t in text) splits = { ",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", } def get_tts_wav( ref_wav_path, prompt_text, prompt_language, text, text_language, top_k=15, top_p=0.6, temperature=0.6, speed=1, inp_refs=None, sample_steps=32, if_sr=False, spk="default", ): infer_sovits = speaker_list[spk].sovits vq_model = infer_sovits.vq_model hps = infer_sovits.hps version = vq_model.version infer_gpt = speaker_list[spk].gpt t2s_model = infer_gpt.t2s_model max_sec = infer_gpt.max_sec if version == "v3": if sample_steps not in [4, 8, 16, 32, 64, 128]: sample_steps = 32 elif version == "v4": if sample_steps not in [4, 8, 16, 32]: sample_steps = 8 if if_sr and version != "v3": if_sr = False t0 = ttime() prompt_text = prompt_text.strip("\n") if prompt_text[-1] not in splits: prompt_text += "。" if prompt_language != "en" else "." 
prompt_language, text = prompt_language, text.strip("\n") dtype = torch.float16 if is_half == True else torch.float32 zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32) with torch.no_grad(): wav16k, sr = librosa.load(ref_wav_path, sr=16000) wav16k = torch.from_numpy(wav16k) zero_wav_torch = torch.from_numpy(zero_wav) if is_half == True: wav16k = wav16k.half().to(device) zero_wav_torch = zero_wav_torch.half().to(device) else: wav16k = wav16k.to(device) zero_wav_torch = zero_wav_torch.to(device) wav16k = torch.cat([wav16k, zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() codes = vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompt = prompt_semantic.unsqueeze(0).to(device) is_v2pro = version in {"v2Pro", "v2ProPlus"} if version not in {"v3", "v4"}: refers = [] if is_v2pro: sv_emb = [] if sv_cn_model == None: init_sv_cn() if inp_refs: for path in inp_refs: try: #####这里加上提取sv的逻辑,要么一堆sv一堆refer,要么单个sv单个refer refer, audio_tensor = get_spepc(hps, path.name, dtype, device, is_v2pro) refers.append(refer) if is_v2pro: sv_emb.append(sv_cn_model.compute_embedding3(audio_tensor)) except Exception as e: logger.error(e) if len(refers) == 0: refers, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device, is_v2pro) refers = [refers] if is_v2pro: sv_emb = [sv_cn_model.compute_embedding3(audio_tensor)] else: refer, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device) t1 = ttime() # os.environ['version'] = version prompt_language = dict_language[prompt_language.lower()] text_language = dict_language[text_language.lower()] phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language, version) texts = text.split("\n") audio_bytes = BytesIO() for text in texts: # 简单防止纯符号引发参考音频泄露 if only_punc(text): continue audio_opt = [] if text[-1] not in splits: text += "。" if text_language != "en" else "." 
phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language, version) bert = torch.cat([bert1, bert2], 1) all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0) bert = bert.to(device).unsqueeze(0) all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device) t2 = ttime() with torch.no_grad(): pred_semantic, idx = t2s_model.model.infer_panel( all_phoneme_ids, all_phoneme_len, prompt, bert, # prompt_phone_len=ph_offset, top_k=top_k, top_p=top_p, temperature=temperature, early_stop_num=hz * max_sec, ) pred_semantic = pred_semantic[:, -idx:].unsqueeze(0) t3 = ttime() if version not in {"v3", "v4"}: if is_v2pro: audio = ( vq_model.decode( pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed, sv_emb=sv_emb, ) .detach() .cpu() .numpy()[0, 0] ) else: audio = ( vq_model.decode( pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed ) .detach() .cpu() .numpy()[0, 0] ) else: phoneme_ids0 = torch.LongTensor(phones1).to(device).unsqueeze(0) phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0) fea_ref, ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer) ref_audio, sr = torchaudio.load(ref_wav_path) ref_audio = ref_audio.to(device).float() if ref_audio.shape[0] == 2: ref_audio = ref_audio.mean(0).unsqueeze(0) tgt_sr = 24000 if version == "v3" else 32000 if sr != tgt_sr: ref_audio = resample(ref_audio, sr, tgt_sr, device) mel2 = mel_fn(ref_audio) if version == "v3" else mel_fn_v4(ref_audio) mel2 = norm_spec(mel2) T_min = min(mel2.shape[2], fea_ref.shape[2]) mel2 = mel2[:, :, :T_min] fea_ref = fea_ref[:, :, :T_min] Tref = 468 if version == "v3" else 500 Tchunk = 934 if version == "v3" else 1000 if T_min > Tref: mel2 = mel2[:, :, -Tref:] fea_ref = fea_ref[:, :, -Tref:] T_min = Tref chunk_len = Tchunk - T_min mel2 = mel2.to(dtype) fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge, speed) cfm_resss = [] idx = 0 while 1: fea_todo_chunk = fea_todo[:, :, idx : idx + chunk_len] if fea_todo_chunk.shape[-1] == 0: break idx += chunk_len fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1) cfm_res = vq_model.cfm.inference( fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0 ) cfm_res = cfm_res[:, :, mel2.shape[2] :] mel2 = cfm_res[:, :, -T_min:] fea_ref = fea_todo_chunk[:, :, -T_min:] cfm_resss.append(cfm_res) cfm_res = torch.cat(cfm_resss, 2) cfm_res = denorm_spec(cfm_res) if version == "v3": if bigvgan_model == None: init_bigvgan() else: # v4 if hifigan_model == None: init_hifigan() vocoder_model = bigvgan_model if version == "v3" else hifigan_model with torch.inference_mode(): wav_gen = vocoder_model(cfm_res) audio = wav_gen[0][0].cpu().detach().numpy() max_audio = np.abs(audio).max()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
true
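The docstring above pins down the inference contract, so a client needs only a few lines. A minimal sketch, assuming api.py is running locally on the default port with `-dr`/`-dt`/`-dl` set, and that the third-party `requests` package is available (it is not part of this repo):

```python
import requests

BASE = "http://127.0.0.1:9880"

# GET using the server's default reference audio (set via -dr/-dt/-dl)
resp = requests.get(BASE, params={"text": "一二三。", "text_language": "zh"})
if resp.status_code == 200:
    with open("out.wav", "wb") as f:
        f.write(resp.content)  # wav stream per the RESP contract above
else:
    print(resp.json())  # json error info, HTTP 400 per the contract above

# POST, overriding the reference audio for this request only
payload = {
    "refer_wav_path": "123.wav",
    "prompt_text": "一二三。",
    "prompt_language": "zh",
    "text": "一二三。",
    "text_language": "zh",
}
resp = requests.post(BASE, json=payload)
```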
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/webui.py
webui.py
import os import sys os.environ["version"] = version = "v2Pro" now_dir = os.getcwd() sys.path.insert(0, now_dir) import warnings warnings.filterwarnings("ignore") import json import platform import shutil import signal import psutil import torch import yaml os.environ["TORCH_DISTRIBUTED_DEBUG"] = "INFO" torch.manual_seed(233333) tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp if os.path.exists(tmp): for name in os.listdir(tmp): if name == "jieba.cache": continue path = "%s/%s" % (tmp, name) delete = os.remove if os.path.isfile(path) else shutil.rmtree try: delete(path) except Exception as e: print(str(e)) pass import site import traceback site_packages_roots = [] for path in site.getsitepackages(): if "packages" in path: site_packages_roots.append(path) if site_packages_roots == []: site_packages_roots = ["%s/runtime/Lib/site-packages" % now_dir] # os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" os.environ["all_proxy"] = "" for site_packages_root in site_packages_roots: if os.path.exists(site_packages_root): try: with open("%s/users.pth" % (site_packages_root), "w") as f: f.write( # "%s\n%s/runtime\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5" "%s\n%s/GPT_SoVITS/BigVGAN\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5" % (now_dir, now_dir, now_dir, now_dir, now_dir, now_dir) ) break except PermissionError: traceback.print_exc() import shutil import subprocess from subprocess import Popen from tools.assets import css, js, top_html from tools.i18n.i18n import I18nAuto, scan_language_list language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto" os.environ["language"] = language i18n = I18nAuto(language=language) from multiprocessing import cpu_count from config import ( GPU_INDEX, GPU_INFOS, IS_GPU, exp_root, infer_device, is_half, is_share, memset, python_exec, webui_port_infer_tts, webui_port_main, webui_port_subfix, webui_port_uvr5, ) from tools import my_utils from tools.my_utils import check_details, check_for_existance os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu import gradio as gr n_cpu = cpu_count() set_gpu_numbers = GPU_INDEX gpu_infos = GPU_INFOS mem = memset is_gpu_ok = IS_GPU v3v4set = {"v3", "v4"} def set_default(): global \ default_batch_size, \ default_max_batch_size, \ gpu_info, \ default_sovits_epoch, \ default_sovits_save_every_epoch, \ max_sovits_epoch, \ max_sovits_save_every_epoch, \ default_batch_size_s1, \ if_force_ckpt if_force_ckpt = False gpu_info = "\n".join(gpu_infos) if is_gpu_ok: minmem = min(mem) default_batch_size = int(minmem // 2 if version not in v3v4set else minmem // 8) default_batch_size_s1 = int(minmem // 2) else: default_batch_size = default_batch_size_s1 = int(psutil.virtual_memory().total / 1024 / 1024 / 1024 / 4) if version not in v3v4set: default_sovits_epoch = 8 default_sovits_save_every_epoch = 4 max_sovits_epoch = 25 # 40 max_sovits_save_every_epoch = 25 # 10 else: default_sovits_epoch = 2 default_sovits_save_every_epoch = 1 max_sovits_epoch = 16 # 40 # 3 #训太多=作死 max_sovits_save_every_epoch = 10 # 10 # 3 default_batch_size = max(1, default_batch_size) default_batch_size_s1 = max(1, default_batch_size_s1) default_max_batch_size = default_batch_size * 3 set_default() gpus = "-".join(map(str, GPU_INDEX)) default_gpu_numbers = infer_device.index def fix_gpu_number(input): # 将越界的number强制改到界内 try: if int(input) not in set_gpu_numbers: return default_gpu_numbers 
except: return input return input def fix_gpu_numbers(inputs): output = [] try: for input in inputs.split(","): output.append(str(fix_gpu_number(input))) return ",".join(output) except: return inputs from config import pretrained_gpt_name, pretrained_sovits_name def check_pretrained_is_exist(version): pretrained_model_list = ( pretrained_sovits_name[version], pretrained_sovits_name[version].replace("s2G", "s2D"), pretrained_gpt_name[version], "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large", "GPT_SoVITS/pretrained_models/chinese-hubert-base", ) _ = "" for i in pretrained_model_list: if "s2Dv3" not in i and "s2Dv4" not in i and os.path.exists(i) == False: _ += f"\n {i}" if _: print("warning: ", i18n("以下模型不存在:") + _) check_pretrained_is_exist(version) for key in pretrained_sovits_name.keys(): if os.path.exists(pretrained_sovits_name[key]) == False: pretrained_sovits_name[key] = "" for key in pretrained_gpt_name.keys(): if os.path.exists(pretrained_gpt_name[key]) == False: pretrained_gpt_name[key] = "" from config import ( GPT_weight_root, GPT_weight_version2root, SoVITS_weight_root, SoVITS_weight_version2root, change_choices, get_weights_names, ) for root in SoVITS_weight_root + GPT_weight_root: os.makedirs(root, exist_ok=True) SoVITS_names, GPT_names = get_weights_names() p_label = None p_uvr5 = None p_asr = None p_denoise = None p_tts_inference = None def kill_proc_tree(pid, including_parent=True): try: parent = psutil.Process(pid) except psutil.NoSuchProcess: # Process already terminated return children = parent.children(recursive=True) for child in children: try: os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL except OSError: pass if including_parent: try: os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL except OSError: pass system = platform.system() def kill_process(pid, process_name=""): if system == "Windows": cmd = "taskkill /t /f /pid %s" % pid # os.system(cmd) subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) else: kill_proc_tree(pid) print(process_name + i18n("进程已终止")) def process_info(process_name="", indicator=""): if indicator == "opened": return process_name + i18n("已开启") elif indicator == "open": return i18n("开启") + process_name elif indicator == "closed": return process_name + i18n("已关闭") elif indicator == "close": return i18n("关闭") + process_name elif indicator == "running": return process_name + i18n("运行中") elif indicator == "occupy": return process_name + i18n("占用中") + "," + i18n("需先终止才能开启下一次任务") elif indicator == "finish": return process_name + i18n("已完成") elif indicator == "failed": return process_name + i18n("失败") elif indicator == "info": return process_name + i18n("进程输出信息") else: return process_name process_name_subfix = i18n("音频标注WebUI") def change_label(path_list): global p_label if p_label is None: check_for_existance([path_list]) path_list = my_utils.clean_path(path_list) cmd = '"%s" -s tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s' % ( python_exec, path_list, webui_port_subfix, is_share, ) yield ( process_info(process_name_subfix, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, ) print(cmd) p_label = Popen(cmd, shell=True) else: kill_process(p_label.pid, process_name_subfix) p_label = None yield ( process_info(process_name_subfix, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) process_name_uvr5 = i18n("人声分离WebUI") def change_uvr5(): global p_uvr5 if p_uvr5 is None: cmd = '"%s" -s 
tools/uvr5/webui.py "%s" %s %s %s' % ( python_exec, infer_device, is_half, webui_port_uvr5, is_share, ) yield ( process_info(process_name_uvr5, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, ) print(cmd) p_uvr5 = Popen(cmd, shell=True) else: kill_process(p_uvr5.pid, process_name_uvr5) p_uvr5 = None yield ( process_info(process_name_uvr5, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) process_name_tts = i18n("TTS推理WebUI") def change_tts_inference(bert_path, cnhubert_base_path, gpu_number, gpt_path, sovits_path, batched_infer_enabled): global p_tts_inference if batched_infer_enabled: cmd = '"%s" -s GPT_SoVITS/inference_webui_fast.py "%s"' % (python_exec, language) else: cmd = '"%s" -s GPT_SoVITS/inference_webui.py "%s"' % (python_exec, language) # #####v3暂不支持加速推理 # if version=="v3": # cmd = '"%s" GPT_SoVITS/inference_webui.py "%s"'%(python_exec, language) if p_tts_inference is None: os.environ["gpt_path"] = gpt_path os.environ["sovits_path"] = sovits_path os.environ["cnhubert_base_path"] = cnhubert_base_path os.environ["bert_path"] = bert_path os.environ["_CUDA_VISIBLE_DEVICES"] = str(fix_gpu_number(gpu_number)) os.environ["is_half"] = str(is_half) os.environ["infer_ttswebui"] = str(webui_port_infer_tts) os.environ["is_share"] = str(is_share) yield ( process_info(process_name_tts, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, ) print(cmd) p_tts_inference = Popen(cmd, shell=True) else: kill_process(p_tts_inference.pid, process_name_tts) p_tts_inference = None yield ( process_info(process_name_tts, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) from tools.asr.config import asr_dict process_name_asr = i18n("语音识别") def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang, asr_precision): global p_asr if p_asr is None: asr_inp_dir = my_utils.clean_path(asr_inp_dir) asr_opt_dir = my_utils.clean_path(asr_opt_dir) check_for_existance([asr_inp_dir]) cmd = f'"{python_exec}" -s tools/asr/{asr_dict[asr_model]["path"]}' cmd += f' -i "{asr_inp_dir}"' cmd += f' -o "{asr_opt_dir}"' cmd += f" -s {asr_model_size}" cmd += f" -l {asr_lang}" cmd += f" -p {asr_precision}" output_file_name = os.path.basename(asr_inp_dir) output_folder = asr_opt_dir or "output/asr_opt" output_file_path = os.path.abspath(f"{output_folder}/{output_file_name}.list") yield ( process_info(process_name_asr, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}, ) print(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() p_asr = None yield ( process_info(process_name_asr, "finish"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, {"__type__": "update", "value": output_file_path}, {"__type__": "update", "value": output_file_path}, {"__type__": "update", "value": asr_inp_dir}, ) else: yield ( process_info(process_name_asr, "occupy"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}, ) def close_asr(): global p_asr if p_asr is not None: kill_process(p_asr.pid, process_name_asr) p_asr = None return ( process_info(process_name_asr, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) process_name_denoise = i18n("语音降噪") def 
open_denoise(denoise_inp_dir, denoise_opt_dir): global p_denoise if p_denoise == None: denoise_inp_dir = my_utils.clean_path(denoise_inp_dir) denoise_opt_dir = my_utils.clean_path(denoise_opt_dir) check_for_existance([denoise_inp_dir]) cmd = '"%s" -s tools/cmd-denoise.py -i "%s" -o "%s" -p %s' % ( python_exec, denoise_inp_dir, denoise_opt_dir, "float16" if is_half == True else "float32", ) yield ( process_info(process_name_denoise, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, ) print(cmd) p_denoise = Popen(cmd, shell=True) p_denoise.wait() p_denoise = None yield ( process_info(process_name_denoise, "finish"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, {"__type__": "update", "value": denoise_opt_dir}, {"__type__": "update", "value": denoise_opt_dir}, ) else: yield ( process_info(process_name_denoise, "occupy"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, ) def close_denoise(): global p_denoise if p_denoise is not None: kill_process(p_denoise.pid, process_name_denoise) p_denoise = None return ( process_info(process_name_denoise, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) p_train_SoVITS = None process_name_sovits = i18n("SoVITS训练") def open1Ba( version, batch_size, total_epoch, exp_name, text_low_lr_rate, if_save_latest, if_save_every_weights, save_every_epoch, gpu_numbers1Ba, pretrained_s2G, pretrained_s2D, if_grad_ckpt, lora_rank, ): global p_train_SoVITS if p_train_SoVITS == None: exp_name = exp_name.rstrip(" ") config_file = ( "GPT_SoVITS/configs/s2.json" if version not in {"v2Pro", "v2ProPlus"} else f"GPT_SoVITS/configs/s2{version}.json" ) with open(config_file) as f: data = f.read() data = json.loads(data) s2_dir = "%s/%s" % (exp_root, exp_name) os.makedirs("%s/logs_s2_%s" % (s2_dir, version), exist_ok=True) if check_for_existance([s2_dir], is_train=True): check_details([s2_dir], is_train=True) if is_half == False: data["train"]["fp16_run"] = False batch_size = max(1, batch_size // 2) data["train"]["batch_size"] = batch_size data["train"]["epochs"] = total_epoch data["train"]["text_low_lr_rate"] = text_low_lr_rate data["train"]["pretrained_s2G"] = pretrained_s2G data["train"]["pretrained_s2D"] = pretrained_s2D data["train"]["if_save_latest"] = if_save_latest data["train"]["if_save_every_weights"] = if_save_every_weights data["train"]["save_every_epoch"] = save_every_epoch data["train"]["gpu_numbers"] = gpu_numbers1Ba data["train"]["grad_ckpt"] = if_grad_ckpt data["train"]["lora_rank"] = lora_rank data["model"]["version"] = version data["data"]["exp_dir"] = data["s2_ckpt_dir"] = s2_dir data["save_weight_dir"] = SoVITS_weight_version2root[version] data["name"] = exp_name data["version"] = version tmp_config_path = "%s/tmp_s2.json" % tmp with open(tmp_config_path, "w") as f: f.write(json.dumps(data)) if version in ["v1", "v2", "v2Pro", "v2ProPlus"]: cmd = '"%s" -s GPT_SoVITS/s2_train.py --config "%s"' % (python_exec, tmp_config_path) else: cmd = '"%s" -s GPT_SoVITS/s2_train_v3_lora.py --config "%s"' % (python_exec, tmp_config_path) yield ( process_info(process_name_sovits, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, ) print(cmd) p_train_SoVITS = Popen(cmd, shell=True) p_train_SoVITS.wait() p_train_SoVITS = None 
SoVITS_dropdown_update, GPT_dropdown_update = change_choices() yield ( process_info(process_name_sovits, "finish"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, SoVITS_dropdown_update, GPT_dropdown_update, ) else: yield ( process_info(process_name_sovits, "occupy"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, ) def close1Ba(): global p_train_SoVITS if p_train_SoVITS is not None: kill_process(p_train_SoVITS.pid, process_name_sovits) p_train_SoVITS = None return ( process_info(process_name_sovits, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) p_train_GPT = None process_name_gpt = i18n("GPT训练") def open1Bb( batch_size, total_epoch, exp_name, if_dpo, if_save_latest, if_save_every_weights, save_every_epoch, gpu_numbers, pretrained_s1, ): global p_train_GPT if p_train_GPT == None: exp_name = exp_name.rstrip(" ") with open( "GPT_SoVITS/configs/s1longer.yaml" if version == "v1" else "GPT_SoVITS/configs/s1longer-v2.yaml" ) as f: data = f.read() data = yaml.load(data, Loader=yaml.FullLoader) s1_dir = "%s/%s" % (exp_root, exp_name) os.makedirs("%s/logs_s1" % (s1_dir), exist_ok=True) if check_for_existance([s1_dir], is_train=True): check_details([s1_dir], is_train=True) if is_half == False: data["train"]["precision"] = "32" batch_size = max(1, batch_size // 2) data["train"]["batch_size"] = batch_size data["train"]["epochs"] = total_epoch data["pretrained_s1"] = pretrained_s1 data["train"]["save_every_n_epoch"] = save_every_epoch data["train"]["if_save_every_weights"] = if_save_every_weights data["train"]["if_save_latest"] = if_save_latest data["train"]["if_dpo"] = if_dpo data["train"]["half_weights_save_dir"] = GPT_weight_version2root[version] data["train"]["exp_name"] = exp_name data["train_semantic_path"] = "%s/6-name2semantic.tsv" % s1_dir data["train_phoneme_path"] = "%s/2-name2text.txt" % s1_dir data["output_dir"] = "%s/logs_s1_%s" % (s1_dir, version) # data["version"]=version os.environ["_CUDA_VISIBLE_DEVICES"] = str(fix_gpu_numbers(gpu_numbers.replace("-", ","))) os.environ["hz"] = "25hz" tmp_config_path = "%s/tmp_s1.yaml" % tmp with open(tmp_config_path, "w") as f: f.write(yaml.dump(data, default_flow_style=False)) # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir) cmd = '"%s" -s GPT_SoVITS/s1_train.py --config_file "%s" ' % (python_exec, tmp_config_path) yield ( process_info(process_name_gpt, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, ) print(cmd) p_train_GPT = Popen(cmd, shell=True) p_train_GPT.wait() p_train_GPT = None SoVITS_dropdown_update, GPT_dropdown_update = change_choices() yield ( process_info(process_name_gpt, "finish"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, SoVITS_dropdown_update, GPT_dropdown_update, ) else: yield ( process_info(process_name_gpt, "occupy"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, ) def close1Bb(): global p_train_GPT if p_train_GPT is not None: kill_process(p_train_GPT.pid, process_name_gpt) p_train_GPT = None return ( process_info(process_name_gpt, "closed"), {"__type__": "update", "visible": 
True}, {"__type__": "update", "visible": False}, ) ps_slice = [] process_name_slice = i18n("语音切分") def open_slice(inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, n_parts): global ps_slice inp = my_utils.clean_path(inp) opt_root = my_utils.clean_path(opt_root) check_for_existance([inp]) if os.path.exists(inp) == False: yield ( i18n("输入路径不存在"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}, ) return if os.path.isfile(inp): n_parts = 1 elif os.path.isdir(inp): pass else: yield ( i18n("输入路径存在但不可用"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}, ) return if ps_slice == []: for i_part in range(n_parts): cmd = '"%s" -s tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s' % ( python_exec, inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts, ) print(cmd) p = Popen(cmd, shell=True) ps_slice.append(p) yield ( process_info(process_name_slice, "opened"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}, ) for p in ps_slice: p.wait() ps_slice = [] yield ( process_info(process_name_slice, "finish"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, {"__type__": "update", "value": opt_root}, {"__type__": "update", "value": opt_root}, {"__type__": "update", "value": opt_root}, ) else: yield ( process_info(process_name_slice, "occupy"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}, ) def close_slice(): global ps_slice if ps_slice != []: for p_slice in ps_slice: try: kill_process(p_slice.pid, process_name_slice) except: traceback.print_exc() ps_slice = [] return ( process_info(process_name_slice, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) ps1a = [] process_name_1a = i18n("文本分词与特征提取") def open1a(inp_text, inp_wav_dir, exp_name, gpu_numbers, bert_pretrained_dir): global ps1a inp_text = my_utils.clean_path(inp_text) inp_wav_dir = my_utils.clean_path(inp_wav_dir) if check_for_existance([inp_text, inp_wav_dir], is_dataset_processing=True): check_details([inp_text, inp_wav_dir], is_dataset_processing=True) exp_name = exp_name.rstrip(" ") if ps1a == []: opt_dir = "%s/%s" % (exp_root, exp_name) config = { "inp_text": inp_text, "inp_wav_dir": inp_wav_dir, "exp_name": exp_name, "opt_dir": opt_dir, "bert_pretrained_dir": bert_pretrained_dir, } gpu_names = gpu_numbers.split("-") all_parts = len(gpu_names) for i_part in range(all_parts): config.update( { "i_part": str(i_part), "all_parts": str(all_parts), "_CUDA_VISIBLE_DEVICES": str(fix_gpu_number(gpu_names[i_part])), "is_half": str(is_half), } ) os.environ.update(config) cmd = '"%s" -s GPT_SoVITS/prepare_datasets/1-get-text.py' % python_exec print(cmd) p = Popen(cmd, shell=True) ps1a.append(p) yield ( process_info(process_name_1a, "running"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, ) for p in ps1a: p.wait() opt = [] for i_part in range(all_parts): txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) with open(txt_path, "r", encoding="utf8") as f: opt += f.read().strip("\n").split("\n") os.remove(txt_path) path_text = 
"%s/2-name2text.txt" % opt_dir with open(path_text, "w", encoding="utf8") as f: f.write("\n".join(opt) + "\n") ps1a = [] if len("".join(opt)) > 0: yield ( process_info(process_name_1a, "finish"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) else: yield ( process_info(process_name_1a, "failed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) else: yield ( process_info(process_name_1a, "occupy"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, ) def close1a(): global ps1a if ps1a != []: for p1a in ps1a: try: kill_process(p1a.pid, process_name_1a) except: traceback.print_exc() ps1a = [] return ( process_info(process_name_1a, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) sv_path = "GPT_SoVITS/pretrained_models/sv/pretrained_eres2netv2w24s4ep4.ckpt" ps1b = [] process_name_1b = i18n("语音自监督特征提取") def open1b(version, inp_text, inp_wav_dir, exp_name, gpu_numbers, ssl_pretrained_dir): global ps1b inp_text = my_utils.clean_path(inp_text) inp_wav_dir = my_utils.clean_path(inp_wav_dir) if check_for_existance([inp_text, inp_wav_dir], is_dataset_processing=True): check_details([inp_text, inp_wav_dir], is_dataset_processing=True) exp_name = exp_name.rstrip(" ") if ps1b == []: config = { "inp_text": inp_text, "inp_wav_dir": inp_wav_dir, "exp_name": exp_name, "opt_dir": "%s/%s" % (exp_root, exp_name), "cnhubert_base_dir": ssl_pretrained_dir, "sv_path": sv_path, "is_half": str(is_half), } gpu_names = gpu_numbers.split("-") all_parts = len(gpu_names) for i_part in range(all_parts): config.update( { "i_part": str(i_part), "all_parts": str(all_parts), "_CUDA_VISIBLE_DEVICES": str(fix_gpu_number(gpu_names[i_part])), } ) os.environ.update(config) cmd = '"%s" -s GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py' % python_exec print(cmd) p = Popen(cmd, shell=True) ps1b.append(p) yield ( process_info(process_name_1b, "running"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, ) for p in ps1b: p.wait() ps1b = [] if "Pro" in version: for i_part in range(all_parts): config.update( { "i_part": str(i_part), "all_parts": str(all_parts), "_CUDA_VISIBLE_DEVICES": str(fix_gpu_number(gpu_names[i_part])), } ) os.environ.update(config) cmd = '"%s" -s GPT_SoVITS/prepare_datasets/2-get-sv.py' % python_exec print(cmd) p = Popen(cmd, shell=True) ps1b.append(p) for p in ps1b: p.wait() ps1b = [] yield ( process_info(process_name_1b, "finish"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) else: yield ( process_info(process_name_1b, "occupy"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, ) def close1b(): global ps1b if ps1b != []: for p1b in ps1b: try: kill_process(p1b.pid, process_name_1b) except: traceback.print_exc() ps1b = [] return ( process_info(process_name_1b, "closed"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}, ) ps1c = [] process_name_1c = i18n("语义Token提取") def open1c(version, inp_text, inp_wav_dir, exp_name, gpu_numbers, pretrained_s2G_path): global ps1c inp_text = my_utils.clean_path(inp_text) if check_for_existance([inp_text, inp_wav_dir], is_dataset_processing=True): check_details([inp_text, inp_wav_dir], is_dataset_processing=True) exp_name = exp_name.rstrip(" ") if ps1c == []: opt_dir = "%s/%s" % (exp_root, exp_name) config_file = ( "GPT_SoVITS/configs/s2.json" if version not in {"v2Pro", "v2ProPlus"} else 
f"GPT_SoVITS/configs/s2{version}.json" ) config = { "inp_text": inp_text, "exp_name": exp_name, "opt_dir": opt_dir, "pretrained_s2G": pretrained_s2G_path, "s2config_path": config_file, "is_half": str(is_half), } gpu_names = gpu_numbers.split("-") all_parts = len(gpu_names) for i_part in range(all_parts): config.update( { "i_part": str(i_part), "all_parts": str(all_parts), "_CUDA_VISIBLE_DEVICES": str(fix_gpu_number(gpu_names[i_part])), } ) os.environ.update(config)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
true
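webui.py launches every sub-tool (labeling, UVR5, TTS inference, ASR, training) as a shell subprocess and tears it down by terminating the whole process tree: `kill_proc_tree` on POSIX, `taskkill /t /f` on Windows. A standalone sketch of that pattern under POSIX signal semantics, assuming `psutil` is installed; the throwaway sleeper process is purely illustrative:

```python
import os
import signal
import subprocess
import sys

import psutil


def kill_proc_tree(pid, including_parent=True):
    # Same approach as webui.py: SIGTERM every descendant first, then the parent.
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return  # process already terminated
    for child in parent.children(recursive=True):
        try:
            os.kill(child.pid, signal.SIGTERM)
        except OSError:
            pass
    if including_parent:
        try:
            os.kill(parent.pid, signal.SIGTERM)
        except OSError:
            pass


# Launch a throwaway worker the way webui.py launches its tools, then kill its tree.
proc = subprocess.Popen('"%s" -c "import time; time.sleep(60)"' % sys.executable, shell=True)
kill_proc_tree(proc.pid)
```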
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/config.py
config.py
import os
import re
import sys

import torch

from tools.i18n.i18n import I18nAuto

i18n = I18nAuto(language=os.environ.get("language", "Auto"))

pretrained_sovits_name = {
    "v1": "GPT_SoVITS/pretrained_models/s2G488k.pth",
    "v2": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
    "v3": "GPT_SoVITS/pretrained_models/s2Gv3.pth",  ### v3/v4 should also check the vocoder; skipping that for now...
    "v4": "GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth",
    "v2Pro": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2Pro.pth",
    "v2ProPlus": "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2ProPlus.pth",
}
pretrained_gpt_name = {
    "v1": "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
    "v2": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
    "v3": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
    "v4": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
    "v2Pro": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
    "v2ProPlus": "GPT_SoVITS/pretrained_models/s1v3.ckpt",
}

name2sovits_path = {
    # i18n("不训练直接推v1底模!"): "GPT_SoVITS/pretrained_models/s2G488k.pth",
    i18n("不训练直接推v2底模!"): "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
    # i18n("不训练直接推v3底模!"): "GPT_SoVITS/pretrained_models/s2Gv3.pth",
    # i18n("不训练直接推v4底模!"): "GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth",
    i18n("不训练直接推v2Pro底模!"): "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2Pro.pth",
    i18n("不训练直接推v2ProPlus底模!"): "GPT_SoVITS/pretrained_models/v2Pro/s2Gv2ProPlus.pth",
}
name2gpt_path = {
    # i18n("不训练直接推v1底模!"): "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
    i18n(
        "不训练直接推v2底模!"
    ): "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
    i18n("不训练直接推v3底模!"): "GPT_SoVITS/pretrained_models/s1v3.ckpt",
}

SoVITS_weight_root = [
    "SoVITS_weights",
    "SoVITS_weights_v2",
    "SoVITS_weights_v3",
    "SoVITS_weights_v4",
    "SoVITS_weights_v2Pro",
    "SoVITS_weights_v2ProPlus",
]
GPT_weight_root = [
    "GPT_weights",
    "GPT_weights_v2",
    "GPT_weights_v3",
    "GPT_weights_v4",
    "GPT_weights_v2Pro",
    "GPT_weights_v2ProPlus",
]
SoVITS_weight_version2root = {
    "v1": "SoVITS_weights",
    "v2": "SoVITS_weights_v2",
    "v3": "SoVITS_weights_v3",
    "v4": "SoVITS_weights_v4",
    "v2Pro": "SoVITS_weights_v2Pro",
    "v2ProPlus": "SoVITS_weights_v2ProPlus",
}
GPT_weight_version2root = {
    "v1": "GPT_weights",
    "v2": "GPT_weights_v2",
    "v3": "GPT_weights_v3",
    "v4": "GPT_weights_v4",
    "v2Pro": "GPT_weights_v2Pro",
    "v2ProPlus": "GPT_weights_v2ProPlus",
}


def custom_sort_key(s):
    # use a regex to split the string into digit and non-digit runs
    parts = re.split(r"(\d+)", s)
    # convert the digit runs to integers, leave the non-digit runs unchanged
    parts = [int(part) if part.isdigit() else part for part in parts]
    return parts


def get_weights_names():
    SoVITS_names = []
    for key in name2sovits_path:
        if os.path.exists(name2sovits_path[key]):
            SoVITS_names.append(key)
    for path in SoVITS_weight_root:
        if not os.path.exists(path):
            continue
        for name in os.listdir(path):
            if name.endswith(".pth"):
                SoVITS_names.append("%s/%s" % (path, name))
    if not SoVITS_names:
        SoVITS_names = [""]
    GPT_names = []
    for key in name2gpt_path:
        if os.path.exists(name2gpt_path[key]):
            GPT_names.append(key)
    for path in GPT_weight_root:
        if not os.path.exists(path):
            continue
        for name in os.listdir(path):
            if name.endswith(".ckpt"):
                GPT_names.append("%s/%s" % (path, name))
    SoVITS_names = sorted(SoVITS_names, key=custom_sort_key)
    GPT_names = sorted(GPT_names, key=custom_sort_key)
    if not GPT_names:
        GPT_names = [""]
    return SoVITS_names, GPT_names


def change_choices():
    SoVITS_names, GPT_names = get_weights_names()
    return {"choices": SoVITS_names, "__type__": "update"}, {
        "choices": GPT_names,
        "__type__": "update",
    }


# models specified for inference
sovits_path = ""
gpt_path = ""
is_half_str = os.environ.get("is_half", "True")
is_half = True if is_half_str.lower() == "true" else False
is_share_str = os.environ.get("is_share", "False")
is_share = True if is_share_str.lower() == "true" else False

cnhubert_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base"
bert_path = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
pretrained_sovits_path = "GPT_SoVITS/pretrained_models/s2G488k.pth"
pretrained_gpt_path = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"

exp_root = "logs"
python_exec = sys.executable or "python"

webui_port_main = 9874
webui_port_uvr5 = 9873
webui_port_infer_tts = 9872
webui_port_subfix = 9871
api_port = 9880


# Thanks to the contribution of @Karasukaigan and @XXXXRT666
def get_device_dtype_sm(idx: int) -> tuple[torch.device, torch.dtype, float, float]:
    cpu = torch.device("cpu")
    cuda = torch.device(f"cuda:{idx}")
    if not torch.cuda.is_available():
        return cpu, torch.float32, 0.0, 0.0
    device_idx = idx
    capability = torch.cuda.get_device_capability(device_idx)
    name = torch.cuda.get_device_name(device_idx)
    mem_bytes = torch.cuda.get_device_properties(device_idx).total_memory
    mem_gb = mem_bytes / (1024**3) + 0.4
    major, minor = capability
    sm_version = major + minor / 10.0
    is_16_series = bool(re.search(r"16\d{2}", name)) and sm_version == 7.5
    if mem_gb < 4 or sm_version < 5.3:
        return cpu, torch.float32, 0.0, 0.0
    if sm_version == 6.1 or is_16_series == True:
        return cuda, torch.float32, sm_version, mem_gb
    if sm_version > 6.1:
        return cuda, torch.float16, sm_version, mem_gb
    return cpu, torch.float32, 0.0, 0.0


IS_GPU = True
GPU_INFOS: list[str] = []
GPU_INDEX: set[int] = set()
GPU_COUNT = torch.cuda.device_count()
CPU_INFO: str = "0\tCPU " + i18n("CPU训练,较慢")
tmp: list[tuple[torch.device, torch.dtype, float, float]] = []
memset: set[float] = set()

for i in range(max(GPU_COUNT, 1)):
    tmp.append(get_device_dtype_sm(i))

for j in tmp:
    device = j[0]
    memset.add(j[3])
    if device.type != "cpu":
        GPU_INFOS.append(f"{device.index}\t{torch.cuda.get_device_name(device.index)}")
        GPU_INDEX.add(device.index)

if not GPU_INFOS:
    IS_GPU = False
    GPU_INFOS.append(CPU_INFO)
    GPU_INDEX.add(0)

infer_device = max(tmp, key=lambda x: (x[2], x[3]))[0]
is_half = any(dtype == torch.float16 for _, dtype, _, _ in tmp)


class Config:
    def __init__(self):
        self.sovits_path = sovits_path
        self.gpt_path = gpt_path
        self.is_half = is_half

        self.cnhubert_path = cnhubert_path
        self.bert_path = bert_path
        self.pretrained_sovits_path = pretrained_sovits_path
        self.pretrained_gpt_path = pretrained_gpt_path

        self.exp_root = exp_root
        self.python_exec = python_exec
        self.infer_device = infer_device

        self.webui_port_main = webui_port_main
        self.webui_port_uvr5 = webui_port_uvr5
        self.webui_port_infer_tts = webui_port_infer_tts
        self.webui_port_subfix = webui_port_subfix

        self.api_port = api_port
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
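The `custom_sort_key` helper above gives the weight lists a natural sort order; a minimal standalone check (standard library only, file names hypothetical):

```python
# Natural-sort check for custom_sort_key: digit runs compare as integers,
# so "v10" sorts after "v2". The file names below are hypothetical.
import re


def custom_sort_key(s):
    parts = re.split(r"(\d+)", s)
    return [int(part) if part.isdigit() else part for part in parts]


names = ["SoVITS_weights/model_v10.pth", "SoVITS_weights/model_v2.pth"]
print(sorted(names, key=custom_sort_key))
# -> ['SoVITS_weights/model_v2.pth', 'SoVITS_weights/model_v10.pth']
```

A plain `sorted(names)` would put `model_v10.pth` first, because `'1' < '2'` lexicographically.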
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/api_v2.py
api_v2.py
""" # WebAPI文档 ` python api_v2.py -a 127.0.0.1 -p 9880 -c GPT_SoVITS/configs/tts_infer.yaml ` ## 执行参数: `-a` - `绑定地址, 默认"127.0.0.1"` `-p` - `绑定端口, 默认9880` `-c` - `TTS配置文件路径, 默认"GPT_SoVITS/configs/tts_infer.yaml"` ## 调用: ### 推理 endpoint: `/tts` GET: ``` http://127.0.0.1:9880/tts?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_lang=zh&ref_audio_path=archive_jingyuan_1.wav&prompt_lang=zh&prompt_text=我是「罗浮」云骑将军景元。不必拘谨,「将军」只是一时的身份,你称呼我景元便可&text_split_method=cut5&batch_size=1&media_type=wav&streaming_mode=true ``` POST: ```json { "text": "", # str.(required) text to be synthesized "text_lang: "", # str.(required) language of the text to be synthesized "ref_audio_path": "", # str.(required) reference audio path "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion "prompt_text": "", # str.(optional) prompt text for the reference audio "prompt_lang": "", # str.(required) language of the prompt text for the reference audio "top_k": 15, # int. top k sampling "top_p": 1, # float. top p sampling "temperature": 1, # float. temperature for sampling "text_split_method": "cut5", # str. text split method, see text_segmentation_method.py for details. "batch_size": 1, # int. batch size for inference "batch_threshold": 0.75, # float. threshold for batch splitting. "split_bucket": True, # bool. whether to split the batch into multiple buckets. "speed_factor":1.0, # float. control the speed of the synthesized audio. "fragment_interval":0.3, # float. to control the interval of the audio fragment. "seed": -1, # int. random seed for reproducibility. "parallel_infer": True, # bool. whether to use parallel inference. "repetition_penalty": 1.35, # float. repetition penalty for T2S model. "sample_steps": 32, # int. number of sampling steps for VITS model V3. "super_sampling": False, # bool. whether to use super-sampling for audio when using VITS model V3. "streaming_mode": False, # bool or int. return audio chunk by chunk.T he available options are: 0,1,2,3 or True/False (0/False: Disabled | 1/True: Best Quality, Slowest response speed (old version streaming_mode) | 2: Medium Quality, Slow response speed | 3: Lower Quality, Faster response speed ) "overlap_length": 2, # int. overlap length of semantic tokens for streaming mode. "min_chunk_length": 16, # int. The minimum chunk length of semantic tokens for streaming mode. 
(affects audio chunk size) } ``` RESP: 成功: 直接返回 wav 音频流, http code 200 失败: 返回包含错误信息的 json, http code 400 ### 命令控制 endpoint: `/control` command: "restart": 重新运行 "exit": 结束运行 GET: ``` http://127.0.0.1:9880/control?command=restart ``` POST: ```json { "command": "restart" } ``` RESP: 无 ### 切换GPT模型 endpoint: `/set_gpt_weights` GET: ``` http://127.0.0.1:9880/set_gpt_weights?weights_path=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt ``` RESP: 成功: 返回"success", http code 200 失败: 返回包含错误信息的 json, http code 400 ### 切换Sovits模型 endpoint: `/set_sovits_weights` GET: ``` http://127.0.0.1:9880/set_sovits_weights?weights_path=GPT_SoVITS/pretrained_models/s2G488k.pth ``` RESP: 成功: 返回"success", http code 200 失败: 返回包含错误信息的 json, http code 400 """ import os import sys import traceback from typing import Generator, Union now_dir = os.getcwd() sys.path.append(now_dir) sys.path.append("%s/GPT_SoVITS" % (now_dir)) import argparse import subprocess import wave import signal import numpy as np import soundfile as sf from fastapi import FastAPI, Response from fastapi.responses import StreamingResponse, JSONResponse import uvicorn from io import BytesIO from tools.i18n.i18n import I18nAuto from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names from pydantic import BaseModel import threading # print(sys.path) i18n = I18nAuto() cut_method_names = get_cut_method_names() parser = argparse.ArgumentParser(description="GPT-SoVITS api") parser.add_argument("-c", "--tts_config", type=str, default="GPT_SoVITS/configs/tts_infer.yaml", help="tts_infer路径") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1") parser.add_argument("-p", "--port", type=int, default="9880", help="default: 9880") args = parser.parse_args() config_path = args.tts_config # device = args.device port = args.port host = args.bind_addr argv = sys.argv if config_path in [None, ""]: config_path = "GPT-SoVITS/configs/tts_infer.yaml" tts_config = TTS_Config(config_path) print(tts_config) tts_pipeline = TTS(tts_config) APP = FastAPI() class TTS_Request(BaseModel): text: str = None text_lang: str = None ref_audio_path: str = None aux_ref_audio_paths: list = None prompt_lang: str = None prompt_text: str = "" top_k: int = 15 top_p: float = 1 temperature: float = 1 text_split_method: str = "cut5" batch_size: int = 1 batch_threshold: float = 0.75 split_bucket: bool = True speed_factor: float = 1.0 fragment_interval: float = 0.3 seed: int = -1 media_type: str = "wav" streaming_mode: Union[bool, int] = False parallel_infer: bool = True repetition_penalty: float = 1.35 sample_steps: int = 32 super_sampling: bool = False overlap_length: int = 2 min_chunk_length: int = 16 def pack_ogg(io_buffer: BytesIO, data: np.ndarray, rate: int): # Author: AkagawaTsurunaki # Issue: # Stack overflow probabilistically occurs # when the function `sf_writef_short` of `libsndfile_64bit.dll` is called # using the Python library `soundfile` # Note: # This is an issue related to `libsndfile`, not this project itself. # It happens when you generate a large audio tensor (about 499804 frames in my PC) # and try to convert it to an ogg file. # Related: # https://github.com/RVC-Boss/GPT-SoVITS/issues/1199 # https://github.com/libsndfile/libsndfile/issues/1023 # https://github.com/bastibe/python-soundfile/issues/396 # Suggestion: # Or split the whole audio data into smaller audio segment to avoid stack overflow? 
def handle_pack_ogg(): with sf.SoundFile(io_buffer, mode="w", samplerate=rate, channels=1, format="ogg") as audio_file: audio_file.write(data) # See: https://docs.python.org/3/library/threading.html # The stack size of this thread is at least 32768 # If stack overflow error still occurs, just modify the `stack_size`. # stack_size = n * 4096, where n should be a positive integer. # Here we chose n = 4096. stack_size = 4096 * 4096 try: threading.stack_size(stack_size) pack_ogg_thread = threading.Thread(target=handle_pack_ogg) pack_ogg_thread.start() pack_ogg_thread.join() except RuntimeError as e: # If changing the thread stack size is unsupported, a RuntimeError is raised. print("RuntimeError: {}".format(e)) print("Changing the thread stack size is unsupported.") except ValueError as e: # If the specified stack size is invalid, a ValueError is raised and the stack size is unmodified. print("ValueError: {}".format(e)) print("The specified stack size is invalid.") return io_buffer def pack_raw(io_buffer: BytesIO, data: np.ndarray, rate: int): io_buffer.write(data.tobytes()) return io_buffer def pack_wav(io_buffer: BytesIO, data: np.ndarray, rate: int): io_buffer = BytesIO() sf.write(io_buffer, data, rate, format="wav") return io_buffer def pack_aac(io_buffer: BytesIO, data: np.ndarray, rate: int): process = subprocess.Popen( [ "ffmpeg", "-f", "s16le", # 输入16位有符号小端整数PCM "-ar", str(rate), # 设置采样率 "-ac", "1", # 单声道 "-i", "pipe:0", # 从管道读取输入 "-c:a", "aac", # 音频编码器为AAC "-b:a", "192k", # 比特率 "-vn", # 不包含视频 "-f", "adts", # 输出AAC数据流格式 "pipe:1", # 将输出写入管道 ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, _ = process.communicate(input=data.tobytes()) io_buffer.write(out) return io_buffer def pack_audio(io_buffer: BytesIO, data: np.ndarray, rate: int, media_type: str): if media_type == "ogg": io_buffer = pack_ogg(io_buffer, data, rate) elif media_type == "aac": io_buffer = pack_aac(io_buffer, data, rate) elif media_type == "wav": io_buffer = pack_wav(io_buffer, data, rate) else: io_buffer = pack_raw(io_buffer, data, rate) io_buffer.seek(0) return io_buffer # from https://huggingface.co/spaces/coqui/voice-chat-with-mistral/blob/main/app.py def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=32000): # This will create a wave header then append the frame input # It should be first on a streaming wav file # Other frames better should not have it (else you will hear some artifacts each chunk start) wav_buf = BytesIO() with wave.open(wav_buf, "wb") as vfout: vfout.setnchannels(channels) vfout.setsampwidth(sample_width) vfout.setframerate(sample_rate) vfout.writeframes(frame_input) wav_buf.seek(0) return wav_buf.read() def handle_control(command: str): if command == "restart": os.execl(sys.executable, sys.executable, *argv) elif command == "exit": os.kill(os.getpid(), signal.SIGTERM) exit(0) def check_params(req: dict): text: str = req.get("text", "") text_lang: str = req.get("text_lang", "") ref_audio_path: str = req.get("ref_audio_path", "") streaming_mode: bool = req.get("streaming_mode", False) media_type: str = req.get("media_type", "wav") prompt_lang: str = req.get("prompt_lang", "") text_split_method: str = req.get("text_split_method", "cut5") if ref_audio_path in [None, ""]: return JSONResponse(status_code=400, content={"message": "ref_audio_path is required"}) if text in [None, ""]: return JSONResponse(status_code=400, content={"message": "text is required"}) if text_lang in [None, ""]: return JSONResponse(status_code=400, content={"message": 
"text_lang is required"}) elif text_lang.lower() not in tts_config.languages: return JSONResponse( status_code=400, content={"message": f"text_lang: {text_lang} is not supported in version {tts_config.version}"}, ) if prompt_lang in [None, ""]: return JSONResponse(status_code=400, content={"message": "prompt_lang is required"}) elif prompt_lang.lower() not in tts_config.languages: return JSONResponse( status_code=400, content={"message": f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}"}, ) if media_type not in ["wav", "raw", "ogg", "aac"]: return JSONResponse(status_code=400, content={"message": f"media_type: {media_type} is not supported"}) # elif media_type == "ogg" and not streaming_mode: # return JSONResponse(status_code=400, content={"message": "ogg format is not supported in non-streaming mode"}) if text_split_method not in cut_method_names: return JSONResponse( status_code=400, content={"message": f"text_split_method:{text_split_method} is not supported"} ) return None async def tts_handle(req: dict): """ Text to speech handler. Args: req (dict): { "text": "", # str.(required) text to be synthesized "text_lang: "", # str.(required) language of the text to be synthesized "ref_audio_path": "", # str.(required) reference audio path "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion "prompt_text": "", # str.(optional) prompt text for the reference audio "prompt_lang": "", # str.(required) language of the prompt text for the reference audio "top_k": 15, # int. top k sampling "top_p": 1, # float. top p sampling "temperature": 1, # float. temperature for sampling "text_split_method": "cut5", # str. text split method, see text_segmentation_method.py for details. "batch_size": 1, # int. batch size for inference "batch_threshold": 0.75, # float. threshold for batch splitting. "split_bucket": True, # bool. whether to split the batch into multiple buckets. "speed_factor":1.0, # float. control the speed of the synthesized audio. "fragment_interval":0.3, # float. to control the interval of the audio fragment. "seed": -1, # int. random seed for reproducibility. "parallel_infer": True, # bool. whether to use parallel inference. "repetition_penalty": 1.35, # float. repetition penalty for T2S model. "sample_steps": 32, # int. number of sampling steps for VITS model V3. "super_sampling": False, # bool. whether to use super-sampling for audio when using VITS model V3. "streaming_mode": False, # bool or int. return audio chunk by chunk.T he available options are: 0,1,2,3 or True/False (0/False: Disabled | 1/True: Best Quality, Slowest response speed (old version streaming_mode) | 2: Medium Quality, Slow response speed | 3: Lower Quality, Faster response speed ) "overlap_length": 2, # int. overlap length of semantic tokens for streaming mode. "min_chunk_length": 16, # int. The minimum chunk length of semantic tokens for streaming mode. (affects audio chunk size) } returns: StreamingResponse: audio stream response. 
""" streaming_mode = req.get("streaming_mode", False) return_fragment = req.get("return_fragment", False) media_type = req.get("media_type", "wav") check_res = check_params(req) if check_res is not None: return check_res if streaming_mode == 0: streaming_mode = False return_fragment = False fixed_length_chunk = False elif streaming_mode == 1: streaming_mode = False return_fragment = True fixed_length_chunk = False elif streaming_mode == 2: streaming_mode = True return_fragment = False fixed_length_chunk = False elif streaming_mode == 3: streaming_mode = True return_fragment = False fixed_length_chunk = True else: return JSONResponse(status_code=400, content={"message": f"the value of streaming_mode must be 0, 1, 2, 3(int) or true/false(bool)"}) req["streaming_mode"] = streaming_mode req["return_fragment"] = return_fragment req["fixed_length_chunk"] = fixed_length_chunk print(f"{streaming_mode} {return_fragment} {fixed_length_chunk}") streaming_mode = streaming_mode or return_fragment try: tts_generator = tts_pipeline.run(req) if streaming_mode: def streaming_generator(tts_generator: Generator, media_type: str): if_frist_chunk = True for sr, chunk in tts_generator: if if_frist_chunk and media_type == "wav": yield wave_header_chunk(sample_rate=sr) media_type = "raw" if_frist_chunk = False yield pack_audio(BytesIO(), chunk, sr, media_type).getvalue() # _media_type = f"audio/{media_type}" if not (streaming_mode and media_type in ["wav", "raw"]) else f"audio/x-{media_type}" return StreamingResponse( streaming_generator( tts_generator, media_type, ), media_type=f"audio/{media_type}", ) else: sr, audio_data = next(tts_generator) audio_data = pack_audio(BytesIO(), audio_data, sr, media_type).getvalue() return Response(audio_data, media_type=f"audio/{media_type}") except Exception as e: return JSONResponse(status_code=400, content={"message": "tts failed", "Exception": str(e)}) @APP.get("/control") async def control(command: str = None): if command is None: return JSONResponse(status_code=400, content={"message": "command is required"}) handle_control(command) @APP.get("/tts") async def tts_get_endpoint( text: str = None, text_lang: str = None, ref_audio_path: str = None, aux_ref_audio_paths: list = None, prompt_lang: str = None, prompt_text: str = "", top_k: int = 15, top_p: float = 1, temperature: float = 1, text_split_method: str = "cut5", batch_size: int = 1, batch_threshold: float = 0.75, split_bucket: bool = True, speed_factor: float = 1.0, fragment_interval: float = 0.3, seed: int = -1, media_type: str = "wav", parallel_infer: bool = True, repetition_penalty: float = 1.35, sample_steps: int = 32, super_sampling: bool = False, streaming_mode: Union[bool, int] = False, overlap_length: int = 2, min_chunk_length: int = 16, ): req = { "text": text, "text_lang": text_lang.lower(), "ref_audio_path": ref_audio_path, "aux_ref_audio_paths": aux_ref_audio_paths, "prompt_text": prompt_text, "prompt_lang": prompt_lang.lower(), "top_k": top_k, "top_p": top_p, "temperature": temperature, "text_split_method": text_split_method, "batch_size": int(batch_size), "batch_threshold": float(batch_threshold), "speed_factor": float(speed_factor), "split_bucket": split_bucket, "fragment_interval": fragment_interval, "seed": seed, "media_type": media_type, "streaming_mode": streaming_mode, "parallel_infer": parallel_infer, "repetition_penalty": float(repetition_penalty), "sample_steps": int(sample_steps), "super_sampling": super_sampling, "overlap_length": int(overlap_length), "min_chunk_length": int(min_chunk_length), 
} return await tts_handle(req) @APP.post("/tts") async def tts_post_endpoint(request: TTS_Request): req = request.dict() return await tts_handle(req) @APP.get("/set_refer_audio") async def set_refer_aduio(refer_audio_path: str = None): try: tts_pipeline.set_ref_audio(refer_audio_path) except Exception as e: return JSONResponse(status_code=400, content={"message": "set refer audio failed", "Exception": str(e)}) return JSONResponse(status_code=200, content={"message": "success"}) # @APP.post("/set_refer_audio") # async def set_refer_aduio_post(audio_file: UploadFile = File(...)): # try: # # 检查文件类型,确保是音频文件 # if not audio_file.content_type.startswith("audio/"): # return JSONResponse(status_code=400, content={"message": "file type is not supported"}) # os.makedirs("uploaded_audio", exist_ok=True) # save_path = os.path.join("uploaded_audio", audio_file.filename) # # 保存音频文件到服务器上的一个目录 # with open(save_path , "wb") as buffer: # buffer.write(await audio_file.read()) # tts_pipeline.set_ref_audio(save_path) # except Exception as e: # return JSONResponse(status_code=400, content={"message": f"set refer audio failed", "Exception": str(e)}) # return JSONResponse(status_code=200, content={"message": "success"}) @APP.get("/set_gpt_weights") async def set_gpt_weights(weights_path: str = None): try: if weights_path in ["", None]: return JSONResponse(status_code=400, content={"message": "gpt weight path is required"}) tts_pipeline.init_t2s_weights(weights_path) except Exception as e: return JSONResponse(status_code=400, content={"message": "change gpt weight failed", "Exception": str(e)}) return JSONResponse(status_code=200, content={"message": "success"}) @APP.get("/set_sovits_weights") async def set_sovits_weights(weights_path: str = None): try: if weights_path in ["", None]: return JSONResponse(status_code=400, content={"message": "sovits weight path is required"}) tts_pipeline.init_vits_weights(weights_path) except Exception as e: return JSONResponse(status_code=400, content={"message": "change sovits weight failed", "Exception": str(e)}) return JSONResponse(status_code=200, content={"message": "success"}) if __name__ == "__main__": try: if host == "None": # 在调用时使用 -a None 参数,可以让api监听双栈 host = None uvicorn.run(app=APP, host=host, port=port, workers=1) except Exception: traceback.print_exc() os.kill(os.getpid(), signal.SIGTERM) exit(0)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
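A minimal client sketch for the `/tts` endpoint documented above, mirroring the parameters of the GET example; the host, port, and reference-audio file name come from that example and are otherwise assumptions:

```python
# Minimal /tts client sketch; endpoint and parameters follow the GET example
# above, and archive_jingyuan_1.wav is that example's (hypothetical) reference.
import requests

params = {
    "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
    "text_lang": "zh",
    "ref_audio_path": "archive_jingyuan_1.wav",
    "prompt_lang": "zh",
    "prompt_text": "我是「罗浮」云骑将军景元。不必拘谨,「将军」只是一时的身份,你称呼我景元便可",
    "text_split_method": "cut5",
    "media_type": "wav",
    "streaming_mode": "true",
}
with requests.get("http://127.0.0.1:9880/tts", params=params, stream=True, timeout=600) as resp:
    resp.raise_for_status()  # a 400 carries a JSON error message instead of audio
    with open("output.wav", "wb") as f:
        for chunk in resp.iter_content(chunk_size=4096):
            f.write(chunk)
```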
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/assets.py
tools/assets.py
js = """ function deleteTheme() { const params = new URLSearchParams(window.location.search); if (params.has('__theme')) { params.delete('__theme'); const newUrl = `${window.location.pathname}?${params.toString()}`; window.location.replace(newUrl); } } """ css = """ /* CSSStyleRule */ .markdown { padding: 6px 10px; } @media (prefers-color-scheme: light) { .markdown { background-color: lightblue; color: #000; } } @media (prefers-color-scheme: dark) { .markdown { background-color: #4b4b4b; color: rgb(244, 244, 245); } } ::selection { background: #ffc078 !important; } footer { height: 50px !important; /* 设置页脚高度 */ background-color: transparent !important; /* 背景透明 */ display: flex; justify-content: center; /* 居中对齐 */ align-items: center; /* 垂直居中 */ } footer * { display: none !important; /* 隐藏所有子元素 */ } """ top_html = """ <div align="center"> <div style="margin-bottom: 5px; font-size: 15px;">{}</div> <div style="display: flex; gap: 60px; justify-content: center;"> <a href="https://github.com/RVC-Boss/GPT-SoVITS" target="_blank"> <img src="https://img.shields.io/badge/GitHub-GPT--SoVITS-blue.svg?style=for-the-badge&logo=github" style="width: auto; height: 30px;"> </a> <a href="https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e" target="_blank"> <img src="https://img.shields.io/badge/简体中文-阅读文档-blue?style=for-the-badge&logo=googledocs&logoColor=white" style="width: auto; height: 30px;"> </a> <a href="https://lj1995-gpt-sovits-proplus.hf.space/" target="_blank"> <img src="https://img.shields.io/badge/免费在线体验-free_online_demo-yellow.svg?style=for-the-badge&logo=huggingface" style="width: auto; height: 30px;"> </a> <a href="https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e" target="_blank"> <img src="https://img.shields.io/badge/English-READ%20DOCS-blue?style=for-the-badge&logo=googledocs&logoColor=white" style="width: auto; height: 30px;"> </a> <a href="https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE" target="_blank"> <img src="https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge&logo=opensourceinitiative" style="width: auto; height: 30px;"> </a> </div> </div> """
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
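A hedged usage sketch for these assets; the `css`/`js` keyword arguments on `gr.Blocks` are an assumption based on the Gradio 4.x API, and the title string is illustrative:

```python
# Sketch only: assumes Gradio 4.x, where gr.Blocks accepts css= and js=;
# the title passed to top_html.format() is illustrative.
import gradio as gr

from tools.assets import css, js, top_html

with gr.Blocks(css=css, js=js, analytics_enabled=False) as demo:
    gr.HTML(top_html.format("GPT-SoVITS WebUI"))  # fills the single {} slot

demo.launch()
```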
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/audio_sr.py
tools/audio_sr.py
from __future__ import absolute_import, division, print_function, unicode_literals

import sys
import os

AP_BWE_main_dir_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "AP_BWE_main")
sys.path.append(AP_BWE_main_dir_path)

import json
import torch
import torchaudio.functional as aF
# from attrdict import AttrDict  # buggy in py3.10
from datasets1.dataset import amp_pha_stft, amp_pha_istft
from models.model import APNet_BWE_Model


class AP_BWE:
    def __init__(self, device, DictToAttrRecursive, checkpoint_file=None):
        if checkpoint_file is None:
            checkpoint_file = "%s/24kto48k/g_24kto48k.zip" % (AP_BWE_main_dir_path)
            if not os.path.exists(checkpoint_file):
                raise FileNotFoundError(checkpoint_file)
        config_file = os.path.join(os.path.split(checkpoint_file)[0], "config.json")
        with open(config_file) as f:
            data = f.read()
        json_config = json.loads(data)
        # h = AttrDict(json_config)
        h = DictToAttrRecursive(json_config)
        model = APNet_BWE_Model(h).to(device)
        state_dict = torch.load(checkpoint_file, map_location="cpu", weights_only=False)
        model.load_state_dict(state_dict["generator"])
        model.eval()
        self.device = device
        self.model = model
        self.h = h

    def to(self, *arg, **kwargs):
        self.model.to(*arg, **kwargs)
        self.device = self.model.conv_pre_mag.weight.device
        return self

    def __call__(self, audio, orig_sampling_rate):
        with torch.no_grad():
            # audio, orig_sampling_rate = torchaudio.load(inp_path)
            # audio = audio.to(self.device)
            audio = aF.resample(audio, orig_freq=orig_sampling_rate, new_freq=self.h.hr_sampling_rate)
            amp_nb, pha_nb, com_nb = amp_pha_stft(audio, self.h.n_fft, self.h.hop_size, self.h.win_size)
            amp_wb_g, pha_wb_g, com_wb_g = self.model(amp_nb, pha_nb)
            audio_hr_g = amp_pha_istft(amp_wb_g, pha_wb_g, self.h.n_fft, self.h.hop_size, self.h.win_size)
            # sf.write(opt_path, audio_hr_g.squeeze().cpu().numpy(), self.h.hr_sampling_rate, 'PCM_16')
            return audio_hr_g.squeeze().cpu().numpy(), self.h.hr_sampling_rate
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
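A hypothetical driver for `AP_BWE`; the `DictToAttrRecursive` class below is a simplified stand-in for the helper passed in elsewhere in this repo, and the file names are illustrative:

```python
# Sketch: run 24k->48k bandwidth extension. DictToAttrRecursive here is a
# simplified stand-in; the checkpoint must exist under tools/AP_BWE_main/24kto48k/.
import torch
import torchaudio

from tools.audio_sr import AP_BWE


class DictToAttrRecursive(dict):
    def __init__(self, input_dict):
        super().__init__(input_dict)
        for key, value in input_dict.items():
            if isinstance(value, dict):
                value = DictToAttrRecursive(value)
            setattr(self, key, value)


device = "cuda" if torch.cuda.is_available() else "cpu"
bwe = AP_BWE(device, DictToAttrRecursive)
audio, sr = torchaudio.load("sample_24k.wav")  # hypothetical mono file
audio_hr, hr_sr = bwe(audio.to(device), sr)
torchaudio.save("sample_48k.wav", torch.from_numpy(audio_hr).unsqueeze(0), hr_sr)
```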
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/cmd-denoise.py
tools/cmd-denoise.py
import os
import argparse
import traceback

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from tqdm import tqdm

path_denoise = "tools/denoise-model/speech_frcrn_ans_cirm_16k"
path_denoise = path_denoise if os.path.exists(path_denoise) else "damo/speech_frcrn_ans_cirm_16k"
ans = pipeline(Tasks.acoustic_noise_suppression, model=path_denoise)


def execute_denoise(input_folder, output_folder):
    os.makedirs(output_folder, exist_ok=True)
    # print(input_folder)
    # print(list(os.listdir(input_folder).sort()))
    for name in tqdm(os.listdir(input_folder)):
        try:
            ans("%s/%s" % (input_folder, name), output_path="%s/%s" % (output_folder, name))
        except Exception:
            traceback.print_exc()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i", "--input_folder", type=str, required=True, help="Path to the folder containing WAV files."
    )
    parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.")
    parser.add_argument(
        "-p", "--precision", type=str, default="float16", choices=["float16", "float32"], help="fp16 or fp32"
    )  # not wired in yet
    cmd = parser.parse_args()
    execute_denoise(
        input_folder=cmd.input_folder,
        output_folder=cmd.output_folder,
    )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
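Because the script's file name contains a hyphen it cannot be imported as a module; a sketch of driving it as a subprocess (folder names hypothetical):

```python
# Run the denoiser over a folder of WAVs; the -p flag is parsed but, per the
# comment in the source, not yet wired into the pipeline.
import subprocess
import sys

subprocess.run(
    [sys.executable, "tools/cmd-denoise.py", "-i", "raw_wavs", "-o", "denoised_wavs"],
    check=True,
)
```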
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/slicer2.py
tools/slicer2.py
import numpy as np # This function is obtained from librosa. def get_rms( y, frame_length=2048, hop_length=512, pad_mode="constant", ): padding = (int(frame_length // 2), int(frame_length // 2)) y = np.pad(y, padding, mode=pad_mode) axis = -1 # put our new within-frame axis at the end for now out_strides = y.strides + tuple([y.strides[axis]]) # Reduce the shape on the framing axis x_shape_trimmed = list(y.shape) x_shape_trimmed[axis] -= frame_length - 1 out_shape = tuple(x_shape_trimmed) + tuple([frame_length]) xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides) if axis < 0: target_axis = axis - 1 else: target_axis = axis + 1 xw = np.moveaxis(xw, -1, target_axis) # Downsample along the target axis slices = [slice(None)] * xw.ndim slices[axis] = slice(0, None, hop_length) x = xw[tuple(slices)] # Calculate power power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True) return np.sqrt(power) class Slicer: def __init__( self, sr: int, threshold: float = -40.0, min_length: int = 5000, min_interval: int = 300, hop_size: int = 20, max_sil_kept: int = 5000, ): if not min_length >= min_interval >= hop_size: raise ValueError("The following condition must be satisfied: min_length >= min_interval >= hop_size") if not max_sil_kept >= hop_size: raise ValueError("The following condition must be satisfied: max_sil_kept >= hop_size") min_interval = sr * min_interval / 1000 self.threshold = 10 ** (threshold / 20.0) self.hop_size = round(sr * hop_size / 1000) self.win_size = min(round(min_interval), 4 * self.hop_size) self.min_length = round(sr * min_length / 1000 / self.hop_size) self.min_interval = round(min_interval / self.hop_size) self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) def _apply_slice(self, waveform, begin, end): if len(waveform.shape) > 1: return waveform[:, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)] else: return waveform[begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)] # @timeit def slice(self, waveform): if len(waveform.shape) > 1: samples = waveform.mean(axis=0) else: samples = waveform if samples.shape[0] <= self.min_length: return [waveform] rms_list = get_rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0) sil_tags = [] silence_start = None clip_start = 0 for i, rms in enumerate(rms_list): # Keep looping while frame is silent. if rms < self.threshold: # Record start of silent frames. if silence_start is None: silence_start = i continue # Keep looping while frame is not silent and silence start has not been recorded. if silence_start is None: continue # Clear recorded silence start if interval is not enough or clip is too short is_leading_silence = silence_start == 0 and i > self.max_sil_kept need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length if not is_leading_silence and not need_slice_middle: silence_start = None continue # Need slicing. Record the range of silent frames to be removed. 
if i - silence_start <= self.max_sil_kept: pos = rms_list[silence_start : i + 1].argmin() + silence_start if silence_start == 0: sil_tags.append((0, pos)) else: sil_tags.append((pos, pos)) clip_start = pos elif i - silence_start <= self.max_sil_kept * 2: pos = rms_list[i - self.max_sil_kept : silence_start + self.max_sil_kept + 1].argmin() pos += i - self.max_sil_kept pos_l = rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start pos_r = rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept if silence_start == 0: sil_tags.append((0, pos_r)) clip_start = pos_r else: sil_tags.append((min(pos_l, pos), max(pos_r, pos))) clip_start = max(pos_r, pos) else: pos_l = rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start pos_r = rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept if silence_start == 0: sil_tags.append((0, pos_r)) else: sil_tags.append((pos_l, pos_r)) clip_start = pos_r silence_start = None # Deal with trailing silence. total_frames = rms_list.shape[0] if silence_start is not None and total_frames - silence_start >= self.min_interval: silence_end = min(total_frames, silence_start + self.max_sil_kept) pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start sil_tags.append((pos, total_frames + 1)) # Apply and return slices. ####音频+起始时间+终止时间 if len(sil_tags) == 0: return [[waveform, 0, int(total_frames * self.hop_size)]] else: chunks = [] if sil_tags[0][0] > 0: chunks.append([self._apply_slice(waveform, 0, sil_tags[0][0]), 0, int(sil_tags[0][0] * self.hop_size)]) for i in range(len(sil_tags) - 1): chunks.append( [ self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]), int(sil_tags[i][1] * self.hop_size), int(sil_tags[i + 1][0] * self.hop_size), ] ) if sil_tags[-1][1] < total_frames: chunks.append( [ self._apply_slice(waveform, sil_tags[-1][1], total_frames), int(sil_tags[-1][1] * self.hop_size), int(total_frames * self.hop_size), ] ) return chunks def main(): import os.path from argparse import ArgumentParser import librosa import soundfile parser = ArgumentParser() parser.add_argument("audio", type=str, help="The audio to be sliced") parser.add_argument("--out", type=str, help="Output directory of the sliced audio clips") parser.add_argument( "--db_thresh", type=float, required=False, default=-40, help="The dB threshold for silence detection", ) parser.add_argument( "--min_length", type=int, required=False, default=5000, help="The minimum milliseconds required for each sliced audio clip", ) parser.add_argument( "--min_interval", type=int, required=False, default=300, help="The minimum milliseconds for a silence part to be sliced", ) parser.add_argument( "--hop_size", type=int, required=False, default=10, help="Frame length in milliseconds", ) parser.add_argument( "--max_sil_kept", type=int, required=False, default=500, help="The maximum silence length kept around the sliced clip, presented in milliseconds", ) args = parser.parse_args() out = args.out if out is None: out = os.path.dirname(os.path.abspath(args.audio)) audio, sr = librosa.load(args.audio, sr=None, mono=False) slicer = Slicer( sr=sr, threshold=args.db_thresh, min_length=args.min_length, min_interval=args.min_interval, hop_size=args.hop_size, max_sil_kept=args.max_sil_kept, ) chunks = slicer.slice(audio) if not os.path.exists(out): os.makedirs(out) for i, chunk in enumerate(chunks): if len(chunk.shape) > 1: chunk = chunk.T soundfile.write( os.path.join( out, "%s_%d.wav" % 
(os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i), ), chunk, sr, ) if __name__ == "__main__": main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
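A minimal sketch of the `Slicer` API above; in this repo `slice()` returns `[chunk, start, end]` triples (see the inline comment), which the loop unpacks, and the file names are hypothetical:

```python
# Sketch of the Slicer API above; slice() returns [chunk, start, end] triples
# measured in samples. Caveat: for audio shorter than min_length the early
# return yields a bare [waveform], which this unpacking would not handle.
import librosa
import soundfile as sf

from tools.slicer2 import Slicer

audio, sr = librosa.load("speech.wav", sr=None, mono=False)  # hypothetical input
slicer = Slicer(sr=sr, threshold=-40.0, min_length=5000, min_interval=300, hop_size=10, max_sil_kept=500)
for i, (chunk, start, end) in enumerate(slicer.slice(audio)):
    if chunk.ndim > 1:  # multi-channel slices come back as (channels, samples)
        chunk = chunk.T
    sf.write(f"clip_{i}_{start}_{end}.wav", chunk, sr)
```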
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/slice_audio.py
tools/slice_audio.py
import os
import sys
import traceback

import numpy as np
from scipy.io import wavfile

# parent_directory = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(parent_directory)
from tools.my_utils import load_audio
from slicer2 import Slicer


def slice(inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, all_part):
    os.makedirs(opt_root, exist_ok=True)
    if os.path.isfile(inp):
        input = [inp]
    elif os.path.isdir(inp):
        input = [os.path.join(inp, name) for name in sorted(list(os.listdir(inp)))]
    else:
        return "输入路径存在但既不是文件也不是文件夹"
    slicer = Slicer(
        sr=32000,  # sampling rate of the long audio
        threshold=int(threshold),  # volume below this value is treated as a candidate silent cut point
        min_length=int(min_length),  # minimum length per segment; a too-short first segment keeps merging with later ones until it exceeds this
        min_interval=int(min_interval),  # minimum cutting interval
        hop_size=int(hop_size),  # how the volume curve is computed; smaller means finer resolution but more computation (finer is not necessarily better)
        max_sil_kept=int(max_sil_kept),  # maximum silence kept around each cut
    )
    _max = float(_max)
    alpha = float(alpha)
    for inp_path in input[int(i_part) :: int(all_part)]:
        # print(inp_path)
        try:
            name = os.path.basename(inp_path)
            audio = load_audio(inp_path, 32000)
            # print(audio.shape)
            for chunk, start, end in slicer.slice(audio):  # start and end are frame (sample) counts
                tmp_max = np.abs(chunk).max()
                if tmp_max > 1:
                    chunk /= tmp_max
                chunk = (chunk / tmp_max * (_max * alpha)) + (1 - alpha) * chunk
                wavfile.write(
                    "%s/%s_%010d_%010d.wav" % (opt_root, name, start, end),
                    32000,
                    # chunk.astype(np.float32),
                    (chunk * 32767).astype(np.int16),
                )
        except Exception:
            print(inp_path, "->fail->", traceback.format_exc())
    return "执行完毕,请检查输出文件"


print(slice(*sys.argv[1:]))
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
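The script is driven positionally via `sys.argv`; a sketch of a single-shard run whose argument order must match the `slice()` signature (folder names and numeric values are illustrative):

```python
# One shard (i_part=0 of all_part=1) over an input folder; argument order
# must match slice(inp, opt_root, threshold, min_length, min_interval,
# hop_size, max_sil_kept, _max, alpha, i_part, all_part).
import subprocess
import sys

subprocess.run(
    [sys.executable, "tools/slice_audio.py",
     "raw_wavs", "sliced_wavs",           # inp, opt_root
     "-34", "4000", "300", "10", "500",   # threshold .. max_sil_kept
     "0.9", "0.25",                       # _max, alpha
     "0", "1"],                           # i_part, all_part
    check=True,
)
```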
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/subfix_webui.py
tools/subfix_webui.py
import sys from tools.i18n.i18n import I18nAuto, scan_language_list language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto" i18n = I18nAuto(language=language) import argparse import copy import json import os import uuid try: import gradio.analytics as analytics analytics.version_check = lambda: None except: ... import gradio as gr import librosa import numpy as np import soundfile g_json_key_text = "" g_json_key_path = "" g_load_file = "" g_load_format = "" g_max_json_index = 0 g_index = 0 g_batch = 10 g_text_list = [] g_audio_list = [] g_checkbox_list = [] g_data_json = [] def reload_data(index, batch): global g_index g_index = index global g_batch g_batch = batch datas = g_data_json[index : index + batch] output = [] for d in datas: output.append({g_json_key_text: d[g_json_key_text], g_json_key_path: d[g_json_key_path]}) return output def b_change_index(index, batch): global g_index, g_batch g_index, g_batch = index, batch datas = reload_data(index, batch) output = [] for i, _ in enumerate(datas): output.append( # gr.Textbox( # label=f"Text {i+index}", # value=_[g_json_key_text]#text # ) {"__type__": "update", "label": f"Text {i + index}", "value": _[g_json_key_text]} ) for _ in range(g_batch - len(datas)): output.append( # gr.Textbox( # label=f"Text", # value="" # ) {"__type__": "update", "label": "Text", "value": ""} ) for _ in datas: output.append(_[g_json_key_path]) for _ in range(g_batch - len(datas)): output.append(None) for _ in range(g_batch): output.append(False) return output def b_next_index(index, batch): b_save_file() if (index + batch) <= g_max_json_index: return index + batch, *b_change_index(index + batch, batch) else: return index, *b_change_index(index, batch) def b_previous_index(index, batch): b_save_file() if (index - batch) >= 0: return index - batch, *b_change_index(index - batch, batch) else: return 0, *b_change_index(0, batch) def b_submit_change(*text_list): global g_data_json change = False for i, new_text in enumerate(text_list): if g_index + i <= g_max_json_index: new_text = new_text.strip() + " " if g_data_json[g_index + i][g_json_key_text] != new_text: g_data_json[g_index + i][g_json_key_text] = new_text change = True if change: b_save_file() return g_index, *b_change_index(g_index, g_batch) def b_delete_audio(*checkbox_list): global g_data_json, g_index, g_max_json_index b_save_file() change = False for i, checkbox in reversed(list(enumerate(checkbox_list))): if g_index + i < len(g_data_json): if checkbox == True: g_data_json.pop(g_index + i) change = True g_max_json_index = len(g_data_json) - 1 if g_index > g_max_json_index: g_index = g_max_json_index g_index = g_index if g_index >= 0 else 0 if change: b_save_file() # return gr.Slider(value=g_index, maximum=(g_max_json_index if g_max_json_index>=0 else 0)), *b_change_index(g_index, g_batch) return { "value": g_index, "__type__": "update", "maximum": (g_max_json_index if g_max_json_index >= 0 else 0), }, *b_change_index(g_index, g_batch) def b_invert_selection(*checkbox_list): new_list = [not item if item is True else True for item in checkbox_list] return new_list def get_next_path(filename): base_dir = os.path.dirname(filename) base_name = os.path.splitext(os.path.basename(filename))[0] for i in range(100): new_path = os.path.join(base_dir, f"{base_name}_{str(i).zfill(2)}.wav") if not os.path.exists(new_path): return new_path return os.path.join(base_dir, f"{str(uuid.uuid4())}.wav") def b_audio_split(audio_breakpoint, *checkbox_list): global g_data_json, g_max_json_index 
checked_index = [] for i, checkbox in enumerate(checkbox_list): if checkbox == True and g_index + i < len(g_data_json): checked_index.append(g_index + i) if len(checked_index) == 1: index = checked_index[0] audio_json = copy.deepcopy(g_data_json[index]) path = audio_json[g_json_key_path] data, sample_rate = librosa.load(path, sr=None, mono=True) audio_maxframe = len(data) break_frame = int(audio_breakpoint * sample_rate) if break_frame >= 1 and break_frame < audio_maxframe: audio_first = data[0:break_frame] audio_second = data[break_frame:] nextpath = get_next_path(path) soundfile.write(nextpath, audio_second, sample_rate) soundfile.write(path, audio_first, sample_rate) g_data_json.insert(index + 1, audio_json) g_data_json[index + 1][g_json_key_path] = nextpath b_save_file() g_max_json_index = len(g_data_json) - 1 # return gr.Slider(value=g_index, maximum=g_max_json_index), *b_change_index(g_index, g_batch) return {"value": g_index, "maximum": g_max_json_index, "__type__": "update"}, *b_change_index(g_index, g_batch) def b_merge_audio(interval_r, *checkbox_list): global g_data_json, g_max_json_index b_save_file() checked_index = [] audios_path = [] audios_text = [] for i, checkbox in enumerate(checkbox_list): if checkbox == True and g_index + i < len(g_data_json): checked_index.append(g_index + i) if len(checked_index) > 1: for i in checked_index: audios_path.append(g_data_json[i][g_json_key_path]) audios_text.append(g_data_json[i][g_json_key_text]) for i in reversed(checked_index[1:]): g_data_json.pop(i) base_index = checked_index[0] base_path = audios_path[0] g_data_json[base_index][g_json_key_text] = "".join(audios_text) audio_list = [] l_sample_rate = None for i, path in enumerate(audios_path): data, sample_rate = librosa.load(path, sr=l_sample_rate, mono=True) l_sample_rate = sample_rate if i > 0: silence = np.zeros(int(l_sample_rate * interval_r)) audio_list.append(silence) audio_list.append(data) audio_concat = np.concatenate(audio_list) soundfile.write(base_path, audio_concat, l_sample_rate) b_save_file() g_max_json_index = len(g_data_json) - 1 # return gr.Slider(value=g_index, maximum=g_max_json_index), *b_change_index(g_index, g_batch) return {"value": g_index, "maximum": g_max_json_index, "__type__": "update"}, *b_change_index(g_index, g_batch) def b_save_json(): with open(g_load_file, "w", encoding="utf-8") as file: for data in g_data_json: file.write(f"{json.dumps(data, ensure_ascii=False)}\n") def b_save_list(): with open(g_load_file, "w", encoding="utf-8") as file: for data in g_data_json: wav_path = data["wav_path"] speaker_name = data["speaker_name"] language = data["language"] text = data["text"] file.write(f"{wav_path}|{speaker_name}|{language}|{text}".strip() + "\n") def b_load_json(): global g_data_json, g_max_json_index with open(g_load_file, "r", encoding="utf-8") as file: g_data_json = file.readlines() g_data_json = [json.loads(line) for line in g_data_json] g_max_json_index = len(g_data_json) - 1 def b_load_list(): global g_data_json, g_max_json_index with open(g_load_file, "r", encoding="utf-8") as source: data_list = source.readlines() for _ in data_list: data = _.split("|") if len(data) == 4: wav_path, speaker_name, language, text = data g_data_json.append( {"wav_path": wav_path, "speaker_name": speaker_name, "language": language, "text": text.strip()} ) else: print("error line:", data) g_max_json_index = len(g_data_json) - 1 def b_save_file(): if g_load_format == "json": b_save_json() elif g_load_format == "list": b_save_list() def b_load_file(): if 
g_load_format == "json": b_load_json() elif g_load_format == "list": b_load_list() def set_global(load_json, load_list, json_key_text, json_key_path, batch): global g_json_key_text, g_json_key_path, g_load_file, g_load_format, g_batch g_batch = int(batch) if load_json != "None": g_load_format = "json" g_load_file = load_json elif load_list != "None": g_load_format = "list" g_load_file = load_list else: g_load_format = "list" g_load_file = "demo.list" g_json_key_text = json_key_text g_json_key_path = json_key_path b_load_file() if __name__ == "__main__": parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("--load_json", default="None", help="source file, like demo.json") parser.add_argument("--is_share", default="False", help="whether webui is_share=True") parser.add_argument("--load_list", default="None", help="source file, like demo.list") parser.add_argument("--webui_port_subfix", default=9871, help="source file, like demo.list") parser.add_argument("--json_key_text", default="text", help="the text key name in json, Default: text") parser.add_argument("--json_key_path", default="wav_path", help="the path key name in json, Default: wav_path") parser.add_argument("--g_batch", default=10, help="max number g_batch wav to display, Default: 10") args = parser.parse_args() set_global(args.load_json, args.load_list, args.json_key_text, args.json_key_path, args.g_batch) with gr.Blocks(analytics_enabled=False) as demo: gr.Markdown( value=i18n( "Submit Text: 将当前页所有文本框内容手工保存到内存和文件(翻页前后或者退出标注页面前如果没点这个按钮,你再翻回来就回滚了,白忙活。)" ) ) with gr.Row(): btn_change_index = gr.Button("Change Index") btn_submit_change = gr.Button("Submit Text") btn_merge_audio = gr.Button("Merge Audio") btn_delete_audio = gr.Button("Delete Audio") btn_previous_index = gr.Button("Previous Index") btn_next_index = gr.Button("Next Index") with gr.Row(): index_slider = gr.Slider(minimum=0, maximum=g_max_json_index, value=g_index, step=1, label="Index", scale=3) splitpoint_slider = gr.Slider( minimum=0, maximum=120.0, value=0, step=0.1, label="Audio Split Point(s)", scale=3 ) btn_audio_split = gr.Button("Split Audio", scale=1) btn_save_json = gr.Button("Save File", visible=True, scale=1) btn_invert_selection = gr.Button("Invert Selection", scale=1) with gr.Row(): with gr.Column(): for _ in range(0, g_batch): with gr.Row(): text = gr.Textbox(label="Text", visible=True, scale=5) audio_output = gr.Audio(label="Output Audio", visible=True, scale=5) audio_check = gr.Checkbox(label="Yes", show_label=True, info="Choose Audio", scale=1) g_text_list.append(text) g_audio_list.append(audio_output) g_checkbox_list.append(audio_check) with gr.Row(): batchsize_slider = gr.Slider( minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False ) interval_slider = gr.Slider(minimum=0, maximum=2, value=0, step=0.01, label="Interval", scale=3) btn_theme_dark = gr.Button("Light Theme", link="?__theme=light", scale=1) btn_theme_light = gr.Button("Dark Theme", link="?__theme=dark", scale=1) btn_change_index.click( b_change_index, inputs=[ index_slider, batchsize_slider, ], outputs=[*g_text_list, *g_audio_list, *g_checkbox_list], ) btn_submit_change.click( b_submit_change, inputs=[ *g_text_list, ], outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], ) btn_previous_index.click( b_previous_index, inputs=[ index_slider, batchsize_slider, ], outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], ) btn_next_index.click( b_next_index, inputs=[ index_slider, 
batchsize_slider, ], outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], ) btn_delete_audio.click( b_delete_audio, inputs=[*g_checkbox_list], outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], ) btn_merge_audio.click( b_merge_audio, inputs=[interval_slider, *g_checkbox_list], outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], ) btn_audio_split.click( b_audio_split, inputs=[splitpoint_slider, *g_checkbox_list], outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], ) btn_invert_selection.click(b_invert_selection, inputs=[*g_checkbox_list], outputs=[*g_checkbox_list]) btn_save_json.click(b_save_file) demo.load( b_change_index, inputs=[ index_slider, batchsize_slider, ], outputs=[*g_text_list, *g_audio_list, *g_checkbox_list], ) demo.launch( server_name="0.0.0.0", inbrowser=True, # quiet=True, share=eval(args.is_share), server_port=int(args.webui_port_subfix), )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
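A sketch of launching the annotation UI against a `.list` file; as `b_load_list` above shows, each line must be `wav_path|speaker_name|language|text` (paths hypothetical):

```python
# Launch the subfix labeling WebUI on a hypothetical demo.list; the flags
# below are the ones defined in the script's argparse setup.
import subprocess
import sys

subprocess.run(
    [sys.executable, "tools/subfix_webui.py",
     "--load_list", "output/asr_opt/demo.list",
     "--webui_port_subfix", "9871",
     "--is_share", "False"],
    check=True,
)
```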
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/__init__.py
tools/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/my_utils.py
tools/my_utils.py
import ctypes import os import sys from pathlib import Path import ffmpeg import gradio as gr import numpy as np import pandas as pd from tools.i18n.i18n import I18nAuto i18n = I18nAuto(language=os.environ.get("language", "Auto")) def load_audio(file, sr): try: # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26 # This launches a subprocess to decode audio while down-mixing and resampling as necessary. # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. file = clean_path(file) # 防止小白拷路径头尾带了空格和"和回车 if os.path.exists(file) is False: raise RuntimeError("You input a wrong audio path that does not exists, please fix it!") out, _ = ( ffmpeg.input(file, threads=0) .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) ) except Exception: out, _ = ( ffmpeg.input(file, threads=0) .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True) ) # Expose the Error raise RuntimeError(i18n("音频加载失败")) return np.frombuffer(out, np.float32).flatten() def clean_path(path_str: str): if path_str.endswith(("\\", "/")): return clean_path(path_str[0:-1]) path_str = path_str.replace("/", os.sep).replace("\\", os.sep) return path_str.strip( " '\n\"\u202a" ) # path_str.strip(" ").strip('\'').strip("\n").strip('"').strip(" ").strip("\u202a") def check_for_existance(file_list: list = None, is_train=False, is_dataset_processing=False): files_status = [] if is_train == True and file_list: file_list.append(os.path.join(file_list[0], "2-name2text.txt")) file_list.append(os.path.join(file_list[0], "3-bert")) file_list.append(os.path.join(file_list[0], "4-cnhubert")) file_list.append(os.path.join(file_list[0], "5-wav32k")) file_list.append(os.path.join(file_list[0], "6-name2semantic.tsv")) for file in file_list: if os.path.exists(file): files_status.append(True) else: files_status.append(False) if sum(files_status) != len(files_status): if is_train: for file, status in zip(file_list, files_status): if status: pass else: gr.Warning(file) gr.Warning(i18n("以下文件或文件夹不存在")) return False elif is_dataset_processing: if files_status[0]: return True elif not files_status[0]: gr.Warning(file_list[0]) elif not files_status[1] and file_list[1]: gr.Warning(file_list[1]) gr.Warning(i18n("以下文件或文件夹不存在")) return False else: if file_list[0]: gr.Warning(file_list[0]) gr.Warning(i18n("以下文件或文件夹不存在")) else: gr.Warning(i18n("路径不能为空")) return False return True def check_details(path_list=None, is_train=False, is_dataset_processing=False): if is_dataset_processing: list_path, audio_path = path_list if not list_path.endswith(".list"): gr.Warning(i18n("请填入正确的List路径")) return if audio_path: if not os.path.isdir(audio_path): gr.Warning(i18n("请填入正确的音频文件夹路径")) return with open(list_path, "r", encoding="utf8") as f: line = f.readline().strip("\n").split("\n") wav_name, _, __, ___ = line[0].split("|") wav_name = clean_path(wav_name) if audio_path != "" and audio_path != None: wav_name = os.path.basename(wav_name) wav_path = "%s/%s" % (audio_path, wav_name) else: wav_path = wav_name if os.path.exists(wav_path): ... 
else: gr.Warning(wav_path + i18n("路径错误")) return if is_train: path_list.append(os.path.join(path_list[0], "2-name2text.txt")) path_list.append(os.path.join(path_list[0], "4-cnhubert")) path_list.append(os.path.join(path_list[0], "5-wav32k")) path_list.append(os.path.join(path_list[0], "6-name2semantic.tsv")) phone_path, hubert_path, wav_path, semantic_path = path_list[1:] with open(phone_path, "r", encoding="utf-8") as f: if f.read(1): ... else: gr.Warning(i18n("缺少音素数据集")) if os.listdir(hubert_path): ... else: gr.Warning(i18n("缺少Hubert数据集")) if os.listdir(wav_path): ... else: gr.Warning(i18n("缺少音频数据集")) df = pd.read_csv(semantic_path, delimiter="\t", encoding="utf-8") if len(df) >= 1: ... else: gr.Warning(i18n("缺少语义数据集")) def load_cudnn(): import torch if not torch.cuda.is_available(): print("[INFO] CUDA is not available, skipping cuDNN setup.") return if sys.platform == "win32": torch_lib_dir = Path(torch.__file__).parent / "lib" if torch_lib_dir.exists(): os.add_dll_directory(str(torch_lib_dir)) print(f"[INFO] Added DLL directory: {torch_lib_dir}") matching_files = sorted(torch_lib_dir.glob("cudnn_cnn*.dll")) if not matching_files: print(f"[ERROR] No cudnn_cnn*.dll found in {torch_lib_dir}") return for dll_path in matching_files: dll_name = os.path.basename(dll_path) try: ctypes.CDLL(dll_name) print(f"[INFO] Loaded: {dll_name}") except OSError as e: print(f"[WARNING] Failed to load {dll_name}: {e}") else: print(f"[WARNING] Torch lib directory not found: {torch_lib_dir}") elif sys.platform == "linux": site_packages = Path(torch.__file__).resolve().parents[1] cudnn_dir = site_packages / "nvidia" / "cudnn" / "lib" if not cudnn_dir.exists(): print(f"[ERROR] cudnn dir not found: {cudnn_dir}") return matching_files = sorted(cudnn_dir.glob("libcudnn_cnn*.so*")) if not matching_files: print(f"[ERROR] No libcudnn_cnn*.so* found in {cudnn_dir}") return for so_path in matching_files: try: ctypes.CDLL(so_path, mode=ctypes.RTLD_GLOBAL) # type: ignore print(f"[INFO] Loaded: {so_path}") except OSError as e: print(f"[WARNING] Failed to load {so_path}: {e}") def load_nvrtc(): import torch if not torch.cuda.is_available(): print("[INFO] CUDA is not available, skipping nvrtc setup.") return if sys.platform == "win32": torch_lib_dir = Path(torch.__file__).parent / "lib" if torch_lib_dir.exists(): os.add_dll_directory(str(torch_lib_dir)) print(f"[INFO] Added DLL directory: {torch_lib_dir}") matching_files = sorted(torch_lib_dir.glob("nvrtc*.dll")) if not matching_files: print(f"[ERROR] No nvrtc*.dll found in {torch_lib_dir}") return for dll_path in matching_files: dll_name = os.path.basename(dll_path) try: ctypes.CDLL(dll_name) print(f"[INFO] Loaded: {dll_name}") except OSError as e: print(f"[WARNING] Failed to load {dll_name}: {e}") else: print(f"[WARNING] Torch lib directory not found: {torch_lib_dir}") elif sys.platform == "linux": site_packages = Path(torch.__file__).resolve().parents[1] nvrtc_dir = site_packages / "nvidia" / "cuda_nvrtc" / "lib" if not nvrtc_dir.exists(): print(f"[ERROR] nvrtc dir not found: {nvrtc_dir}") return matching_files = sorted(nvrtc_dir.glob("libnvrtc*.so*")) if not matching_files: print(f"[ERROR] No libnvrtc*.so* found in {nvrtc_dir}") return for so_path in matching_files: try: ctypes.CDLL(so_path, mode=ctypes.RTLD_GLOBAL) # type: ignore print(f"[INFO] Loaded: {so_path}") except OSError as e: print(f"[WARNING] Failed to load {so_path}: {e}")
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
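A minimal sketch of `load_audio` and `clean_path`; `load_audio` shells out to the ffmpeg CLI, which must be on `PATH`, and the input path is hypothetical:

```python
# clean_path strips stray quotes/whitespace that copy-pasted paths often carry;
# load_audio then decodes via the ffmpeg CLI to mono float32 at the target rate.
from tools.my_utils import clean_path, load_audio

path = clean_path(' "C:\\data\\sample.wav" \n')  # hypothetical pasted path
samples = load_audio(path, 32000)  # 1-D numpy.float32 array at 32 kHz
print(samples.shape, samples.dtype)
```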
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/asr/fasterwhisper_asr.py
tools/asr/fasterwhisper_asr.py
import argparse import os import traceback import requests import torch from faster_whisper import WhisperModel from huggingface_hub import snapshot_download as snapshot_download_hf from modelscope import snapshot_download as snapshot_download_ms from tqdm import tqdm from tools.asr.config import get_models from tools.asr.funasr_asr import only_asr from tools.my_utils import load_cudnn # fmt: off language_code_list = [ "af", "am", "ar", "as", "az", "ba", "be", "bg", "bn", "bo", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "es", "et", "eu", "fa", "fi", "fo", "fr", "gl", "gu", "ha", "haw", "he", "hi", "hr", "ht", "hu", "hy", "id", "is", "it", "ja", "jw", "ka", "kk", "km", "kn", "ko", "la", "lb", "ln", "lo", "lt", "lv", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "mt", "my", "ne", "nl", "nn", "no", "oc", "pa", "pl", "ps", "pt", "ro", "ru", "sa", "sd", "si", "sk", "sl", "sn", "so", "sq", "sr", "su", "sv", "sw", "ta", "te", "tg", "th", "tk", "tl", "tr", "tt", "uk", "ur", "uz", "vi", "yi", "yo", "zh", "yue", "auto"] # fmt: on def download_model(model_size: str): url = "https://huggingface.co/api/models/gpt2" try: requests.get(url, timeout=3) source = "HF" except Exception: source = "ModelScope" model_path = "" if source == "HF": if "distil" in model_size: if "3.5" in model_size: repo_id = "distil-whisper/distil-large-v3.5-ct2" model_path = "tools/asr/models/faster-distil-whisper-large-v3.5" else: repo_id = "Systran/faster-{}-whisper-{}".format(*model_size.split("-", maxsplit=1)) elif model_size == "large-v3-turbo": repo_id = "mobiuslabsgmbh/faster-whisper-large-v3-turbo" model_path = "tools/asr/models/faster-whisper-large-v3-turbo" else: repo_id = f"Systran/faster-whisper-{model_size}" model_path = ( model_path or f"tools/asr/models/{repo_id.replace('Systran/', '').replace('distil-whisper/', '', 1)}" ) else: repo_id = "XXXXRT/faster-whisper" model_path = "tools/asr/models" files: list[str] = [ "config.json", "model.bin", "tokenizer.json", "vocabulary.txt", ] if "large-v3" in model_size or "distil" in model_size: files.append("preprocessor_config.json") files.append("vocabulary.json") files.remove("vocabulary.txt") if source == "ModelScope": files = [f"faster-whisper-{model_size}/{file}".replace("whisper-distil", "distil-whisper") for file in files] if source == "HF": print(f"Downloading model from HuggingFace: {repo_id} to {model_path}") snapshot_download_hf( repo_id, local_dir=model_path, local_dir_use_symlinks=False, allow_patterns=files, ) else: print(f"Downloading model from ModelScope: {repo_id} to {model_path}") snapshot_download_ms( repo_id, local_dir=model_path, allow_patterns=files, ) return model_path + f"/faster-whisper-{model_size}".replace("whisper-distil", "distil-whisper") return model_path def execute_asr(input_folder, output_folder, model_path, language, precision): if language == "auto": language = None # 不设置语种由模型自动输出概率最高的语种 print("loading faster whisper model:", model_path, model_path) device = "cuda" if torch.cuda.is_available() else "cpu" model = WhisperModel(model_path, device=device, compute_type=precision) input_file_names = os.listdir(input_folder) input_file_names.sort() output = [] output_file_name = os.path.basename(input_folder) for file_name in tqdm(input_file_names): try: file_path = os.path.join(input_folder, file_name) segments, info = model.transcribe( audio=file_path, beam_size=5, vad_filter=True, vad_parameters=dict(min_silence_duration_ms=700), language=language, ) text = "" if info.language in ["zh", "yue"]: print("检测为中文文本, 转 FunASR 处理") text = 
only_asr(file_path, language=info.language.lower()) if text == "": for segment in segments: text += segment.text output.append(f"{file_path}|{output_file_name}|{info.language.upper()}|{text}") except Exception as e: print(e) traceback.print_exc() output_folder = output_folder or "output/asr_opt" os.makedirs(output_folder, exist_ok=True) output_file_path = os.path.abspath(f"{output_folder}/{output_file_name}.list") with open(output_file_path, "w", encoding="utf-8") as f: f.write("\n".join(output)) print(f"ASR 任务完成->标注文件路径: {output_file_path}\n") return output_file_path load_cudnn() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-i", "--input_folder", type=str, required=True, help="Path to the folder containing WAV files." ) parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.") parser.add_argument( "-s", "--model_size", type=str, default="large-v3", choices=get_models(), help="Model Size of Faster Whisper", ) parser.add_argument( "-l", "--language", type=str, default="ja", choices=language_code_list, help="Language of the audio files." ) parser.add_argument( "-p", "--precision", type=str, default="float16", choices=["float16", "float32", "int8"], help="fp16, int8 or fp32", ) cmd = parser.parse_args() model_size = cmd.model_size if model_size == "large": model_size = "large-v3" model_path = download_model(model_size) output_file_path = execute_asr( input_folder=cmd.input_folder, output_folder=cmd.output_folder, model_path=model_path, language=cmd.language, precision=cmd.precision, )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
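The transcription loop above pairs faster-whisper's built-in VAD with a FunASR fallback for Chinese. A minimal standalone sketch of the same faster-whisper call, assuming the weights were already fetched by `download_model("large-v3")` and that `sample.wav` is a hypothetical input file:

```python
# Minimal sketch of the faster-whisper call made in execute_asr(); the model
# directory and sample.wav are assumptions, not shipped files.
import torch
from faster_whisper import WhisperModel

device = "cuda" if torch.cuda.is_available() else "cpu"
model = WhisperModel("tools/asr/models/faster-whisper-large-v3", device=device, compute_type="float32")

segments, info = model.transcribe(
    audio="sample.wav",
    beam_size=5,
    vad_filter=True,                                   # same VAD settings as execute_asr()
    vad_parameters=dict(min_silence_duration_ms=700),
    language=None,                                     # None = auto-detect
)
print(info.language, info.language_probability)        # detected language and its probability
print("".join(segment.text for segment in segments))   # segments is a lazy generator
```

Because `segments` is a lazy generator, `execute_asr()` can hand Chinese files to FunASR after the language check without first paying for a full Whisper decode.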
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/asr/config.py
tools/asr/config.py
def get_models():
    model_size_list = [
        "medium",
        "medium.en",
        "large-v2",
        "large-v3",
        "large-v3-turbo",
        # "distil-large-v2",
        # "distil-large-v3",
        # "distil-large-v3.5",
    ]
    return model_size_list


asr_dict = {
    "达摩 ASR (中文)": {
        "lang": ["zh", "yue"],
        "size": ["large"],
        "path": "funasr_asr.py",
        "precision": ["float32"],
    },
    "Faster Whisper (多语种)": {
        "lang": ["auto", "en", "ja", "ko"],
        "size": get_models(),
        "path": "fasterwhisper_asr.py",
        "precision": ["float32", "float16", "int8"],
    },
}
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
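`asr_dict` is the registry the WebUI consults to decide which tool script to launch and which sizes, languages, and precisions to offer. A hedged sketch of such a dispatcher (the `run_asr_tool` helper and the folder paths are hypothetical):

```python
# Hypothetical dispatcher over asr_dict; shows how a caller could turn a
# registry entry into a command line for the matching tool script.
import subprocess
import sys

from tools.asr.config import asr_dict


def run_asr_tool(tool_name: str, input_folder: str, output_folder: str, language: str) -> None:
    entry = asr_dict[tool_name]
    assert language in entry["lang"], f"{language!r} is not offered by {tool_name}"
    cmd = [
        sys.executable, f"tools/asr/{entry['path']}",
        "-i", input_folder,
        "-o", output_folder,
        "-s", entry["size"][-1],        # pick the largest listed size
        "-l", language,
        "-p", entry["precision"][0],
    ]
    subprocess.run(cmd, check=True)


run_asr_tool("Faster Whisper (多语种)", "input/wavs", "output/asr_opt", "auto")
```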
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/asr/funasr_asr.py
tools/asr/funasr_asr.py
# -*- coding:utf-8 -*-

import argparse
import os
import traceback

from funasr import AutoModel
from modelscope import snapshot_download
from tqdm import tqdm

funasr_models = {}  # cache loaded models so they are not re-created on every call


def only_asr(input_file, language):
    try:
        model = create_model(language)
        text = model.generate(input=input_file)[0]["text"]
    except Exception:
        text = ""
        print(traceback.format_exc())
    return text


def create_model(language="zh"):
    if language == "zh":
        path_vad = "tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch"
        path_punc = "tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
        path_asr = "tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
        snapshot_download(
            "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
            local_dir="tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch",
        )
        snapshot_download(
            "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
            local_dir="tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
        )
        snapshot_download(
            "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
            local_dir="tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
        )
        model_revision = "v2.0.4"
    elif language == "yue":
        path_asr = "tools/asr/models/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online"
        snapshot_download(
            "iic/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online",
            local_dir="tools/asr/models/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online",
        )
        path_vad = path_punc = None
        vad_model_revision = punc_model_revision = ""
        model_revision = "master"
    else:
        raise ValueError(f"{language} is not supported")
    # Needed by the zh models; for yue the VAD/punctuation models are None, so these values are unused.
    vad_model_revision = punc_model_revision = "v2.0.4"

    if language in funasr_models:
        return funasr_models[language]
    else:
        model = AutoModel(
            model=path_asr,
            model_revision=model_revision,
            vad_model=path_vad,
            vad_model_revision=vad_model_revision,
            punc_model=path_punc,
            punc_model_revision=punc_model_revision,
        )
        print(f"FunASR model loaded: {language.upper()}")
        funasr_models[language] = model
        return model


def execute_asr(input_folder, output_folder, model_size, language):
    input_file_names = os.listdir(input_folder)
    input_file_names.sort()

    output = []
    output_file_name = os.path.basename(input_folder)
    model = create_model(language)

    for file_name in tqdm(input_file_names):
        try:
            print("\n" + file_name)
            file_path = os.path.join(input_folder, file_name)
            text = model.generate(input=file_path)[0]["text"]
            output.append(f"{file_path}|{output_file_name}|{language.upper()}|{text}")
        except Exception:
            print(traceback.format_exc())

    output_folder = output_folder or "output/asr_opt"
    os.makedirs(output_folder, exist_ok=True)
    output_file_path = os.path.abspath(f"{output_folder}/{output_file_name}.list")

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
    print(f"ASR task finished -> annotation file saved to: {output_file_path}\n")
    return output_file_path


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i", "--input_folder", type=str, required=True, help="Path to the folder containing WAV files."
    )
    parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default="large", help="Model Size of FunASR is Large")
    parser.add_argument(
        "-l", "--language", type=str, default="zh", choices=["zh", "yue", "auto"], help="Language of the audio files."
    )
    parser.add_argument(
        "-p", "--precision", type=str, default="float16", choices=["float16", "float32"], help="fp16 or fp32"
    )  # not wired up yet

    cmd = parser.parse_args()
    execute_asr(
        input_folder=cmd.input_folder,
        output_folder=cmd.output_folder,
        model_size=cmd.model_size,
        language=cmd.language,
    )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/i18n/scan_i18n.py
tools/i18n/scan_i18n.py
import ast
import glob
import json
import os
from collections import OrderedDict

I18N_JSON_DIR: os.PathLike = os.path.join(os.path.dirname(os.path.relpath(__file__)), "locale")
DEFAULT_LANGUAGE: str = "zh_CN"  # default language
TITLE_LEN: int = 60  # display width of section titles
KEY_LEN: int = 30  # display width of key names
SHOW_KEYS: bool = False  # whether to print per-key information
SORT_KEYS: bool = False  # whether to write the file sorted by global key order


def extract_i18n_strings(node):
    i18n_strings = []

    if isinstance(node, ast.Call) and isinstance(node.func, ast.Name) and node.func.id == "i18n":
        for arg in node.args:
            if isinstance(arg, ast.Str):
                i18n_strings.append(arg.s)

    for child_node in ast.iter_child_nodes(node):
        i18n_strings.extend(extract_i18n_strings(child_node))

    return i18n_strings


def scan_i18n_strings():
    """
    scan the directory for all .py files (recursively)
    for each file, parse the code into an AST
    for each AST, extract the i18n strings
    """
    strings = []
    print(" Scanning Files and Extracting i18n Strings ".center(TITLE_LEN, "="))
    for filename in glob.iglob("**/*.py", recursive=True):
        try:
            with open(filename, "r", encoding="utf-8") as f:
                code = f.read()
                if "I18nAuto" in code:
                    tree = ast.parse(code)
                    i18n_strings = extract_i18n_strings(tree)
                    print(f"{filename.ljust(KEY_LEN * 3 // 2)}: {len(i18n_strings)}")
                    if SHOW_KEYS:
                        print("\n".join([s for s in i18n_strings]))
                    strings.extend(i18n_strings)
        except Exception as e:
            print(f"\033[31m[Failed] Error occur at {filename}: {e}\033[0m")

    code_keys = set(strings)
    print(f"{'Total Unique'.ljust(KEY_LEN * 3 // 2)}: {len(code_keys)}")
    return code_keys


def update_i18n_json(json_file, standard_keys):
    standard_keys = sorted(standard_keys)
    print(f" Process {json_file} ".center(TITLE_LEN, "="))
    # read the JSON file
    with open(json_file, "r", encoding="utf-8") as f:
        json_data = json.load(f, object_pairs_hook=OrderedDict)
    # print the number of entries before processing
    len_before = len(json_data)
    print(f"{'Total Keys'.ljust(KEY_LEN)}: {len_before}")
    # find missing keys and fill them in
    miss_keys = set(standard_keys) - set(json_data.keys())
    if len(miss_keys) > 0:
        print(f"{'Missing Keys (+)'.ljust(KEY_LEN)}: {len(miss_keys)}")
        for key in miss_keys:
            if DEFAULT_LANGUAGE in json_file:
                # for the default language the value equals the key
                json_data[key] = key
            else:
                # for other languages, prefix the value with "#!" to mark it as untranslated
                json_data[key] = "#!" + key
            if SHOW_KEYS:
                print(f"{'Added Missing Key'.ljust(KEY_LEN)}: {key}")
    # find unused keys and delete them
    diff_keys = set(json_data.keys()) - set(standard_keys)
    if len(diff_keys) > 0:
        print(f"{'Unused Keys (-)'.ljust(KEY_LEN)}: {len(diff_keys)}")
        for key in diff_keys:
            del json_data[key]
            if SHOW_KEYS:
                print(f"{'Removed Unused Key'.ljust(KEY_LEN)}: {key}")
    # sort by key order
    json_data = OrderedDict(
        sorted(
            json_data.items(),
            key=lambda x: (
                list(standard_keys).index(x[0]) if x[0] in standard_keys and not x[1].startswith("#!") else len(json_data),
            ),
        )
    )
    # print the number of entries after processing
    if len(miss_keys) != 0 or len(diff_keys) != 0:
        print(f"{'Total Keys (After)'.ljust(KEY_LEN)}: {len(json_data)}")
    # find keys that still need translation
    num_miss_translation = 0
    duplicate_items = {}
    for key, value in json_data.items():
        if value.startswith("#!"):
            num_miss_translation += 1
            if SHOW_KEYS:
                print(f"{'Missing Translation'.ljust(KEY_LEN)}: {key}")
        if value in duplicate_items:
            duplicate_items[value].append(key)
        else:
            duplicate_items[value] = [key]
    # report duplicated values
    for value, keys in duplicate_items.items():
        if len(keys) > 1:
            print(
                "\n".join(
                    [f"\033[31m{'[Failed] Duplicate Value'.ljust(KEY_LEN)}: {key} -> {value}\033[0m" for key in keys]
                )
            )

    if num_miss_translation > 0:
        print(f"\033[31m{'[Failed] Missing Translation'.ljust(KEY_LEN)}: {num_miss_translation}\033[0m")
    else:
        print("\033[32m[Passed] All Keys Translated\033[0m")
    # write the processed result back to the JSON file
    with open(json_file, "w", encoding="utf-8") as f:
        json.dump(json_data, f, ensure_ascii=False, indent=4, sort_keys=SORT_KEYS)
        f.write("\n")
    print(f" Updated {json_file} ".center(TITLE_LEN, "=") + "\n")


if __name__ == "__main__":
    code_keys = scan_i18n_strings()
    for json_file in os.listdir(I18N_JSON_DIR):
        if json_file.endswith(r".json"):
            json_file = os.path.join(I18N_JSON_DIR, json_file)
            update_i18n_json(json_file, code_keys)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
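`extract_i18n_strings()` only collects literal string arguments of calls whose callee is literally named `i18n`; computed keys are invisible to the scan. A quick self-contained check:

```python
# extract_i18n_strings() walks the AST and keeps literal args of i18n(...) calls.
import ast

from tools.i18n.scan_i18n import extract_i18n_strings

snippet = '''
from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
print(i18n("模型"))
label = i18n("输出信息")
dynamic = i18n("前缀" + suffix)   # non-literal argument: not collected
'''
print(extract_i18n_strings(ast.parse(snippet)))  # ['模型', '输出信息']
```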
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/i18n/i18n.py
tools/i18n/i18n.py
import json
import locale
import os

I18N_JSON_DIR: os.PathLike = os.path.join(os.path.dirname(os.path.relpath(__file__)), "locale")


def load_language_list(language):
    with open(os.path.join(I18N_JSON_DIR, f"{language}.json"), "r", encoding="utf-8") as f:
        language_list = json.load(f)
    return language_list


def scan_language_list():
    language_list = []
    for name in os.listdir(I18N_JSON_DIR):
        if name.endswith(".json"):
            language_list.append(name.split(".")[0])
    return language_list


class I18nAuto:
    def __init__(self, language=None):
        if language in ["Auto", None]:
            language = locale.getdefaultlocale()[0]  # getlocale can't identify the system's language ((None, None))
        if not os.path.exists(os.path.join(I18N_JSON_DIR, f"{language}.json")):
            language = "en_US"
        self.language = language
        self.language_map = load_language_list(language)

    def __call__(self, key):
        return self.language_map.get(key, key)

    def __repr__(self):
        return "Use Language: " + self.language


if __name__ == "__main__":
    i18n = I18nAuto(language="en_US")
    print(i18n)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
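`I18nAuto` resolves a locale JSON at construction time and degrades gracefully: unknown locales fall back to `en_US`, and unknown keys are returned unchanged. Usage:

```python
from tools.i18n.i18n import I18nAuto, scan_language_list

print(scan_language_list())       # locale JSONs found under tools/i18n/locale
i18n = I18nAuto(language="en_US")
print(i18n)                       # "Use Language: en_US"
print(i18n("一个不存在的键"))      # missing key -> the key itself is returned
```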
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/AP_BWE_main/models/model.py
tools/AP_BWE_main/models/model.py
import torch import torch.nn.functional as F import torch.nn as nn from torch.nn.utils import weight_norm, spectral_norm # from utils import init_weights, get_padding def get_padding(kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) def init_weights(m, mean=0.0, std=0.01): classname = m.__class__.__name__ if classname.find("Conv") != -1: m.weight.data.normal_(mean, std) import numpy as np from typing import Tuple, List LRELU_SLOPE = 0.1 class ConvNeXtBlock(nn.Module): """ConvNeXt Block adapted from https://github.com/facebookresearch/ConvNeXt to 1D audio signal. Args: dim (int): Number of input channels. intermediate_dim (int): Dimensionality of the intermediate layer. layer_scale_init_value (float, optional): Initial value for the layer scale. None means no scaling. Defaults to None. adanorm_num_embeddings (int, optional): Number of embeddings for AdaLayerNorm. None means non-conditional LayerNorm. Defaults to None. """ def __init__( self, dim: int, layer_scale_init_value=None, adanorm_num_embeddings=None, ): super().__init__() self.dwconv = nn.Conv1d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv self.adanorm = adanorm_num_embeddings is not None self.norm = nn.LayerNorm(dim, eps=1e-6) self.pwconv1 = nn.Linear(dim, dim * 3) # pointwise/1x1 convs, implemented with linear layers self.act = nn.GELU() self.pwconv2 = nn.Linear(dim * 3, dim) self.gamma = ( nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True) if layer_scale_init_value > 0 else None ) def forward(self, x, cond_embedding_id=None): residual = x x = self.dwconv(x) x = x.transpose(1, 2) # (B, C, T) -> (B, T, C) if self.adanorm: assert cond_embedding_id is not None x = self.norm(x, cond_embedding_id) else: x = self.norm(x) x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) if self.gamma is not None: x = self.gamma * x x = x.transpose(1, 2) # (B, T, C) -> (B, C, T) x = residual + x return x class APNet_BWE_Model(torch.nn.Module): def __init__(self, h): super(APNet_BWE_Model, self).__init__() self.h = h self.adanorm_num_embeddings = None layer_scale_init_value = 1 / h.ConvNeXt_layers self.conv_pre_mag = nn.Conv1d(h.n_fft // 2 + 1, h.ConvNeXt_channels, 7, 1, padding=get_padding(7, 1)) self.norm_pre_mag = nn.LayerNorm(h.ConvNeXt_channels, eps=1e-6) self.conv_pre_pha = nn.Conv1d(h.n_fft // 2 + 1, h.ConvNeXt_channels, 7, 1, padding=get_padding(7, 1)) self.norm_pre_pha = nn.LayerNorm(h.ConvNeXt_channels, eps=1e-6) self.convnext_mag = nn.ModuleList( [ ConvNeXtBlock( dim=h.ConvNeXt_channels, layer_scale_init_value=layer_scale_init_value, adanorm_num_embeddings=self.adanorm_num_embeddings, ) for _ in range(h.ConvNeXt_layers) ] ) self.convnext_pha = nn.ModuleList( [ ConvNeXtBlock( dim=h.ConvNeXt_channels, layer_scale_init_value=layer_scale_init_value, adanorm_num_embeddings=self.adanorm_num_embeddings, ) for _ in range(h.ConvNeXt_layers) ] ) self.norm_post_mag = nn.LayerNorm(h.ConvNeXt_channels, eps=1e-6) self.norm_post_pha = nn.LayerNorm(h.ConvNeXt_channels, eps=1e-6) self.apply(self._init_weights) self.linear_post_mag = nn.Linear(h.ConvNeXt_channels, h.n_fft // 2 + 1) self.linear_post_pha_r = nn.Linear(h.ConvNeXt_channels, h.n_fft // 2 + 1) self.linear_post_pha_i = nn.Linear(h.ConvNeXt_channels, h.n_fft // 2 + 1) def _init_weights(self, m): if isinstance(m, (nn.Conv1d, nn.Linear)): nn.init.trunc_normal_(m.weight, std=0.02) nn.init.constant_(m.bias, 0) def forward(self, mag_nb, pha_nb): x_mag = self.conv_pre_mag(mag_nb) x_pha = self.conv_pre_pha(pha_nb) x_mag = 
self.norm_pre_mag(x_mag.transpose(1, 2)).transpose(1, 2) x_pha = self.norm_pre_pha(x_pha.transpose(1, 2)).transpose(1, 2) for conv_block_mag, conv_block_pha in zip(self.convnext_mag, self.convnext_pha): x_mag = x_mag + x_pha x_pha = x_pha + x_mag x_mag = conv_block_mag(x_mag, cond_embedding_id=None) x_pha = conv_block_pha(x_pha, cond_embedding_id=None) x_mag = self.norm_post_mag(x_mag.transpose(1, 2)) mag_wb = mag_nb + self.linear_post_mag(x_mag).transpose(1, 2) x_pha = self.norm_post_pha(x_pha.transpose(1, 2)) x_pha_r = self.linear_post_pha_r(x_pha) x_pha_i = self.linear_post_pha_i(x_pha) pha_wb = torch.atan2(x_pha_i, x_pha_r).transpose(1, 2) com_wb = torch.stack((torch.exp(mag_wb) * torch.cos(pha_wb), torch.exp(mag_wb) * torch.sin(pha_wb)), dim=-1) return mag_wb, pha_wb, com_wb class DiscriminatorP(torch.nn.Module): def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): super(DiscriminatorP, self).__init__() self.period = period norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.convs = nn.ModuleList( [ norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), ] ) self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) def forward(self, x): fmap = [] # 1d to 2d b, c, t = x.shape if t % self.period != 0: # pad first n_pad = self.period - (t % self.period) x = F.pad(x, (0, n_pad), "reflect") t = t + n_pad x = x.view(b, c, t // self.period, self.period) for i, l in enumerate(self.convs): x = l(x) x = F.leaky_relu(x, LRELU_SLOPE) if i > 0: fmap.append(x) x = self.conv_post(x) fmap.append(x) x = torch.flatten(x, 1, -1) return x, fmap class MultiPeriodDiscriminator(torch.nn.Module): def __init__(self): super(MultiPeriodDiscriminator, self).__init__() self.discriminators = nn.ModuleList( [ DiscriminatorP(2), DiscriminatorP(3), DiscriminatorP(5), DiscriminatorP(7), DiscriminatorP(11), ] ) def forward(self, y, y_hat): y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for i, d in enumerate(self.discriminators): y_d_r, fmap_r = d(y) y_d_g, fmap_g = d(y_hat) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs class MultiResolutionAmplitudeDiscriminator(nn.Module): def __init__( self, resolutions: Tuple[Tuple[int, int, int]] = ((512, 128, 512), (1024, 256, 1024), (2048, 512, 2048)), num_embeddings: int = None, ): super().__init__() self.discriminators = nn.ModuleList( [DiscriminatorAR(resolution=r, num_embeddings=num_embeddings) for r in resolutions] ) def forward( self, y: torch.Tensor, y_hat: torch.Tensor, bandwidth_id: torch.Tensor = None ) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[List[torch.Tensor]], List[List[torch.Tensor]]]: y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for d in self.discriminators: y_d_r, fmap_r = d(x=y, cond_embedding_id=bandwidth_id) y_d_g, fmap_g = d(x=y_hat, cond_embedding_id=bandwidth_id) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs class DiscriminatorAR(nn.Module): def __init__( self, resolution: Tuple[int, int, int], channels: int = 64, in_channels: int = 1, 
num_embeddings: int = None, ): super().__init__() self.resolution = resolution self.in_channels = in_channels self.convs = nn.ModuleList( [ weight_norm(nn.Conv2d(in_channels, channels, kernel_size=(7, 5), stride=(2, 2), padding=(3, 2))), weight_norm(nn.Conv2d(channels, channels, kernel_size=(5, 3), stride=(2, 1), padding=(2, 1))), weight_norm(nn.Conv2d(channels, channels, kernel_size=(5, 3), stride=(2, 2), padding=(2, 1))), weight_norm(nn.Conv2d(channels, channels, kernel_size=3, stride=(2, 1), padding=1)), weight_norm(nn.Conv2d(channels, channels, kernel_size=3, stride=(2, 2), padding=1)), ] ) if num_embeddings is not None: self.emb = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=channels) torch.nn.init.zeros_(self.emb.weight) self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), padding=(1, 1))) def forward( self, x: torch.Tensor, cond_embedding_id: torch.Tensor = None ) -> Tuple[torch.Tensor, List[torch.Tensor]]: fmap = [] x = x.squeeze(1) x = self.spectrogram(x) x = x.unsqueeze(1) for l in self.convs: x = l(x) x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) if cond_embedding_id is not None: emb = self.emb(cond_embedding_id) h = (emb.view(1, -1, 1, 1) * x).sum(dim=1, keepdims=True) else: h = 0 x = self.conv_post(x) fmap.append(x) x += h x = torch.flatten(x, 1, -1) return x, fmap def spectrogram(self, x: torch.Tensor) -> torch.Tensor: n_fft, hop_length, win_length = self.resolution amplitude_spectrogram = torch.stft( x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=None, # interestingly rectangular window kind of works here center=True, return_complex=True, ).abs() return amplitude_spectrogram class MultiResolutionPhaseDiscriminator(nn.Module): def __init__( self, resolutions: Tuple[Tuple[int, int, int]] = ((512, 128, 512), (1024, 256, 1024), (2048, 512, 2048)), num_embeddings: int = None, ): super().__init__() self.discriminators = nn.ModuleList( [DiscriminatorPR(resolution=r, num_embeddings=num_embeddings) for r in resolutions] ) def forward( self, y: torch.Tensor, y_hat: torch.Tensor, bandwidth_id: torch.Tensor = None ) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[List[torch.Tensor]], List[List[torch.Tensor]]]: y_d_rs = [] y_d_gs = [] fmap_rs = [] fmap_gs = [] for d in self.discriminators: y_d_r, fmap_r = d(x=y, cond_embedding_id=bandwidth_id) y_d_g, fmap_g = d(x=y_hat, cond_embedding_id=bandwidth_id) y_d_rs.append(y_d_r) fmap_rs.append(fmap_r) y_d_gs.append(y_d_g) fmap_gs.append(fmap_g) return y_d_rs, y_d_gs, fmap_rs, fmap_gs class DiscriminatorPR(nn.Module): def __init__( self, resolution: Tuple[int, int, int], channels: int = 64, in_channels: int = 1, num_embeddings: int = None, ): super().__init__() self.resolution = resolution self.in_channels = in_channels self.convs = nn.ModuleList( [ weight_norm(nn.Conv2d(in_channels, channels, kernel_size=(7, 5), stride=(2, 2), padding=(3, 2))), weight_norm(nn.Conv2d(channels, channels, kernel_size=(5, 3), stride=(2, 1), padding=(2, 1))), weight_norm(nn.Conv2d(channels, channels, kernel_size=(5, 3), stride=(2, 2), padding=(2, 1))), weight_norm(nn.Conv2d(channels, channels, kernel_size=3, stride=(2, 1), padding=1)), weight_norm(nn.Conv2d(channels, channels, kernel_size=3, stride=(2, 2), padding=1)), ] ) if num_embeddings is not None: self.emb = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=channels) torch.nn.init.zeros_(self.emb.weight) self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), padding=(1, 1))) def forward( self, x: torch.Tensor, cond_embedding_id: 
torch.Tensor = None ) -> Tuple[torch.Tensor, List[torch.Tensor]]: fmap = [] x = x.squeeze(1) x = self.spectrogram(x) x = x.unsqueeze(1) for l in self.convs: x = l(x) x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) if cond_embedding_id is not None: emb = self.emb(cond_embedding_id) h = (emb.view(1, -1, 1, 1) * x).sum(dim=1, keepdims=True) else: h = 0 x = self.conv_post(x) fmap.append(x) x += h x = torch.flatten(x, 1, -1) return x, fmap def spectrogram(self, x: torch.Tensor) -> torch.Tensor: n_fft, hop_length, win_length = self.resolution phase_spectrogram = torch.stft( x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=None, # interestingly rectangular window kind of works here center=True, return_complex=True, ).angle() return phase_spectrogram def feature_loss(fmap_r, fmap_g): loss = 0 for dr, dg in zip(fmap_r, fmap_g): for rl, gl in zip(dr, dg): loss += torch.mean(torch.abs(rl - gl)) return loss def discriminator_loss(disc_real_outputs, disc_generated_outputs): loss = 0 r_losses = [] g_losses = [] for dr, dg in zip(disc_real_outputs, disc_generated_outputs): r_loss = torch.mean(torch.clamp(1 - dr, min=0)) g_loss = torch.mean(torch.clamp(1 + dg, min=0)) loss += r_loss + g_loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) return loss, r_losses, g_losses def generator_loss(disc_outputs): loss = 0 gen_losses = [] for dg in disc_outputs: l = torch.mean(torch.clamp(1 - dg, min=0)) gen_losses.append(l) loss += l return loss, gen_losses def phase_losses(phase_r, phase_g): ip_loss = torch.mean(anti_wrapping_function(phase_r - phase_g)) gd_loss = torch.mean(anti_wrapping_function(torch.diff(phase_r, dim=1) - torch.diff(phase_g, dim=1))) iaf_loss = torch.mean(anti_wrapping_function(torch.diff(phase_r, dim=2) - torch.diff(phase_g, dim=2))) return ip_loss, gd_loss, iaf_loss def anti_wrapping_function(x): return torch.abs(x - torch.round(x / (2 * np.pi)) * 2 * np.pi) def stft_mag(audio, n_fft=2048, hop_length=512): hann_window = torch.hann_window(n_fft).to(audio.device) stft_spec = torch.stft(audio, n_fft, hop_length, window=hann_window, return_complex=True) stft_mag = torch.abs(stft_spec) return stft_mag def cal_snr(pred, target): snr = (20 * torch.log10(torch.norm(target, dim=-1) / torch.norm(pred - target, dim=-1).clamp(min=1e-8))).mean() return snr def cal_lsd(pred, target): sp = torch.log10(stft_mag(pred).square().clamp(1e-8)) st = torch.log10(stft_mag(target).square().clamp(1e-8)) return (sp - st).square().mean(dim=1).sqrt().mean()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
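The AP-BWE generator maps narrow-band log-magnitude and phase spectra to wide-band ones through two coupled ConvNeXt stacks. A shape check under assumed hyperparameters (the released checkpoint's configuration may differ):

```python
# Shape sanity check for APNet_BWE_Model; the hyperparameter values here are
# illustrative assumptions, not the shipped checkpoint's configuration.
from types import SimpleNamespace

import torch

from tools.AP_BWE_main.models.model import APNet_BWE_Model

h = SimpleNamespace(n_fft=1024, ConvNeXt_channels=256, ConvNeXt_layers=8)
model = APNet_BWE_Model(h).eval()

bins, frames = h.n_fft // 2 + 1, 100
mag_nb = torch.randn(2, bins, frames)   # (batch, freq bins, frames)
pha_nb = torch.randn(2, bins, frames)
with torch.no_grad():
    mag_wb, pha_wb, com_wb = model(mag_nb, pha_nb)
print(mag_wb.shape, pha_wb.shape, com_wb.shape)  # (2, 513, 100) twice, then (2, 513, 100, 2)
```

Note the residual `mag_wb = mag_nb + ...` in the forward pass: the network predicts the missing high band on top of the narrow-band log magnitude rather than regenerating the whole spectrum.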
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/AP_BWE_main/models/__init__.py
tools/AP_BWE_main/models/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/AP_BWE_main/datasets1/dataset.py
tools/AP_BWE_main/datasets1/dataset.py
import os
import random

import torch
import torch.utils.data
import torchaudio
import torchaudio.functional as aF


def amp_pha_stft(audio, n_fft, hop_size, win_size, center=True):
    hann_window = torch.hann_window(win_size).to(audio.device)
    stft_spec = torch.stft(
        audio,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window,
        center=center,
        pad_mode="reflect",
        normalized=False,
        return_complex=True,
    )
    log_amp = torch.log(torch.abs(stft_spec) + 1e-4)
    pha = torch.angle(stft_spec)

    com = torch.stack((torch.exp(log_amp) * torch.cos(pha), torch.exp(log_amp) * torch.sin(pha)), dim=-1)

    return log_amp, pha, com


def amp_pha_istft(log_amp, pha, n_fft, hop_size, win_size, center=True):
    amp = torch.exp(log_amp)
    com = torch.complex(amp * torch.cos(pha), amp * torch.sin(pha))
    hann_window = torch.hann_window(win_size).to(com.device)
    audio = torch.istft(com, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window, center=center)

    return audio


def get_dataset_filelist(a):
    with open(a.input_training_file, "r", encoding="utf-8") as fi:
        training_indexes = [x.split("|")[0] for x in fi.read().split("\n") if len(x) > 0]

    with open(a.input_validation_file, "r", encoding="utf-8") as fi:
        validation_indexes = [x.split("|")[0] for x in fi.read().split("\n") if len(x) > 0]

    return training_indexes, validation_indexes


class Dataset(torch.utils.data.Dataset):
    def __init__(
        self,
        training_indexes,
        wavs_dir,
        segment_size,
        hr_sampling_rate,
        lr_sampling_rate,
        split=True,
        shuffle=True,
        n_cache_reuse=1,
        device=None,
    ):
        self.audio_indexes = training_indexes
        random.seed(1234)
        if shuffle:
            random.shuffle(self.audio_indexes)
        self.wavs_dir = wavs_dir
        self.segment_size = segment_size
        self.hr_sampling_rate = hr_sampling_rate
        self.lr_sampling_rate = lr_sampling_rate
        self.split = split
        self.cached_wav = None
        self.cached_sr = None
        self.n_cache_reuse = n_cache_reuse
        self._cache_ref_count = 0
        self.device = device

    def __getitem__(self, index):
        filename = self.audio_indexes[index]
        if self._cache_ref_count == 0:
            audio, orig_sampling_rate = torchaudio.load(os.path.join(self.wavs_dir, filename + ".wav"))
            self.cached_wav = audio
            self.cached_sr = orig_sampling_rate  # cache the rate too, so cache hits can still resample
            self._cache_ref_count = self.n_cache_reuse
        else:
            audio = self.cached_wav
            orig_sampling_rate = self.cached_sr  # fix: was undefined on the cache-hit path
            self._cache_ref_count -= 1

        if orig_sampling_rate == self.hr_sampling_rate:
            audio_hr = audio
        else:
            audio_hr = aF.resample(audio, orig_freq=orig_sampling_rate, new_freq=self.hr_sampling_rate)

        audio_lr = aF.resample(audio, orig_freq=orig_sampling_rate, new_freq=self.lr_sampling_rate)
        audio_lr = aF.resample(audio_lr, orig_freq=self.lr_sampling_rate, new_freq=self.hr_sampling_rate)
        audio_lr = audio_lr[:, : audio_hr.size(1)]

        if self.split:
            if audio_hr.size(1) >= self.segment_size:
                max_audio_start = audio_hr.size(1) - self.segment_size
                audio_start = random.randint(0, max_audio_start)
                audio_hr = audio_hr[:, audio_start : audio_start + self.segment_size]
                audio_lr = audio_lr[:, audio_start : audio_start + self.segment_size]
            else:
                audio_hr = torch.nn.functional.pad(audio_hr, (0, self.segment_size - audio_hr.size(1)), "constant")
                audio_lr = torch.nn.functional.pad(audio_lr, (0, self.segment_size - audio_lr.size(1)), "constant")

        return (audio_hr.squeeze(), audio_lr.squeeze())

    def __len__(self):
        return len(self.audio_indexes)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
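`amp_pha_stft`/`amp_pha_istft` are designed as inverses, up to the `1e-4` floor added before the log. A quick round-trip check with assumed STFT settings:

```python
# Round-trip check for the STFT helpers; the n_fft/hop/win values are assumptions.
import torch

from tools.AP_BWE_main.datasets1.dataset import amp_pha_istft, amp_pha_stft

n_fft, hop, win = 1024, 256, 1024
audio = torch.randn(1, 16000)

log_amp, pha, com = amp_pha_stft(audio, n_fft, hop, win)
print(log_amp.shape, pha.shape, com.shape)   # (1, 513, 63), (1, 513, 63), (1, 513, 63, 2)

recon = amp_pha_istft(log_amp, pha, n_fft, hop, win)
n = min(audio.size(1), recon.size(1))
print((audio[:, :n] - recon[:, :n]).abs().max())  # error on the order of the 1e-4 magnitude floor
```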
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/AP_BWE_main/datasets1/__init__.py
tools/AP_BWE_main/datasets1/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/vr.py
tools/uvr5/vr.py
import os parent_directory = os.path.dirname(os.path.abspath(__file__)) import logging logger = logging.getLogger(__name__) import librosa import numpy as np import soundfile as sf import torch from lib.lib_v5 import nets_61968KB as Nets from lib.lib_v5 import spec_utils from lib.lib_v5.model_param_init import ModelParameters from lib.lib_v5.nets_new import CascadedNet from lib.utils import inference class AudioPre: def __init__(self, agg, model_path, device, is_half, tta=False): self.model_path = model_path self.device = device self.data = { # Processing Options "postprocess": False, "tta": tta, # Constants "window_size": 512, "agg": agg, "high_end_process": "mirroring", } mp = ModelParameters("%s/lib/lib_v5/modelparams/4band_v2.json" % parent_directory) model = Nets.CascadedASPPNet(mp.param["bins"] * 2) cpk = torch.load(model_path, map_location="cpu") model.load_state_dict(cpk) model.eval() if is_half: model = model.half().to(device) else: model = model.to(device) self.mp = mp self.model = model def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac", is_hp3=False): if ins_root is None and vocal_root is None: return "No save root." name = os.path.basename(music_file) if ins_root is not None: os.makedirs(ins_root, exist_ok=True) if vocal_root is not None: os.makedirs(vocal_root, exist_ok=True) X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} bands_n = len(self.mp.param["band"]) # print(bands_n) for d in range(bands_n, 0, -1): bp = self.mp.param["band"][d] if d == bands_n: # high-end band ( X_wave[d], _, ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 music_file, sr=bp["sr"], mono=False, dtype=np.float32, res_type=bp["res_type"], ) if X_wave[d].ndim == 1: X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) else: # lower bands X_wave[d] = librosa.core.resample( X_wave[d + 1], orig_sr=self.mp.param["band"][d + 1]["sr"], target_sr=bp["sr"], res_type=bp["res_type"], ) # Stft of wave source X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( X_wave[d], bp["hl"], bp["n_fft"], self.mp.param["mid_side"], self.mp.param["mid_side_b2"], self.mp.param["reverse"], ) # pdb.set_trace() if d == bands_n and self.data["high_end_process"] != "none": input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] ) input_high_end = X_spec_s[d][:, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :] X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) aggresive_set = float(self.data["agg"] / 100) aggressiveness = { "value": aggresive_set, "split_bin": self.mp.param["band"][1]["crop_stop"], } with torch.no_grad(): pred, X_mag, X_phase = inference(X_spec_m, self.device, self.model, aggressiveness, self.data) # Postprocess if self.data["postprocess"]: pred_inv = np.clip(X_mag - pred, 0, np.inf) pred = spec_utils.mask_silence(pred, pred_inv) y_spec_m = pred * X_phase v_spec_m = X_spec_m - y_spec_m if is_hp3 == True: ins_root, vocal_root = vocal_root, ins_root if ins_root is not None: if self.data["high_end_process"].startswith("mirroring"): input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], y_spec_m, input_high_end, self.mp) wav_instrument = spec_utils.cmb_spectrogram_to_wave( y_spec_m, self.mp, input_high_end_h, input_high_end_ ) else: wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) logger.info("%s instruments done" % name) if is_hp3 == True: head = "vocal_" else: head = "instrument_" if format in ["wav", "flac"]: sf.write( os.path.join( 
ins_root, head + "{}_{}.{}".format(name, self.data["agg"], format), ), (np.array(wav_instrument) * 32768).astype("int16"), self.mp.param["sr"], ) # else: path = os.path.join(ins_root, head + "{}_{}.wav".format(name, self.data["agg"])) sf.write( path, (np.array(wav_instrument) * 32768).astype("int16"), self.mp.param["sr"], ) if os.path.exists(path): opt_format_path = path[:-4] + ".%s" % format cmd = 'ffmpeg -i "%s" -vn "%s" -q:a 2 -y' % (path, opt_format_path) print(cmd) os.system(cmd) if os.path.exists(opt_format_path): try: os.remove(path) except: pass if vocal_root is not None: if is_hp3 == True: head = "instrument_" else: head = "vocal_" if self.data["high_end_process"].startswith("mirroring"): input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], v_spec_m, input_high_end, self.mp) wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp, input_high_end_h, input_high_end_) else: wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) logger.info("%s vocals done" % name) if format in ["wav", "flac"]: sf.write( os.path.join( vocal_root, head + "{}_{}.{}".format(name, self.data["agg"], format), ), (np.array(wav_vocals) * 32768).astype("int16"), self.mp.param["sr"], ) else: path = os.path.join(vocal_root, head + "{}_{}.wav".format(name, self.data["agg"])) sf.write( path, (np.array(wav_vocals) * 32768).astype("int16"), self.mp.param["sr"], ) if os.path.exists(path): opt_format_path = path[:-4] + ".%s" % format cmd = 'ffmpeg -i "%s" -vn "%s" -q:a 2 -y' % (path, opt_format_path) print(cmd) os.system(cmd) if os.path.exists(opt_format_path): try: os.remove(path) except: pass class AudioPreDeEcho: def __init__(self, agg, model_path, device, is_half, tta=False): self.model_path = model_path self.device = device self.data = { # Processing Options "postprocess": False, "tta": tta, # Constants "window_size": 512, "agg": agg, "high_end_process": "mirroring", } mp = ModelParameters("%s/lib/lib_v5/modelparams/4band_v3.json" % parent_directory) nout = 64 if "DeReverb" in model_path else 48 model = CascadedNet(mp.param["bins"] * 2, nout) cpk = torch.load(model_path, map_location="cpu") model.load_state_dict(cpk) model.eval() if is_half: model = model.half().to(device) else: model = model.to(device) self.mp = mp self.model = model def _path_audio_( self, music_file, vocal_root=None, ins_root=None, format="flac", is_hp3=False ): # 3个VR模型vocal和ins是反的 if ins_root is None and vocal_root is None: return "No save root." 
name = os.path.basename(music_file) if ins_root is not None: os.makedirs(ins_root, exist_ok=True) if vocal_root is not None: os.makedirs(vocal_root, exist_ok=True) X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} bands_n = len(self.mp.param["band"]) # print(bands_n) for d in range(bands_n, 0, -1): bp = self.mp.param["band"][d] if d == bands_n: # high-end band ( X_wave[d], _, ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 music_file, sr=bp["sr"], mono=False, dtype=np.float32, res_type=bp["res_type"], ) if X_wave[d].ndim == 1: X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) else: # lower bands X_wave[d] = librosa.core.resample( X_wave[d + 1], orig_sr=self.mp.param["band"][d + 1]["sr"], target_sr=bp["sr"], res_type=bp["res_type"], ) # Stft of wave source X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( X_wave[d], bp["hl"], bp["n_fft"], self.mp.param["mid_side"], self.mp.param["mid_side_b2"], self.mp.param["reverse"], ) # pdb.set_trace() if d == bands_n and self.data["high_end_process"] != "none": input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] ) input_high_end = X_spec_s[d][:, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :] X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) aggresive_set = float(self.data["agg"] / 100) aggressiveness = { "value": aggresive_set, "split_bin": self.mp.param["band"][1]["crop_stop"], } with torch.no_grad(): pred, X_mag, X_phase = inference(X_spec_m, self.device, self.model, aggressiveness, self.data) # Postprocess if self.data["postprocess"]: pred_inv = np.clip(X_mag - pred, 0, np.inf) pred = spec_utils.mask_silence(pred, pred_inv) y_spec_m = pred * X_phase v_spec_m = X_spec_m - y_spec_m if ins_root is not None: if self.data["high_end_process"].startswith("mirroring"): input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], y_spec_m, input_high_end, self.mp) wav_instrument = spec_utils.cmb_spectrogram_to_wave( y_spec_m, self.mp, input_high_end_h, input_high_end_ ) else: wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) logger.info("%s instruments done" % name) if format in ["wav", "flac"]: sf.write( os.path.join( ins_root, "vocal_{}_{}.{}".format(name, self.data["agg"], format), ), (np.array(wav_instrument) * 32768).astype("int16"), self.mp.param["sr"], ) # else: path = os.path.join(ins_root, "vocal_{}_{}.wav".format(name, self.data["agg"])) sf.write( path, (np.array(wav_instrument) * 32768).astype("int16"), self.mp.param["sr"], ) if os.path.exists(path): opt_format_path = path[:-4] + ".%s" % format cmd = 'ffmpeg -i "%s" -vn "%s" -q:a 2 -y' % (path, opt_format_path) print(cmd) os.system(cmd) if os.path.exists(opt_format_path): try: os.remove(path) except: pass if vocal_root is not None: if self.data["high_end_process"].startswith("mirroring"): input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], v_spec_m, input_high_end, self.mp) wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp, input_high_end_h, input_high_end_) else: wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) logger.info("%s vocals done" % name) if format in ["wav", "flac"]: sf.write( os.path.join( vocal_root, "instrument_{}_{}.{}".format(name, self.data["agg"], format), ), (np.array(wav_vocals) * 32768).astype("int16"), self.mp.param["sr"], ) else: path = os.path.join(vocal_root, "instrument_{}_{}.wav".format(name, self.data["agg"])) sf.write( path, (np.array(wav_vocals) * 
32768).astype("int16"), self.mp.param["sr"], ) if os.path.exists(path): opt_format_path = path[:-4] + ".%s" % format cmd = 'ffmpeg -i "%s" -vn "%s" -q:a 2 -y' % (path, opt_format_path) print(cmd) os.system(cmd) if os.path.exists(opt_format_path): try: os.remove(path) except: pass
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
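Both `AudioPre` and `AudioPreDeEcho` share the `_path_audio_` entry point. A hedged usage sketch (the weight filename is an example; `vr.py` imports its local `lib` package, so `tools/uvr5` must be on `sys.path`):

```python
import sys

import torch

sys.path.insert(0, "tools/uvr5")  # vr.py does `from lib.lib_v5 import ...`
from vr import AudioPre

device = "cuda" if torch.cuda.is_available() else "cpu"
splitter = AudioPre(
    agg=10,                                                   # vocal-extraction aggressiveness (0-20)
    model_path="tools/uvr5/uvr5_weights/HP2_all_vocals.pth",  # assumed weight file name
    device=device,
    is_half=False,
)
# Writes instrument_* and vocal_* files into the given folders
# (the roles swap when is_hp3=True, matching the HP3 model's output order).
splitter._path_audio_("song.wav", ins_root="output/uvr5_opt", vocal_root="output/uvr5_opt", format="flac")
```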
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/webui.py
tools/uvr5/webui.py
import logging import os import traceback import gradio as gr from tools.i18n.i18n import I18nAuto from tools.my_utils import clean_path i18n = I18nAuto() logger = logging.getLogger(__name__) import sys import ffmpeg import torch from bsroformer import Roformer_Loader from mdxnet import MDXNetDereverb from vr import AudioPre, AudioPreDeEcho weight_uvr5_root = "tools/uvr5/uvr5_weights" uvr5_names = [] for name in os.listdir(weight_uvr5_root): if name.endswith(".pth") or name.endswith(".ckpt") or "onnx" in name: uvr5_names.append(name.replace(".pth", "").replace(".ckpt", "")) device = sys.argv[1] is_half = eval(sys.argv[2]) webui_port_uvr5 = int(sys.argv[3]) is_share = eval(sys.argv[4]) def html_left(text, label="p"): return f"""<div style="text-align: left; margin: 0; padding: 0;"> <{label} style="margin: 0; padding: 0;">{text}</{label}> </div>""" def html_center(text, label="p"): return f"""<div style="text-align: center; margin: 100; padding: 50;"> <{label} style="margin: 0; padding: 0;">{text}</{label}> </div>""" def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): infos = [] try: inp_root = clean_path(inp_root) save_root_vocal = clean_path(save_root_vocal) save_root_ins = clean_path(save_root_ins) is_hp3 = "HP3" in model_name if model_name == "onnx_dereverb_By_FoxJoy": pre_fun = MDXNetDereverb(15) elif "roformer" in model_name.lower(): func = Roformer_Loader pre_fun = func( model_path=os.path.join(weight_uvr5_root, model_name + ".ckpt"), config_path=os.path.join(weight_uvr5_root, model_name + ".yaml"), device=device, is_half=is_half, ) if not os.path.exists(os.path.join(weight_uvr5_root, model_name + ".yaml")): infos.append( "Warning: You are using a model without a configuration file. The program will automatically use the default configuration file. However, the default configuration file cannot guarantee that all models will run successfully. You can manually place the model configuration file into 'tools/uvr5/uvr5w_weights' and ensure that the configuration file is named as '<model_name>.yaml' then try it again. (For example, the configuration file corresponding to the model 'bs_roformer_ep_368_sdr_12.9628.ckpt' should be 'bs_roformer_ep_368_sdr_12.9628.yaml'.) Or you can just ignore this warning." 
) yield "\n".join(infos) else: func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho pre_fun = func( agg=int(agg), model_path=os.path.join(weight_uvr5_root, model_name + ".pth"), device=device, is_half=is_half, ) if inp_root != "": paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)] else: paths = [path.name for path in paths] for path in paths: inp_path = os.path.join(inp_root, path) if os.path.isfile(inp_path) == False: continue need_reformat = 1 done = 0 try: info = ffmpeg.probe(inp_path, cmd="ffprobe") if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100": need_reformat = 0 pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0, is_hp3) done = 1 except: need_reformat = 1 traceback.print_exc() if need_reformat == 1: tmp_path = "%s/%s.reformatted.wav" % ( os.path.join(os.environ["TEMP"]), os.path.basename(inp_path), ) os.system(f'ffmpeg -i "{inp_path}" -vn -acodec pcm_s16le -ac 2 -ar 44100 "{tmp_path}" -y') inp_path = tmp_path try: if done == 0: pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0, is_hp3) infos.append("%s->Success" % (os.path.basename(inp_path))) yield "\n".join(infos) except: infos.append("%s->%s" % (os.path.basename(inp_path), traceback.format_exc())) yield "\n".join(infos) except: infos.append(traceback.format_exc()) yield "\n".join(infos) finally: try: if model_name == "onnx_dereverb_By_FoxJoy": del pre_fun.pred.model del pre_fun.pred.model_ else: del pre_fun.model del pre_fun except: traceback.print_exc() print("clean_empty_cache") if torch.cuda.is_available(): torch.cuda.empty_cache() yield "\n".join(infos) with gr.Blocks(title="UVR5 WebUI", analytics_enabled=False) as app: gr.Markdown( value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.") + "<br>" + i18n("如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 
详见根目录LICENSE.") ) with gr.Group(): gr.Markdown(html_center(i18n("伴奏人声分离&去混响&去回声"), "h2")) with gr.Group(): gr.Markdown( value=html_left( i18n("人声伴奏分离批量处理, 使用UVR5模型。") + "<br>" + i18n( "合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。" ) + "<br>" + i18n("模型分为三类:") + "<br>" + i18n( "1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;" ) + "<br>" + i18n("2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;") + "<br>" + i18n("3、去混响、去延迟模型(by FoxJoy):") + "<br>  " + i18n("(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;") + "<br>&emsp;" + i18n( "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。" ) + "<br>" + i18n("去混响/去延迟,附:") + "<br>" + i18n("1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;") + "<br>" + i18n("2、MDX-Net-Dereverb模型挺慢的;") + "<br>" + i18n("3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。"), "h4", ) ) with gr.Row(): with gr.Column(): model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names) dir_wav_input = gr.Textbox( label=i18n("输入待处理音频文件夹路径"), placeholder="C:\\Users\\Desktop\\todo-songs", ) wav_inputs = gr.File( file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") ) with gr.Column(): agg = gr.Slider( minimum=0, maximum=20, step=1, label=i18n("人声提取激进程度"), value=10, interactive=True, visible=False, # 先不开放调整 ) opt_vocal_root = gr.Textbox(label=i18n("指定输出主人声文件夹"), value="output/uvr5_opt") opt_ins_root = gr.Textbox(label=i18n("指定输出非主人声文件夹"), value="output/uvr5_opt") format0 = gr.Radio( label=i18n("导出文件格式"), choices=["wav", "flac", "mp3", "m4a"], value="flac", interactive=True, ) with gr.Column(): with gr.Row(): but2 = gr.Button(i18n("转换"), variant="primary") with gr.Row(): vc_output4 = gr.Textbox(label=i18n("输出信息"), lines=3) but2.click( uvr, [ model_choose, dir_wav_input, opt_vocal_root, wav_inputs, opt_ins_root, agg, format0, ], [vc_output4], api_name="uvr_convert", ) app.queue().launch( # concurrency_count=511, max_size=1022 server_name="0.0.0.0", inbrowser=True, share=is_share, server_port=webui_port_uvr5, # quiet=True, )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
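The UVR5 WebUI takes its runtime options from positional `sys.argv` rather than argparse, so it is normally started as a subprocess. A sketch of the call (the port and device are example values):

```python
# Launch sketch for tools/uvr5/webui.py; argv order is device, is_half,
# webui_port_uvr5, is_share, parsed inside the script with eval()/int().
import subprocess
import sys

subprocess.Popen([sys.executable, "tools/uvr5/webui.py", "cuda", "True", "9873", "False"])
```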
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/bsroformer.py
tools/uvr5/bsroformer.py
# This code is modified from https://github.com/ZFTurbo/ import os import warnings import librosa import numpy as np import soundfile as sf import torch import torch.nn as nn import yaml from tqdm import tqdm warnings.filterwarnings("ignore") class Roformer_Loader: def get_config(self, config_path): with open(config_path, "r", encoding="utf-8") as f: # use fullloader to load tag !!python/tuple, code can be improved config = yaml.load(f, Loader=yaml.FullLoader) return config def get_default_config(self): default_config = None if self.model_type == "bs_roformer": # Use model_bs_roformer_ep_368_sdr_12.9628.yaml and model_bs_roformer_ep_317_sdr_12.9755.yaml as default configuration files # Other BS_Roformer models may not be compatible # fmt: off default_config = { "audio": {"chunk_size": 352800, "sample_rate": 44100}, "model": { "dim": 512, "depth": 12, "stereo": True, "num_stems": 1, "time_transformer_depth": 1, "freq_transformer_depth": 1, "linear_transformer_depth": 0, "freqs_per_bands": (2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 12, 12, 12, 12, 12, 12, 12, 12, 24, 24, 24, 24, 24, 24, 24, 24, 48, 48, 48, 48, 48, 48, 48, 48, 128, 129), "dim_head": 64, "heads": 8, "attn_dropout": 0.1, "ff_dropout": 0.1, "flash_attn": True, "dim_freqs_in": 1025, "stft_n_fft": 2048, "stft_hop_length": 441, "stft_win_length": 2048, "stft_normalized": False, "mask_estimator_depth": 2, "multi_stft_resolution_loss_weight": 1.0, "multi_stft_resolutions_window_sizes": (4096, 2048, 1024, 512, 256), "multi_stft_hop_size": 147, "multi_stft_normalized": False, }, "training": {"instruments": ["vocals", "other"], "target_instrument": "vocals"}, "inference": {"batch_size": 2, "num_overlap": 2}, } # fmt: on elif self.model_type == "mel_band_roformer": # Use model_mel_band_roformer_ep_3005_sdr_11.4360.yaml as default configuration files # Other Mel_Band_Roformer models may not be compatible default_config = { "audio": {"chunk_size": 352800, "sample_rate": 44100}, "model": { "dim": 384, "depth": 12, "stereo": True, "num_stems": 1, "time_transformer_depth": 1, "freq_transformer_depth": 1, "linear_transformer_depth": 0, "num_bands": 60, "dim_head": 64, "heads": 8, "attn_dropout": 0.1, "ff_dropout": 0.1, "flash_attn": True, "dim_freqs_in": 1025, "sample_rate": 44100, "stft_n_fft": 2048, "stft_hop_length": 441, "stft_win_length": 2048, "stft_normalized": False, "mask_estimator_depth": 2, "multi_stft_resolution_loss_weight": 1.0, "multi_stft_resolutions_window_sizes": (4096, 2048, 1024, 512, 256), "multi_stft_hop_size": 147, "multi_stft_normalized": False, }, "training": {"instruments": ["vocals", "other"], "target_instrument": "vocals"}, "inference": {"batch_size": 2, "num_overlap": 2}, } return default_config def get_model_from_config(self): if self.model_type == "bs_roformer": from bs_roformer.bs_roformer import BSRoformer model = BSRoformer(**dict(self.config["model"])) elif self.model_type == "mel_band_roformer": from bs_roformer.mel_band_roformer import MelBandRoformer model = MelBandRoformer(**dict(self.config["model"])) else: print("Error: Unknown model: {}".format(self.model_type)) model = None return model def demix_track(self, model, mix, device): C = self.config["audio"]["chunk_size"] # chunk_size N = self.config["inference"]["num_overlap"] fade_size = C // 10 step = int(C // N) border = C - step batch_size = self.config["inference"]["batch_size"] length_init = mix.shape[-1] progress_bar = tqdm(total=length_init // step + 1, desc="Processing", 
leave=False) # Do pad from the beginning and end to account floating window results better if length_init > 2 * border and (border > 0): mix = nn.functional.pad(mix, (border, border), mode="reflect") # Prepare windows arrays (do 1 time for speed up). This trick repairs click problems on the edges of segment window_size = C fadein = torch.linspace(0, 1, fade_size) fadeout = torch.linspace(1, 0, fade_size) window_start = torch.ones(window_size) window_middle = torch.ones(window_size) window_finish = torch.ones(window_size) window_start[-fade_size:] *= fadeout # First audio chunk, no fadein window_finish[:fade_size] *= fadein # Last audio chunk, no fadeout window_middle[-fade_size:] *= fadeout window_middle[:fade_size] *= fadein with torch.amp.autocast("cuda"): with torch.inference_mode(): if self.config["training"]["target_instrument"] is None: req_shape = (len(self.config["training"]["instruments"]),) + tuple(mix.shape) else: req_shape = (1,) + tuple(mix.shape) result = torch.zeros(req_shape, dtype=torch.float32) counter = torch.zeros(req_shape, dtype=torch.float32) i = 0 batch_data = [] batch_locations = [] while i < mix.shape[1]: part = mix[:, i : i + C].to(device) length = part.shape[-1] if length < C: if length > C // 2 + 1: part = nn.functional.pad(input=part, pad=(0, C - length), mode="reflect") else: part = nn.functional.pad(input=part, pad=(0, C - length, 0, 0), mode="constant", value=0) if self.is_half: part = part.half() batch_data.append(part) batch_locations.append((i, length)) i += step progress_bar.update(1) if len(batch_data) >= batch_size or (i >= mix.shape[1]): arr = torch.stack(batch_data, dim=0) # print(23333333,arr.dtype) x = model(arr) window = window_middle if i - step == 0: # First audio chunk, no fadein window = window_start elif i >= mix.shape[1]: # Last audio chunk, no fadeout window = window_finish for j in range(len(batch_locations)): start, l = batch_locations[j] result[..., start : start + l] += x[j][..., :l].cpu() * window[..., :l] counter[..., start : start + l] += window[..., :l] batch_data = [] batch_locations = [] estimated_sources = result / counter estimated_sources = estimated_sources.cpu().numpy() np.nan_to_num(estimated_sources, copy=False, nan=0.0) if length_init > 2 * border and (border > 0): # Remove pad estimated_sources = estimated_sources[..., border:-border] progress_bar.close() if self.config["training"]["target_instrument"] is None: return {k: v for k, v in zip(self.config["training"]["instruments"], estimated_sources)} else: return {k: v for k, v in zip([self.config["training"]["target_instrument"]], estimated_sources)} def run_folder(self, input, vocal_root, others_root, format): self.model.eval() path = input os.makedirs(vocal_root, exist_ok=True) os.makedirs(others_root, exist_ok=True) file_base_name = os.path.splitext(os.path.basename(path))[0] sample_rate = 44100 if "sample_rate" in self.config["audio"]: sample_rate = self.config["audio"]["sample_rate"] try: mix, sr = librosa.load(path, sr=sample_rate, mono=False) except Exception as e: print("Can read track: {}".format(path)) print("Error message: {}".format(str(e))) return # in case if model only supports mono tracks isstereo = self.config["model"].get("stereo", True) if not isstereo and len(mix.shape) != 1: mix = np.mean(mix, axis=0) # if more than 2 channels, take mean print("Warning: Track has more than 1 channels, but model is mono, taking mean of all channels.") mix_orig = mix.copy() mixture = torch.tensor(mix, dtype=torch.float32) res = self.demix_track(self.model, mixture, 
self.device) if self.config["training"]["target_instrument"] is not None: # if target instrument is specified, save target instrument as vocal and other instruments as others # other instruments are caculated by subtracting target instrument from mixture target_instrument = self.config["training"]["target_instrument"] other_instruments = [i for i in self.config["training"]["instruments"] if i != target_instrument] other = mix_orig - res[target_instrument] # caculate other instruments path_vocal = "{}/{}_{}.wav".format(vocal_root, file_base_name, target_instrument) path_other = "{}/{}_{}.wav".format(others_root, file_base_name, other_instruments[0]) self.save_audio(path_vocal, res[target_instrument].T, sr, format) self.save_audio(path_other, other.T, sr, format) else: # if target instrument is not specified, save the first instrument as vocal and the rest as others vocal_inst = self.config["training"]["instruments"][0] path_vocal = "{}/{}_{}.wav".format(vocal_root, file_base_name, vocal_inst) self.save_audio(path_vocal, res[vocal_inst].T, sr, format) for other in self.config["training"]["instruments"][1:]: # save other instruments path_other = "{}/{}_{}.wav".format(others_root, file_base_name, other) self.save_audio(path_other, res[other].T, sr, format) def save_audio(self, path, data, sr, format): # input path should be endwith '.wav' if format in ["wav", "flac"]: if format == "flac": path = path[:-3] + "flac" sf.write(path, data, sr) else: sf.write(path, data, sr) os.system('ffmpeg -i "{}" -vn "{}" -q:a 2 -y'.format(path, path[:-3] + format)) try: os.remove(path) except: pass def __init__(self, model_path, config_path, device, is_half): self.device = device self.is_half = is_half self.model_type = None self.config = None # get model_type, first try: if "bs_roformer" in model_path.lower() or "bsroformer" in model_path.lower(): self.model_type = "bs_roformer" elif "mel_band_roformer" in model_path.lower() or "melbandroformer" in model_path.lower(): self.model_type = "mel_band_roformer" if not os.path.exists(config_path): if self.model_type is None: # if model_type is still None, raise an error raise ValueError( "Error: Unknown model type. If you are using a model without a configuration file, Ensure that your model name includes 'bs_roformer', 'bsroformer', 'mel_band_roformer', or 'melbandroformer'. Otherwise, you can manually place the model configuration file into 'tools/uvr5/uvr5w_weights' and ensure that the configuration file is named as '<model_name>.yaml' then try it again." ) self.config = self.get_default_config() else: # if there is a configuration file self.config = self.get_config(config_path) if self.model_type is None: # if model_type is still None, second try, get model_type from the configuration file if "freqs_per_bands" in self.config["model"]: # if freqs_per_bands in config, it's a bs_roformer model self.model_type = "bs_roformer" else: # else it's a mel_band_roformer model self.model_type = "mel_band_roformer" print("Detected model type: {}".format(self.model_type)) model = self.get_model_from_config() state_dict = torch.load(model_path, map_location="cpu") model.load_state_dict(state_dict) if is_half == False: self.model = model.to(device) else: self.model = model.half().to(device) def _path_audio_(self, input, others_root, vocal_root, format, is_hp3=False): self.run_folder(input, vocal_root, others_root, format)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
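`demix_track()` avoids clicks at chunk boundaries by cross-fading overlapping windows before overlap-add. The weighting scheme in isolation, as a standalone sketch:

```python
# Standalone reimplementation of the fade windows built inside demix_track():
# the first chunk skips the fade-in and the last chunk skips the fade-out.
import torch


def fade_windows(chunk_size: int, fade_size: int):
    fadein = torch.linspace(0, 1, fade_size)
    fadeout = torch.linspace(1, 0, fade_size)
    start = torch.ones(chunk_size)
    middle = torch.ones(chunk_size)
    finish = torch.ones(chunk_size)
    start[-fade_size:] *= fadeout    # first chunk: fade out only
    finish[:fade_size] *= fadein     # last chunk: fade in only
    middle[-fade_size:] *= fadeout
    middle[:fade_size] *= fadein
    return start, middle, finish


C = 352800  # chunk_size from the default configs above
start, middle, finish = fade_windows(C, C // 10)
print(start[-1].item(), middle[0].item(), finish[0].item())  # 0.0 0.0 0.0
```

Each chunk's prediction is multiplied by its window and accumulated into `result`, while `counter` accumulates the windows themselves; dividing the two yields a seamless estimate across chunk seams.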
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/mdxnet.py
tools/uvr5/mdxnet.py
import os import logging logger = logging.getLogger(__name__) import librosa import numpy as np import soundfile as sf import torch from tqdm import tqdm cpu = torch.device("cpu") class ConvTDFNetTrim: def __init__(self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024): super(ConvTDFNetTrim, self).__init__() self.dim_f = dim_f self.dim_t = 2**dim_t self.n_fft = n_fft self.hop = hop self.n_bins = self.n_fft // 2 + 1 self.chunk_size = hop * (self.dim_t - 1) self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(device) self.target_name = target_name self.blender = "blender" in model_name self.dim_c = 4 out_c = self.dim_c * 4 if target_name == "*" else self.dim_c self.freq_pad = torch.zeros([1, out_c, self.n_bins - self.dim_f, self.dim_t]).to(device) self.n = L // 2 def stft(self, x): x = x.reshape([-1, self.chunk_size]) x = torch.stft( x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True, return_complex=True, ) x = torch.view_as_real(x) x = x.permute([0, 3, 1, 2]) x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape([-1, self.dim_c, self.n_bins, self.dim_t]) return x[:, :, : self.dim_f] def istft(self, x, freq_pad=None): freq_pad = self.freq_pad.repeat([x.shape[0], 1, 1, 1]) if freq_pad is None else freq_pad x = torch.cat([x, freq_pad], -2) c = 4 * 2 if self.target_name == "*" else 2 x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape([-1, 2, self.n_bins, self.dim_t]) x = x.permute([0, 2, 3, 1]) x = x.contiguous() x = torch.view_as_complex(x) x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True) return x.reshape([-1, c, self.chunk_size]) def get_models(device, dim_f, dim_t, n_fft): return ConvTDFNetTrim( device=device, model_name="Conv-TDF", target_name="vocals", L=11, dim_f=dim_f, dim_t=dim_t, n_fft=n_fft, ) class Predictor: def __init__(self, args): import onnxruntime as ort logger.info(ort.get_available_providers()) self.args = args self.model_ = get_models(device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft) self.model = ort.InferenceSession( os.path.join(args.onnx, self.model_.target_name + ".onnx"), providers=[ "CUDAExecutionProvider", "DmlExecutionProvider", "CPUExecutionProvider", ], ) logger.info("ONNX load done") def demix(self, mix): samples = mix.shape[-1] margin = self.args.margin chunk_size = self.args.chunks * 44100 assert not margin == 0, "margin cannot be zero!" 
if margin > chunk_size: margin = chunk_size segmented_mix = {} if self.args.chunks == 0 or samples < chunk_size: chunk_size = samples counter = -1 for skip in range(0, samples, chunk_size): counter += 1 s_margin = 0 if counter == 0 else margin end = min(skip + chunk_size + margin, samples) start = skip - s_margin segmented_mix[skip] = mix[:, start:end].copy() if end == samples: break sources = self.demix_base(segmented_mix, margin_size=margin) """ mix:(2,big_sample) segmented_mix:offset->(2,small_sample) sources:(1,2,big_sample) """ return sources def demix_base(self, mixes, margin_size): chunked_sources = [] progress_bar = tqdm(total=len(mixes)) progress_bar.set_description("Processing") for mix in mixes: cmix = mixes[mix] sources = [] n_sample = cmix.shape[1] model = self.model_ trim = model.n_fft // 2 gen_size = model.chunk_size - 2 * trim pad = gen_size - n_sample % gen_size mix_p = np.concatenate((np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1) mix_waves = [] i = 0 while i < n_sample + pad: waves = np.array(mix_p[:, i : i + model.chunk_size]) mix_waves.append(waves) i += gen_size mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu) with torch.no_grad(): _ort = self.model spek = model.stft(mix_waves) if self.args.denoise: spec_pred = ( -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5 + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5 ) tar_waves = model.istft(torch.tensor(spec_pred)) else: tar_waves = model.istft(torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])) tar_signal = tar_waves[:, :, trim:-trim].transpose(0, 1).reshape(2, -1).numpy()[:, :-pad] start = 0 if mix == 0 else margin_size end = None if mix == list(mixes.keys())[::-1][0] else -margin_size if margin_size == 0: end = None sources.append(tar_signal[:, start:end]) progress_bar.update(1) chunked_sources.append(sources) _sources = np.concatenate(chunked_sources, axis=-1) # del self.model progress_bar.close() return _sources def prediction(self, m, vocal_root, others_root, format): os.makedirs(vocal_root, exist_ok=True) os.makedirs(others_root, exist_ok=True) basename = os.path.basename(m) mix, rate = librosa.load(m, mono=False, sr=44100) if mix.ndim == 1: mix = np.asfortranarray([mix, mix]) mix = mix.T sources = self.demix(mix.T) opt = sources[0].T if format in ["wav", "flac"]: sf.write("%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate) sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate) else: path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename) path_other = "%s/%s_others.wav" % (others_root, basename) sf.write(path_vocal, mix - opt, rate) sf.write(path_other, opt, rate) opt_path_vocal = path_vocal[:-4] + ".%s" % format opt_path_other = path_other[:-4] + ".%s" % format if os.path.exists(path_vocal): os.system('ffmpeg -i "%s" -vn "%s" -q:a 2 -y' % (path_vocal, opt_path_vocal)) if os.path.exists(opt_path_vocal): try: os.remove(path_vocal) except: pass if os.path.exists(path_other): os.system('ffmpeg -i "%s" -vn "%s" -q:a 2 -y' % (path_other, opt_path_other)) if os.path.exists(opt_path_other): try: os.remove(path_other) except: pass class MDXNetDereverb: def __init__(self, chunks): self.onnx = "%s/uvr5_weights/onnx_dereverb_By_FoxJoy" % os.path.dirname(os.path.abspath(__file__)) self.shifts = 10 # 'Predict with randomised equivariant stabilisation' self.mixing = "min_mag" # ['default','min_mag','max_mag'] self.chunks = chunks self.margin = 44100 self.dim_t = 9 self.dim_f = 3072 self.n_fft = 6144 self.denoise = True 
self.pred = Predictor(self) self.device = cpu def _path_audio_(self, input, others_root, vocal_root, format, is_hp3=False): self.pred.prediction(input, vocal_root, others_root, format)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
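`MDXNetDereverb` wires the `Predictor` above to the FoxJoy dereverb ONNX weights it expects under `tools/uvr5/uvr5_weights/`. A minimal sketch with an illustrative input path:

```python
# Minimal sketch for the MDXNetDereverb wrapper above; input path is illustrative.
from mdxnet import MDXNetDereverb  # assumes tools/uvr5 on sys.path

dereverb = MDXNetDereverb(chunks=15)  # chunk length in seconds of 44.1 kHz audio
dereverb._path_audio_(
    "input.wav",
    others_root="opt/others",  # receives the "<name>_others" stem
    vocal_root="opt/vocal",    # receives the "<name>_main_vocal" stem
    format="wav",
)
```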
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/bs_roformer/attend.py
tools/uvr5/bs_roformer/attend.py
from packaging import version import torch from torch import nn, einsum import torch.nn.functional as F def exists(val): return val is not None def default(v, d): return v if exists(v) else d class Attend(nn.Module): def __init__(self, dropout=0.0, flash=False, scale=None): super().__init__() self.scale = scale self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse("2.0.0")), ( "in order to use flash attention, you must be using pytorch 2.0 or above" ) def flash_attn(self, q, k, v): # _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device if exists(self.scale): default_scale = q.shape[-1] ** -0.5 q = q * (self.scale / default_scale) # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): return F.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout if self.training else 0.0) def forward(self, q, k, v): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ # q_len, k_len, device = q.shape[-2], k.shape[-2], q.device scale = default(self.scale, q.shape[-1] ** -0.5) if self.flash: return self.flash_attn(q, k, v) # similarity sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale # attention attn = sim.softmax(dim=-1) attn = self.attn_dropout(attn) # aggregate values out = einsum("b h i j, b h j d -> b h i d", attn, v) return out
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
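`Attend` is shape-preserving over `(batch, heads, seq, dim_head)` whether it takes the flash path or the explicit einsum path. A standalone check of the non-flash path:

```python
# Standalone shape check for the Attend module above (non-flash path).
import torch
from bs_roformer.attend import Attend  # import path taken from the sibling records

attend = Attend(dropout=0.0, flash=False)
q, k, v = (torch.randn(2, 8, 128, 64) for _ in range(3))  # (batch, heads, seq, dim_head)
out = attend(q, k, v)
assert out.shape == (2, 8, 128, 64)
```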
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/bs_roformer/mel_band_roformer.py
tools/uvr5/bs_roformer/mel_band_roformer.py
from functools import partial import torch from torch import nn from torch.nn import Module, ModuleList import torch.nn.functional as F from bs_roformer.attend import Attend from torch.utils.checkpoint import checkpoint from typing import Tuple, Optional, Callable # from beartype.typing import Tuple, Optional, List, Callable # from beartype import beartype from rotary_embedding_torch import RotaryEmbedding from einops import rearrange, pack, unpack, reduce, repeat from einops.layers.torch import Rearrange from librosa import filters # helper functions def exists(val): return val is not None def default(v, d): return v if exists(v) else d def pack_one(t, pattern): return pack([t], pattern) def unpack_one(t, ps, pattern): return unpack(t, ps, pattern)[0] def pad_at_dim(t, pad, dim=-1, value=0.0): dims_from_right = (-dim - 1) if dim < 0 else (t.ndim - dim - 1) zeros = (0, 0) * dims_from_right return F.pad(t, (*zeros, *pad), value=value) def l2norm(t): return F.normalize(t, dim=-1, p=2) # norm class RMSNorm(Module): def __init__(self, dim): super().__init__() self.scale = dim**0.5 self.gamma = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim=-1) * self.scale * self.gamma # attention class FeedForward(Module): def __init__(self, dim, mult=4, dropout=0.0): super().__init__() dim_inner = int(dim * mult) self.net = nn.Sequential( RMSNorm(dim), nn.Linear(dim, dim_inner), nn.GELU(), nn.Dropout(dropout), nn.Linear(dim_inner, dim), nn.Dropout(dropout), ) def forward(self, x): return self.net(x) class Attention(Module): def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, rotary_embed=None, flash=True): super().__init__() self.heads = heads self.scale = dim_head**-0.5 dim_inner = heads * dim_head self.rotary_embed = rotary_embed self.attend = Attend(flash=flash, dropout=dropout) self.norm = RMSNorm(dim) self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False) self.to_gates = nn.Linear(dim, heads) self.to_out = nn.Sequential(nn.Linear(dim_inner, dim, bias=False), nn.Dropout(dropout)) def forward(self, x): x = self.norm(x) q, k, v = rearrange(self.to_qkv(x), "b n (qkv h d) -> qkv b h n d", qkv=3, h=self.heads) if exists(self.rotary_embed): q = self.rotary_embed.rotate_queries_or_keys(q) k = self.rotary_embed.rotate_queries_or_keys(k) out = self.attend(q, k, v) gates = self.to_gates(x) out = out * rearrange(gates, "b n h -> b h n 1").sigmoid() out = rearrange(out, "b h n d -> b n (h d)") return self.to_out(out) class LinearAttention(Module): """ this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al. 
""" # @beartype def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0): super().__init__() dim_inner = dim_head * heads self.norm = RMSNorm(dim) self.to_qkv = nn.Sequential( nn.Linear(dim, dim_inner * 3, bias=False), Rearrange("b n (qkv h d) -> qkv b h d n", qkv=3, h=heads) ) self.temperature = nn.Parameter(torch.ones(heads, 1, 1)) self.attend = Attend(scale=scale, dropout=dropout, flash=flash) self.to_out = nn.Sequential(Rearrange("b h d n -> b n (h d)"), nn.Linear(dim_inner, dim, bias=False)) def forward(self, x): x = self.norm(x) q, k, v = self.to_qkv(x) q, k = map(l2norm, (q, k)) q = q * self.temperature.exp() out = self.attend(q, k, v) return self.to_out(out) class Transformer(Module): def __init__( self, *, dim, depth, dim_head=64, heads=8, attn_dropout=0.0, ff_dropout=0.0, ff_mult=4, norm_output=True, rotary_embed=None, flash_attn=True, linear_attn=False, ): super().__init__() self.layers = ModuleList([]) for _ in range(depth): if linear_attn: attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn) else: attn = Attention( dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, rotary_embed=rotary_embed, flash=flash_attn, ) self.layers.append(ModuleList([attn, FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)])) self.norm = RMSNorm(dim) if norm_output else nn.Identity() def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) # bandsplit module class BandSplit(Module): # @beartype def __init__(self, dim, dim_inputs: Tuple[int, ...]): super().__init__() self.dim_inputs = dim_inputs self.to_features = ModuleList([]) for dim_in in dim_inputs: net = nn.Sequential(RMSNorm(dim_in), nn.Linear(dim_in, dim)) self.to_features.append(net) def forward(self, x): x = x.split(self.dim_inputs, dim=-1) outs = [] for split_input, to_feature in zip(x, self.to_features): split_output = to_feature(split_input) outs.append(split_output) return torch.stack(outs, dim=-2) def MLP(dim_in, dim_out, dim_hidden=None, depth=1, activation=nn.Tanh): dim_hidden = default(dim_hidden, dim_in) net = [] dims = (dim_in, *((dim_hidden,) * depth), dim_out) for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): is_last = ind == (len(dims) - 2) net.append(nn.Linear(layer_dim_in, layer_dim_out)) if is_last: continue net.append(activation()) return nn.Sequential(*net) class MaskEstimator(Module): # @beartype def __init__(self, dim, dim_inputs: Tuple[int, ...], depth, mlp_expansion_factor=4): super().__init__() self.dim_inputs = dim_inputs self.to_freqs = ModuleList([]) dim_hidden = dim * mlp_expansion_factor for dim_in in dim_inputs: net = [] mlp = nn.Sequential(MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), nn.GLU(dim=-1)) self.to_freqs.append(mlp) def forward(self, x): x = x.unbind(dim=-2) outs = [] for band_features, mlp in zip(x, self.to_freqs): freq_out = mlp(band_features) outs.append(freq_out) return torch.cat(outs, dim=-1) # main class class MelBandRoformer(Module): # @beartype def __init__( self, dim, *, depth, stereo=False, num_stems=1, time_transformer_depth=2, freq_transformer_depth=2, linear_transformer_depth=0, num_bands=60, dim_head=64, heads=8, attn_dropout=0.1, ff_dropout=0.1, flash_attn=True, dim_freqs_in=1025, sample_rate=44100, # needed for mel filter bank from librosa stft_n_fft=2048, stft_hop_length=512, # 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction stft_win_length=2048, 
stft_normalized=False, stft_window_fn: Optional[Callable] = None, mask_estimator_depth=1, multi_stft_resolution_loss_weight=1.0, multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256), multi_stft_hop_size=147, multi_stft_normalized=False, multi_stft_window_fn: Callable = torch.hann_window, match_input_audio_length=False, # if True, pad output tensor to match length of input tensor mlp_expansion_factor=4, use_torch_checkpoint=False, skip_connection=False, ): super().__init__() self.stereo = stereo self.audio_channels = 2 if stereo else 1 self.num_stems = num_stems self.use_torch_checkpoint = use_torch_checkpoint self.skip_connection = skip_connection self.layers = ModuleList([]) transformer_kwargs = dict( dim=dim, heads=heads, dim_head=dim_head, attn_dropout=attn_dropout, ff_dropout=ff_dropout, flash_attn=flash_attn, ) time_rotary_embed = RotaryEmbedding(dim=dim_head) freq_rotary_embed = RotaryEmbedding(dim=dim_head) for _ in range(depth): tran_modules = [] if linear_transformer_depth > 0: tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs)) tran_modules.append( Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs) ) tran_modules.append( Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs) ) self.layers.append(nn.ModuleList(tran_modules)) self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length) self.stft_kwargs = dict( n_fft=stft_n_fft, hop_length=stft_hop_length, win_length=stft_win_length, normalized=stft_normalized ) freqs = torch.stft( torch.randn(1, 4096), **self.stft_kwargs, window=torch.ones(stft_n_fft), return_complex=True ).shape[1] # create mel filter bank # with librosa.filters.mel as in section 2 of paper mel_filter_bank_numpy = filters.mel(sr=sample_rate, n_fft=stft_n_fft, n_mels=num_bands) mel_filter_bank = torch.from_numpy(mel_filter_bank_numpy) # for some reason, it doesn't include the first freq? 
just force a value for now mel_filter_bank[0][0] = 1.0 # In some systems/envs we get 0.0 instead of ~1.9e-18 in the last position, # so let's force a positive value mel_filter_bank[-1, -1] = 1.0 # binary as in paper (then estimated masks are averaged for overlapping regions) freqs_per_band = mel_filter_bank > 0 assert freqs_per_band.any(dim=0).all(), "all frequencies need to be covered by all bands for now" repeated_freq_indices = repeat(torch.arange(freqs), "f -> b f", b=num_bands) freq_indices = repeated_freq_indices[freqs_per_band] if stereo: freq_indices = repeat(freq_indices, "f -> f s", s=2) freq_indices = freq_indices * 2 + torch.arange(2) freq_indices = rearrange(freq_indices, "f s -> (f s)") self.register_buffer("freq_indices", freq_indices, persistent=False) self.register_buffer("freqs_per_band", freqs_per_band, persistent=False) num_freqs_per_band = reduce(freqs_per_band, "b f -> b", "sum") num_bands_per_freq = reduce(freqs_per_band, "b f -> f", "sum") self.register_buffer("num_freqs_per_band", num_freqs_per_band, persistent=False) self.register_buffer("num_bands_per_freq", num_bands_per_freq, persistent=False) # band split and mask estimator freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in num_freqs_per_band.tolist()) self.band_split = BandSplit(dim=dim, dim_inputs=freqs_per_bands_with_complex) self.mask_estimators = nn.ModuleList([]) for _ in range(num_stems): mask_estimator = MaskEstimator( dim=dim, dim_inputs=freqs_per_bands_with_complex, depth=mask_estimator_depth, mlp_expansion_factor=mlp_expansion_factor, ) self.mask_estimators.append(mask_estimator) # for the multi-resolution stft loss self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes self.multi_stft_n_fft = stft_n_fft self.multi_stft_window_fn = multi_stft_window_fn self.multi_stft_kwargs = dict(hop_length=multi_stft_hop_size, normalized=multi_stft_normalized) self.match_input_audio_length = match_input_audio_length def forward(self, raw_audio, target=None, return_loss_breakdown=False): """ einops b - batch f - freq t - time s - audio channel (1 for mono, 2 for stereo) n - number of 'stems' c - complex (2) d - feature dimension """ device = raw_audio.device if raw_audio.ndim == 2: raw_audio = rearrange(raw_audio, "b t -> b 1 t") batch, channels, raw_audio_length = raw_audio.shape istft_length = raw_audio_length if self.match_input_audio_length else None assert (not self.stereo and channels == 1) or (self.stereo and channels == 2), ( "stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). 
also need to be False if mono (channel dimension of 1)" ) # to stft raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, "* t") stft_window = self.stft_window_fn(device=device) stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True) stft_repr = torch.view_as_real(stft_repr) stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, "* f t c") # merge stereo / mono into the frequency, with frequency leading dimension, for band splitting stft_repr = rearrange(stft_repr, "b s f t c -> b (f s) t c") # index out all frequencies for all frequency ranges across bands ascending in one go batch_arange = torch.arange(batch, device=device)[..., None] # account for stereo x = stft_repr[batch_arange, self.freq_indices] # fold the complex (real and imag) into the frequencies dimension x = rearrange(x, "b f t c -> b t (f c)") if self.use_torch_checkpoint: x = checkpoint(self.band_split, x, use_reentrant=False) else: x = self.band_split(x) # axial / hierarchical attention store = [None] * len(self.layers) for i, transformer_block in enumerate(self.layers): if len(transformer_block) == 3: linear_transformer, time_transformer, freq_transformer = transformer_block x, ft_ps = pack([x], "b * d") if self.use_torch_checkpoint: x = checkpoint(linear_transformer, x, use_reentrant=False) else: x = linear_transformer(x) (x,) = unpack(x, ft_ps, "b * d") else: time_transformer, freq_transformer = transformer_block if self.skip_connection: # Sum all previous for j in range(i): x = x + store[j] x = rearrange(x, "b t f d -> b f t d") x, ps = pack([x], "* t d") if self.use_torch_checkpoint: x = checkpoint(time_transformer, x, use_reentrant=False) else: x = time_transformer(x) (x,) = unpack(x, ps, "* t d") x = rearrange(x, "b f t d -> b t f d") x, ps = pack([x], "* f d") if self.use_torch_checkpoint: x = checkpoint(freq_transformer, x, use_reentrant=False) else: x = freq_transformer(x) (x,) = unpack(x, ps, "* f d") if self.skip_connection: store[i] = x num_stems = len(self.mask_estimators) if self.use_torch_checkpoint: masks = torch.stack([checkpoint(fn, x, use_reentrant=False) for fn in self.mask_estimators], dim=1) else: masks = torch.stack([fn(x) for fn in self.mask_estimators], dim=1) masks = rearrange(masks, "b n t (f c) -> b n f t c", c=2) # modulate frequency representation stft_repr = rearrange(stft_repr, "b f t c -> b 1 f t c") # complex number multiplication stft_repr = torch.view_as_complex(stft_repr) masks = torch.view_as_complex(masks) masks = masks.type(stft_repr.dtype) # need to average the estimated mask for the overlapped frequencies scatter_indices = repeat(self.freq_indices, "f -> b n f t", b=batch, n=num_stems, t=stft_repr.shape[-1]) stft_repr_expanded_stems = repeat(stft_repr, "b 1 ... 
-> b n ...", n=num_stems) masks_summed = torch.zeros_like(stft_repr_expanded_stems).scatter_add_(2, scatter_indices, masks) denom = repeat(self.num_bands_per_freq, "f -> (f r) 1", r=channels) masks_averaged = masks_summed / denom.clamp(min=1e-8) # modulate stft repr with estimated mask stft_repr = stft_repr * masks_averaged # istft stft_repr = rearrange(stft_repr, "b n (f s) t -> (b n s) f t", s=self.audio_channels) recon_audio = torch.istft( stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False, length=istft_length ) recon_audio = rearrange(recon_audio, "(b n s) t -> b n s t", b=batch, s=self.audio_channels, n=num_stems) if num_stems == 1: recon_audio = rearrange(recon_audio, "b 1 s t -> b s t") # if a target is passed in, calculate loss for learning if not exists(target): return recon_audio if self.num_stems > 1: assert target.ndim == 4 and target.shape[1] == self.num_stems if target.ndim == 2: target = rearrange(target, "... t -> ... 1 t") target = target[..., : recon_audio.shape[-1]] # protect against lost length on istft loss = F.l1_loss(recon_audio, target) multi_stft_resolution_loss = 0.0 for window_size in self.multi_stft_resolutions_window_sizes: res_stft_kwargs = dict( n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft win_length=window_size, return_complex=True, window=self.multi_stft_window_fn(window_size, device=device), **self.multi_stft_kwargs, ) recon_Y = torch.stft(rearrange(recon_audio, "... s t -> (... s) t"), **res_stft_kwargs) target_Y = torch.stft(rearrange(target, "... s t -> (... s) t"), **res_stft_kwargs) multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y) weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight total_loss = loss + weighted_multi_resolution_loss if not return_loss_breakdown: return total_loss return total_loss, (loss, multi_stft_resolution_loss)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
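A smoke test sketched against the `MelBandRoformer` constructor above; the sizes are illustrative, not those of any released checkpoint (real checkpoints ship their own YAML):

```python
# Untrained smoke test for MelBandRoformer: mono batch in, single stem out.
import torch
from bs_roformer.mel_band_roformer import MelBandRoformer

model = MelBandRoformer(dim=32, depth=1, num_bands=60, sample_rate=44100).eval()
with torch.no_grad():
    wave = torch.randn(1, 44100)  # (batch, time); ndim == 2 is treated as mono
    stem = model(wave)            # (batch, audio_channels, time)
print(stem.shape)

# Passing a target of matching shape returns the L1 + multi-resolution STFT loss instead:
loss = model(wave, target=torch.randn_like(stem))
```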
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/bs_roformer/bs_roformer.py
tools/uvr5/bs_roformer/bs_roformer.py
from functools import partial import torch from torch import nn from torch.nn import Module, ModuleList import torch.nn.functional as F from bs_roformer.attend import Attend from torch.utils.checkpoint import checkpoint from typing import Tuple, Optional, Callable # from beartype.typing import Tuple, Optional, List, Callable # from beartype import beartype from rotary_embedding_torch import RotaryEmbedding from einops import rearrange, pack, unpack from einops.layers.torch import Rearrange # helper functions def exists(val): return val is not None def default(v, d): return v if exists(v) else d def pack_one(t, pattern): return pack([t], pattern) def unpack_one(t, ps, pattern): return unpack(t, ps, pattern)[0] # norm def l2norm(t): return F.normalize(t, dim=-1, p=2) class RMSNorm(Module): def __init__(self, dim): super().__init__() self.scale = dim**0.5 self.gamma = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim=-1) * self.scale * self.gamma # attention class FeedForward(Module): def __init__(self, dim, mult=4, dropout=0.0): super().__init__() dim_inner = int(dim * mult) self.net = nn.Sequential( RMSNorm(dim), nn.Linear(dim, dim_inner), nn.GELU(), nn.Dropout(dropout), nn.Linear(dim_inner, dim), nn.Dropout(dropout), ) def forward(self, x): return self.net(x) class Attention(Module): def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, rotary_embed=None, flash=True): super().__init__() self.heads = heads self.scale = dim_head**-0.5 dim_inner = heads * dim_head self.rotary_embed = rotary_embed self.attend = Attend(flash=flash, dropout=dropout) self.norm = RMSNorm(dim) self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False) self.to_gates = nn.Linear(dim, heads) self.to_out = nn.Sequential(nn.Linear(dim_inner, dim, bias=False), nn.Dropout(dropout)) def forward(self, x): x = self.norm(x) q, k, v = rearrange(self.to_qkv(x), "b n (qkv h d) -> qkv b h n d", qkv=3, h=self.heads) if exists(self.rotary_embed): q = self.rotary_embed.rotate_queries_or_keys(q) k = self.rotary_embed.rotate_queries_or_keys(k) out = self.attend(q, k, v) gates = self.to_gates(x) out = out * rearrange(gates, "b n h -> b h n 1").sigmoid() out = rearrange(out, "b h n d -> b n (h d)") return self.to_out(out) class LinearAttention(Module): """ this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al. 
""" # @beartype def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0): super().__init__() dim_inner = dim_head * heads self.norm = RMSNorm(dim) self.to_qkv = nn.Sequential( nn.Linear(dim, dim_inner * 3, bias=False), Rearrange("b n (qkv h d) -> qkv b h d n", qkv=3, h=heads) ) self.temperature = nn.Parameter(torch.ones(heads, 1, 1)) self.attend = Attend(scale=scale, dropout=dropout, flash=flash) self.to_out = nn.Sequential(Rearrange("b h d n -> b n (h d)"), nn.Linear(dim_inner, dim, bias=False)) def forward(self, x): x = self.norm(x) q, k, v = self.to_qkv(x) q, k = map(l2norm, (q, k)) q = q * self.temperature.exp() out = self.attend(q, k, v) return self.to_out(out) class Transformer(Module): def __init__( self, *, dim, depth, dim_head=64, heads=8, attn_dropout=0.0, ff_dropout=0.0, ff_mult=4, norm_output=True, rotary_embed=None, flash_attn=True, linear_attn=False, ): super().__init__() self.layers = ModuleList([]) for _ in range(depth): if linear_attn: attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn) else: attn = Attention( dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, rotary_embed=rotary_embed, flash=flash_attn, ) self.layers.append(ModuleList([attn, FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)])) self.norm = RMSNorm(dim) if norm_output else nn.Identity() def forward(self, x): for attn, ff in self.layers: x = attn(x) + x x = ff(x) + x return self.norm(x) # bandsplit module class BandSplit(Module): # @beartype def __init__(self, dim, dim_inputs: Tuple[int, ...]): super().__init__() self.dim_inputs = dim_inputs self.to_features = ModuleList([]) for dim_in in dim_inputs: net = nn.Sequential(RMSNorm(dim_in), nn.Linear(dim_in, dim)) self.to_features.append(net) def forward(self, x): x = x.split(self.dim_inputs, dim=-1) outs = [] for split_input, to_feature in zip(x, self.to_features): split_output = to_feature(split_input) outs.append(split_output) return torch.stack(outs, dim=-2) def MLP(dim_in, dim_out, dim_hidden=None, depth=1, activation=nn.Tanh): dim_hidden = default(dim_hidden, dim_in) net = [] dims = (dim_in, *((dim_hidden,) * (depth - 1)), dim_out) for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): is_last = ind == (len(dims) - 2) net.append(nn.Linear(layer_dim_in, layer_dim_out)) if is_last: continue net.append(activation()) return nn.Sequential(*net) class MaskEstimator(Module): # @beartype def __init__(self, dim, dim_inputs: Tuple[int, ...], depth, mlp_expansion_factor=4): super().__init__() self.dim_inputs = dim_inputs self.to_freqs = ModuleList([]) dim_hidden = dim * mlp_expansion_factor for dim_in in dim_inputs: net = [] mlp = nn.Sequential(MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), nn.GLU(dim=-1)) self.to_freqs.append(mlp) def forward(self, x): x = x.unbind(dim=-2) outs = [] for band_features, mlp in zip(x, self.to_freqs): freq_out = mlp(band_features) outs.append(freq_out) return torch.cat(outs, dim=-1) # main class DEFAULT_FREQS_PER_BANDS = ( 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 12, 12, 12, 12, 12, 12, 12, 12, 24, 24, 24, 24, 24, 24, 24, 24, 48, 48, 48, 48, 48, 48, 48, 48, 128, 129, ) class BSRoformer(Module): # @beartype def __init__( self, dim, *, depth, stereo=False, num_stems=1, time_transformer_depth=2, freq_transformer_depth=2, linear_transformer_depth=0, freqs_per_bands: Tuple[int, ...] 
= DEFAULT_FREQS_PER_BANDS, # in the paper, they divide into ~60 bands, test with 1 for starters dim_head=64, heads=8, attn_dropout=0.0, ff_dropout=0.0, flash_attn=True, dim_freqs_in=1025, stft_n_fft=2048, stft_hop_length=512, # 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction stft_win_length=2048, stft_normalized=False, stft_window_fn: Optional[Callable] = None, mask_estimator_depth=2, multi_stft_resolution_loss_weight=1.0, multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256), multi_stft_hop_size=147, multi_stft_normalized=False, multi_stft_window_fn: Callable = torch.hann_window, mlp_expansion_factor=4, use_torch_checkpoint=False, skip_connection=False, ): super().__init__() self.stereo = stereo self.audio_channels = 2 if stereo else 1 self.num_stems = num_stems self.use_torch_checkpoint = use_torch_checkpoint self.skip_connection = skip_connection self.layers = ModuleList([]) transformer_kwargs = dict( dim=dim, heads=heads, dim_head=dim_head, attn_dropout=attn_dropout, ff_dropout=ff_dropout, flash_attn=flash_attn, norm_output=False, ) time_rotary_embed = RotaryEmbedding(dim=dim_head) freq_rotary_embed = RotaryEmbedding(dim=dim_head) for _ in range(depth): tran_modules = [] if linear_transformer_depth > 0: tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs)) tran_modules.append( Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs) ) tran_modules.append( Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs) ) self.layers.append(nn.ModuleList(tran_modules)) self.final_norm = RMSNorm(dim) self.stft_kwargs = dict( n_fft=stft_n_fft, hop_length=stft_hop_length, win_length=stft_win_length, normalized=stft_normalized ) self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length) freqs = torch.stft( torch.randn(1, 4096), **self.stft_kwargs, window=torch.ones(stft_win_length), return_complex=True ).shape[1] assert len(freqs_per_bands) > 1 assert sum(freqs_per_bands) == freqs, ( f"the number of freqs in the bands must equal {freqs} based on the STFT settings, but got {sum(freqs_per_bands)}" ) freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in freqs_per_bands) self.band_split = BandSplit(dim=dim, dim_inputs=freqs_per_bands_with_complex) self.mask_estimators = nn.ModuleList([]) for _ in range(num_stems): mask_estimator = MaskEstimator( dim=dim, dim_inputs=freqs_per_bands_with_complex, depth=mask_estimator_depth, mlp_expansion_factor=mlp_expansion_factor, ) self.mask_estimators.append(mask_estimator) # for the multi-resolution stft loss self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes self.multi_stft_n_fft = stft_n_fft self.multi_stft_window_fn = multi_stft_window_fn self.multi_stft_kwargs = dict(hop_length=multi_stft_hop_size, normalized=multi_stft_normalized) def forward(self, raw_audio, target=None, return_loss_breakdown=False): """ einops b - batch f - freq t - time s - audio channel (1 for mono, 2 for stereo) n - number of 'stems' c - complex (2) d - feature dimension """ device = raw_audio.device # defining whether model is loaded on MPS (MacOS GPU accelerator) x_is_mps = True if device.type == "mps" else False if raw_audio.ndim == 2: raw_audio = rearrange(raw_audio, "b t -> b 1 t") channels = raw_audio.shape[1] 
assert (not self.stereo and channels == 1) or (self.stereo and channels == 2), ( "stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). also need to be False if mono (channel dimension of 1)" ) # to stft raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, "* t") stft_window = self.stft_window_fn(device=device) # RuntimeError: FFT operations are only supported on MacOS 14+ # Since it's tedious to define whether we're on correct MacOS version - simple try-catch is used try: stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True) except: stft_repr = torch.stft( raw_audio.cpu() if x_is_mps else raw_audio, **self.stft_kwargs, window=stft_window.cpu() if x_is_mps else stft_window, return_complex=True, ).to(device) stft_repr = torch.view_as_real(stft_repr) stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, "* f t c") # merge stereo / mono into the frequency, with frequency leading dimension, for band splitting stft_repr = rearrange(stft_repr, "b s f t c -> b (f s) t c") x = rearrange(stft_repr, "b f t c -> b t (f c)") if self.use_torch_checkpoint: x = checkpoint(self.band_split, x, use_reentrant=False) else: x = self.band_split(x) # axial / hierarchical attention store = [None] * len(self.layers) for i, transformer_block in enumerate(self.layers): if len(transformer_block) == 3: linear_transformer, time_transformer, freq_transformer = transformer_block x, ft_ps = pack([x], "b * d") if self.use_torch_checkpoint: x = checkpoint(linear_transformer, x, use_reentrant=False) else: x = linear_transformer(x) (x,) = unpack(x, ft_ps, "b * d") else: time_transformer, freq_transformer = transformer_block if self.skip_connection: # Sum all previous for j in range(i): x = x + store[j] x = rearrange(x, "b t f d -> b f t d") x, ps = pack([x], "* t d") if self.use_torch_checkpoint: x = checkpoint(time_transformer, x, use_reentrant=False) else: x = time_transformer(x) (x,) = unpack(x, ps, "* t d") x = rearrange(x, "b f t d -> b t f d") x, ps = pack([x], "* f d") if self.use_torch_checkpoint: x = checkpoint(freq_transformer, x, use_reentrant=False) else: x = freq_transformer(x) (x,) = unpack(x, ps, "* f d") if self.skip_connection: store[i] = x x = self.final_norm(x) num_stems = len(self.mask_estimators) if self.use_torch_checkpoint: mask = torch.stack([checkpoint(fn, x, use_reentrant=False) for fn in self.mask_estimators], dim=1) else: mask = torch.stack([fn(x) for fn in self.mask_estimators], dim=1) mask = rearrange(mask, "b n t (f c) -> b n f t c", c=2) # modulate frequency representation stft_repr = rearrange(stft_repr, "b f t c -> b 1 f t c") # complex number multiplication stft_repr = torch.view_as_complex(stft_repr) mask = torch.view_as_complex(mask) stft_repr = stft_repr * mask # istft stft_repr = rearrange(stft_repr, "b n (f s) t -> (b n s) f t", s=self.audio_channels) # same as torch.stft() fix for MacOS MPS above try: recon_audio = torch.istft( stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False, length=raw_audio.shape[-1] ) except: recon_audio = torch.istft( stft_repr.cpu() if x_is_mps else stft_repr, **self.stft_kwargs, window=stft_window.cpu() if x_is_mps else stft_window, return_complex=False, length=raw_audio.shape[-1], ).to(device) recon_audio = rearrange(recon_audio, "(b n s) t -> b n s t", s=self.audio_channels, n=num_stems) if num_stems == 1: recon_audio = rearrange(recon_audio, "b 1 s t -> b s t") # if a target is passed in, calculate loss for learning if not 
exists(target): return recon_audio if self.num_stems > 1: assert target.ndim == 4 and target.shape[1] == self.num_stems if target.ndim == 2: target = rearrange(target, "... t -> ... 1 t") target = target[..., : recon_audio.shape[-1]] # protect against lost length on istft loss = F.l1_loss(recon_audio, target) multi_stft_resolution_loss = 0.0 for window_size in self.multi_stft_resolutions_window_sizes: res_stft_kwargs = dict( n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft win_length=window_size, return_complex=True, window=self.multi_stft_window_fn(window_size, device=device), **self.multi_stft_kwargs, ) recon_Y = torch.stft(rearrange(recon_audio, "... s t -> (... s) t"), **res_stft_kwargs) target_Y = torch.stft(rearrange(target, "... s t -> (... s) t"), **res_stft_kwargs) multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y) weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight total_loss = loss + weighted_multi_resolution_loss if not return_loss_breakdown: return total_loss return total_loss, (loss, multi_stft_resolution_loss)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
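The `BSRoformer` variant fixes its band layout with `DEFAULT_FREQS_PER_BANDS` (which sums to 1025 bins, matching `stft_n_fft=2048`) instead of deriving bands from a mel filter bank, and its forward pass falls back to CPU FFTs on MPS devices via the try/except shown. An analogous smoke test with illustrative sizes:

```python
# Untrained smoke test for BSRoformer with the default band layout.
import torch
from bs_roformer.bs_roformer import BSRoformer

model = BSRoformer(dim=32, depth=1).eval()
with torch.no_grad():
    out = model(torch.randn(1, 44100))  # (batch, audio_channels, time)
print(out.shape)
```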
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/bs_roformer/__init__.py
tools/uvr5/bs_roformer/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/utils.py
tools/uvr5/lib/utils.py
import json import numpy as np import torch from tqdm import tqdm def load_data(file_name: str = "./lib/name_params.json") -> dict: with open(file_name, "r") as f: data = json.load(f) return data def make_padding(width, cropsize, offset): left = offset roi_size = cropsize - left * 2 if roi_size == 0: roi_size = cropsize right = roi_size - (width % roi_size) + left return left, right, roi_size def inference(X_spec, device, model, aggressiveness, data): """ data : dic configs """ def _execute(X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True): model.eval() with torch.no_grad(): preds = [] iterations = [n_window] total_iterations = sum(iterations) for i in tqdm(range(n_window)): start = i * roi_size X_mag_window = X_mag_pad[None, :, :, start : start + data["window_size"]] X_mag_window = torch.from_numpy(X_mag_window) if is_half: X_mag_window = X_mag_window.half() X_mag_window = X_mag_window.to(device) pred = model.predict(X_mag_window, aggressiveness) pred = pred.detach().cpu().numpy() preds.append(pred[0]) pred = np.concatenate(preds, axis=2) return pred def preprocess(X_spec): X_mag = np.abs(X_spec) X_phase = np.angle(X_spec) return X_mag, X_phase X_mag, X_phase = preprocess(X_spec) coef = X_mag.max() X_mag_pre = X_mag / coef n_frame = X_mag_pre.shape[2] pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset) n_window = int(np.ceil(n_frame / roi_size)) X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") if list(model.state_dict().values())[0].dtype == torch.float16: is_half = True else: is_half = False pred = _execute(X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half) pred = pred[:, :, :n_frame] if data["tta"]: pad_l += roi_size // 2 pad_r += roi_size // 2 n_window += 1 X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") pred_tta = _execute(X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half) pred_tta = pred_tta[:, :, roi_size // 2 :] pred_tta = pred_tta[:, :, :n_frame] return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase) else: return pred * coef, X_mag, np.exp(1.0j * X_phase) def _get_name_params(model_path, model_hash): data = load_data() flag = False ModelName = model_path for type in list(data): for model in list(data[type][0]): for i in range(len(data[type][0][model])): if str(data[type][0][model][i]["hash_name"]) == model_hash: flag = True elif str(data[type][0][model][i]["hash_name"]) in ModelName: flag = True if flag: model_params_auto = data[type][0][model][i]["model_params"] param_name_auto = data[type][0][model][i]["param_name"] if type == "equivalent": return param_name_auto, model_params_auto else: flag = False return param_name_auto, model_params_auto
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
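The windowing arithmetic in `make_padding` above is easiest to see with concrete numbers; a small worked check (import path assumed):

```python
# Worked example for make_padding: the usable ROI per window excludes the model
# offset on both sides, and the right pad completes the final ROI.
from lib.utils import make_padding  # import path assumed from the record above

left, right, roi_size = make_padding(width=1000, cropsize=512, offset=128)
# roi_size = 512 - 2*128 = 256
# right    = 256 - (1000 % 256) + 128 = 256 - 232 + 128 = 152
assert (left, right, roi_size) == (128, 152, 256)
```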
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/layers_new.py
tools/uvr5/lib/lib_v5/layers_new.py
import torch import torch.nn.functional as F from torch import nn from . import spec_utils class Conv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(Conv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nout, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, bias=False, ), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class Encoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): super(Encoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) def __call__(self, x): h = self.conv1(x) h = self.conv2(h) return h class Decoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): super(Decoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def __call__(self, x, skip=None): x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) if skip is not None: skip = spec_utils.crop_center(skip, x) x = torch.cat([x, skip], dim=1) h = self.conv1(x) # h = self.conv2(h) if self.dropout is not None: h = self.dropout(h) return h class ASPPModule(nn.Module): def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): super(ASPPModule, self).__init__() self.conv1 = nn.Sequential( nn.AdaptiveAvgPool2d((1, None)), Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), ) self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) self.conv3 = Conv2DBNActiv(nin, nout, 3, 1, dilations[0], dilations[0], activ=activ) self.conv4 = Conv2DBNActiv(nin, nout, 3, 1, dilations[1], dilations[1], activ=activ) self.conv5 = Conv2DBNActiv(nin, nout, 3, 1, dilations[2], dilations[2], activ=activ) self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def forward(self, x): _, _, h, w = x.size() feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) feat2 = self.conv2(x) feat3 = self.conv3(x) feat4 = self.conv4(x) feat5 = self.conv5(x) out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) out = self.bottleneck(out) if self.dropout is not None: out = self.dropout(out) return out class LSTMModule(nn.Module): def __init__(self, nin_conv, nin_lstm, nout_lstm): super(LSTMModule, self).__init__() self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) self.lstm = nn.LSTM(input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True) self.dense = nn.Sequential(nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()) def forward(self, x): N, _, nbins, nframes = x.size() h = self.conv(x)[:, 0] # N, nbins, nframes h = h.permute(2, 0, 1) # nframes, N, nbins h, _ = self.lstm(h) h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins h = h.reshape(nframes, N, 1, nbins) h = h.permute(1, 2, 3, 0) return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
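In `layers_new.py` the Encoder applies its stride in `conv1` and returns only the downsampled map, so skips must come from an earlier, higher-resolution stage; the Decoder upsamples 2x and fuses the skip via `crop_center`. A shape walk-through with arbitrary sizes:

```python
# Shape walk-through for the layers_new.py blocks above; sizes are arbitrary.
import torch
from lib.lib_v5.layers_new import Encoder, Decoder  # import path assumed

enc1 = Encoder(2, 16, ksize=3, stride=2, pad=1)   # 64x64 -> 32x32
enc2 = Encoder(16, 32, ksize=3, stride=2, pad=1)  # 32x32 -> 16x16
dec = Decoder(32 + 16, 16)                        # nin = upsampled + skip channels

x = torch.randn(1, 2, 64, 64)
e1 = enc1(x)          # (1, 16, 32, 32)
e2 = enc2(e1)         # (1, 32, 16, 16)
d = dec(e2, skip=e1)  # (1, 16, 32, 32)
```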
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/layers_123821KB.py
tools/uvr5/lib/lib_v5/layers_123821KB.py
import torch import torch.nn.functional as F from torch import nn from . import spec_utils class Conv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(Conv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nout, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, bias=False, ), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class SeperableConv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(SeperableConv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nin, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, groups=nin, bias=False, ), nn.Conv2d(nin, nout, kernel_size=1, bias=False), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class Encoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): super(Encoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) def __call__(self, x): skip = self.conv1(x) h = self.conv2(skip) return h, skip class Decoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): super(Decoder, self).__init__() self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def __call__(self, x, skip=None): x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) if skip is not None: skip = spec_utils.crop_center(skip, x) x = torch.cat([x, skip], dim=1) h = self.conv(x) if self.dropout is not None: h = self.dropout(h) return h class ASPPModule(nn.Module): def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): super(ASPPModule, self).__init__() self.conv1 = nn.Sequential( nn.AdaptiveAvgPool2d((1, None)), Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), ) self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) def forward(self, x): _, _, h, w = x.size() feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) feat2 = self.conv2(x) feat3 = self.conv3(x) feat4 = self.conv4(x) feat5 = self.conv5(x) out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) bottle = self.bottleneck(out) return bottle
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
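Unlike `layers_new.py`, this file's Encoder keeps `conv1` at stride 1 and returns `(downsampled, skip)`, so each stage hands back its own pre-stride skip:

```python
# The 123821KB Encoder returns both the downsampled map and a full-resolution skip.
import torch
from lib.lib_v5.layers_123821KB import Encoder, Decoder  # import path assumed

enc = Encoder(2, 16, ksize=3, stride=2, pad=1)
x = torch.randn(1, 2, 64, 64)
h, skip = enc(x)       # h: (1, 16, 32, 32), skip: (1, 16, 64, 64)
dec = Decoder(16 + 16, 8)
y = dec(h, skip=skip)  # (1, 8, 64, 64)
```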
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/layers_123812KB.py
tools/uvr5/lib/lib_v5/layers_123812KB.py
import torch import torch.nn.functional as F from torch import nn from . import spec_utils class Conv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(Conv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nout, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, bias=False, ), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class SeperableConv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(SeperableConv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nin, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, groups=nin, bias=False, ), nn.Conv2d(nin, nout, kernel_size=1, bias=False), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class Encoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): super(Encoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) def __call__(self, x): skip = self.conv1(x) h = self.conv2(skip) return h, skip class Decoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): super(Decoder, self).__init__() self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def __call__(self, x, skip=None): x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) if skip is not None: skip = spec_utils.crop_center(skip, x) x = torch.cat([x, skip], dim=1) h = self.conv(x) if self.dropout is not None: h = self.dropout(h) return h class ASPPModule(nn.Module): def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): super(ASPPModule, self).__init__() self.conv1 = nn.Sequential( nn.AdaptiveAvgPool2d((1, None)), Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), ) self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) def forward(self, x): _, _, h, w = x.size() feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) feat2 = self.conv2(x) feat3 = self.conv3(x) feat4 = self.conv4(x) feat5 = self.conv5(x) out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) bottle = self.bottleneck(out) return bottle
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
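`layers_123812KB.py` mirrors `layers_123821KB.py`; the block worth noting is the depthwise-separable conv that keeps the dilated ASPP branches cheap. A quick parameter count:

```python
# Parameter comparison: dense 3x3 conv vs the depthwise-separable variant above.
from lib.lib_v5.layers_123812KB import Conv2DBNActiv, SeperableConv2DBNActiv

count = lambda m: sum(p.numel() for p in m.parameters())
dense = Conv2DBNActiv(64, 64, ksize=3)
separable = SeperableConv2DBNActiv(64, 64, ksize=3)
print(count(dense), count(separable))  # 36992 vs 4800 trainable parameters
```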
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/spec_utils.py
tools/uvr5/lib/lib_v5/spec_utils.py
import hashlib import json import math import os import librosa import numpy as np import soundfile as sf from tqdm import tqdm def crop_center(h1, h2): h1_shape = h1.size() h2_shape = h2.size() if h1_shape[3] == h2_shape[3]: return h1 elif h1_shape[3] < h2_shape[3]: raise ValueError("h1_shape[3] must be greater than h2_shape[3]") # s_freq = (h2_shape[2] - h1_shape[2]) // 2 # e_freq = s_freq + h1_shape[2] s_time = (h1_shape[3] - h2_shape[3]) // 2 e_time = s_time + h2_shape[3] h1 = h1[:, :, :, s_time:e_time] return h1 def wave_to_spectrogram(wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False): if reverse: wave_left = np.flip(np.asfortranarray(wave[0])) wave_right = np.flip(np.asfortranarray(wave[1])) elif mid_side: wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) elif mid_side_b2: wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) else: wave_left = np.asfortranarray(wave[0]) wave_right = np.asfortranarray(wave[1]) spec_left = librosa.stft(wave_left, n_fft=n_fft, hop_length=hop_length) spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length) spec = np.asfortranarray([spec_left, spec_right]) return spec def wave_to_spectrogram_mt(wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False): import threading if reverse: wave_left = np.flip(np.asfortranarray(wave[0])) wave_right = np.flip(np.asfortranarray(wave[1])) elif mid_side: wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) elif mid_side_b2: wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) else: wave_left = np.asfortranarray(wave[0]) wave_right = np.asfortranarray(wave[1]) def run_thread(**kwargs): global spec_left spec_left = librosa.stft(**kwargs) thread = threading.Thread( target=run_thread, kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length}, ) thread.start() spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length) thread.join() spec = np.asfortranarray([spec_left, spec_right]) return spec def combine_spectrograms(specs, mp): l = min([specs[i].shape[2] for i in specs]) spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64) offset = 0 bands_n = len(mp.param["band"]) for d in range(1, bands_n + 1): h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"] spec_c[:, offset : offset + h, :l] = specs[d][ :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l ] offset += h if offset > mp.param["bins"]: raise ValueError("Too much bins") # lowpass fiter if mp.param["pre_filter_start"] > 0: # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']: if bands_n == 1: spec_c = fft_lp_filter(spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]) else: gp = 1 for b in range(mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]): g = math.pow(10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0) gp = g spec_c[:, b, :] *= g return np.asfortranarray(spec_c) def spectrogram_to_image(spec, mode="magnitude"): if mode == "magnitude": if np.iscomplexobj(spec): y = np.abs(spec) else: y = spec y = np.log10(y**2 + 1e-8) elif mode == "phase": if np.iscomplexobj(spec): y = np.angle(spec) else: y = spec y -= y.min() y *= 255 / y.max() img = np.uint8(y) if y.ndim == 3: img = 
img.transpose(1, 2, 0) img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2) return img def reduce_vocal_aggressively(X, y, softmask): v = X - y y_mag_tmp = np.abs(y) v_mag_tmp = np.abs(v) v_mask = v_mag_tmp > y_mag_tmp y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) return y_mag * np.exp(1.0j * np.angle(y)) def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32): if min_range < fade_size * 2: raise ValueError("min_range must be >= fade_area * 2") mag = mag.copy() idx = np.where(ref.mean(axis=(0, 1)) < thres)[0] starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) uninformative = np.where(ends - starts > min_range)[0] if len(uninformative) > 0: starts = starts[uninformative] ends = ends[uninformative] old_e = None for s, e in zip(starts, ends): if old_e is not None and s - old_e < fade_size: s = old_e - fade_size * 2 if s != 0: weight = np.linspace(0, 1, fade_size) mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size] else: s -= fade_size if e != mag.shape[2]: weight = np.linspace(1, 0, fade_size) mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e] else: e += fade_size mag[:, :, s + fade_size : e - fade_size] += ref[:, :, s + fade_size : e - fade_size] old_e = e return mag def align_wave_head_and_tail(a, b): l = min([a[0].size, b[0].size]) return a[:l, :l], b[:l, :l] def cache_or_load(mix_path, inst_path, mp): mix_basename = os.path.splitext(os.path.basename(mix_path))[0] inst_basename = os.path.splitext(os.path.basename(inst_path))[0] cache_dir = "mph{}".format(hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()) mix_cache_dir = os.path.join("cache", cache_dir) inst_cache_dir = os.path.join("cache", cache_dir) os.makedirs(mix_cache_dir, exist_ok=True) os.makedirs(inst_cache_dir, exist_ok=True) mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy") inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy") if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): X_spec_m = np.load(mix_cache_path) y_spec_m = np.load(inst_cache_path) else: X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} for d in range(len(mp.param["band"]), 0, -1): bp = mp.param["band"][d] if d == len(mp.param["band"]): # high-end band X_wave[d], _ = librosa.load( mix_path, sr=bp["sr"], mono=False, dtype=np.float32, res_type=bp["res_type"] ) y_wave[d], _ = librosa.load( inst_path, sr=bp["sr"], mono=False, dtype=np.float32, res_type=bp["res_type"], ) else: # lower bands X_wave[d] = librosa.resample( X_wave[d + 1], orig_sr=mp.param["band"][d + 1]["sr"], target_sr=bp["sr"], res_type=bp["res_type"], ) y_wave[d] = librosa.resample( y_wave[d + 1], orig_sr=mp.param["band"][d + 1]["sr"], target_sr=bp["sr"], res_type=bp["res_type"], ) X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) X_spec_s[d] = wave_to_spectrogram( X_wave[d], bp["hl"], bp["n_fft"], mp.param["mid_side"], mp.param["mid_side_b2"], mp.param["reverse"], ) y_spec_s[d] = wave_to_spectrogram( y_wave[d], bp["hl"], bp["n_fft"], mp.param["mid_side"], mp.param["mid_side_b2"], mp.param["reverse"], ) del X_wave, y_wave X_spec_m = combine_spectrograms(X_spec_s, mp) y_spec_m = combine_spectrograms(y_spec_s, mp) if X_spec_m.shape != y_spec_m.shape: raise ValueError("The combined spectrograms are different: " + mix_path) _, ext = os.path.splitext(mix_path) np.save(mix_cache_path, X_spec_m) np.save(inst_cache_path, y_spec_m) return 
X_spec_m, y_spec_m def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse): spec_left = np.asfortranarray(spec[0]) spec_right = np.asfortranarray(spec[1]) wave_left = librosa.istft(spec_left, hop_length=hop_length) wave_right = librosa.istft(spec_right, hop_length=hop_length) if reverse: return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) elif mid_side: return np.asfortranarray([np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]) elif mid_side_b2: return np.asfortranarray( [ np.add(wave_right / 1.25, 0.4 * wave_left), np.subtract(wave_left / 1.25, 0.4 * wave_right), ] ) else: return np.asfortranarray([wave_left, wave_right]) def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2): import threading spec_left = np.asfortranarray(spec[0]) spec_right = np.asfortranarray(spec[1]) def run_thread(**kwargs): global wave_left wave_left = librosa.istft(**kwargs) thread = threading.Thread(target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}) thread.start() wave_right = librosa.istft(spec_right, hop_length=hop_length) thread.join() if reverse: return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) elif mid_side: return np.asfortranarray([np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]) elif mid_side_b2: return np.asfortranarray( [ np.add(wave_right / 1.25, 0.4 * wave_left), np.subtract(wave_left / 1.25, 0.4 * wave_right), ] ) else: return np.asfortranarray([wave_left, wave_right]) def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): wave_band = {} bands_n = len(mp.param["band"]) offset = 0 for d in range(1, bands_n + 1): bp = mp.param["band"][d] spec_s = np.ndarray(shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex) h = bp["crop_stop"] - bp["crop_start"] spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[:, offset : offset + h, :] offset += h if d == bands_n: # higher if extra_bins_h: # if --high_end_process bypass max_bin = bp["n_fft"] // 2 spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[:, :extra_bins_h, :] if bp["hpf_start"] > 0: spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) if bands_n == 1: wave = spectrogram_to_wave( spec_s, bp["hl"], mp.param["mid_side"], mp.param["mid_side_b2"], mp.param["reverse"], ) else: wave = np.add( wave, spectrogram_to_wave( spec_s, bp["hl"], mp.param["mid_side"], mp.param["mid_side_b2"], mp.param["reverse"], ), ) else: sr = mp.param["band"][d + 1]["sr"] if d == 1: # lower spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) wave = librosa.resample( spectrogram_to_wave( spec_s, bp["hl"], mp.param["mid_side"], mp.param["mid_side_b2"], mp.param["reverse"], ), orig_sr=bp["sr"], target_sr=sr, res_type="sinc_fastest", ) else: # mid spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) wave2 = np.add( wave, spectrogram_to_wave( spec_s, bp["hl"], mp.param["mid_side"], mp.param["mid_side_b2"], mp.param["reverse"], ), ) # wave = librosa.core.resample(wave2, orig_sr=bp['sr'], target_sr=sr, res_type="sinc_fastest") wave = librosa.core.resample(wave2, orig_sr=bp["sr"], target_sr=sr, res_type="scipy") return wave.T def fft_lp_filter(spec, bin_start, bin_stop): g = 1.0 for b in range(bin_start, bin_stop): g -= 1 / (bin_stop - bin_start) spec[:, b, :] = g * spec[:, b, :] spec[:, bin_stop:, :] *= 0 return spec def fft_hp_filter(spec, bin_start, bin_stop): g = 1.0 for b in 
range(bin_start, bin_stop, -1): g -= 1 / (bin_start - bin_stop) spec[:, b, :] = g * spec[:, b, :] spec[:, 0 : bin_stop + 1, :] *= 0 return spec def mirroring(a, spec_m, input_high_end, mp): if "mirroring" == a: mirror = np.flip( np.abs( spec_m[ :, mp.param["pre_filter_start"] - 10 - input_high_end.shape[1] : mp.param["pre_filter_start"] - 10, :, ] ), 1, ) mirror = mirror * np.exp(1.0j * np.angle(input_high_end)) return np.where(np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror) if "mirroring2" == a: mirror = np.flip( np.abs( spec_m[ :, mp.param["pre_filter_start"] - 10 - input_high_end.shape[1] : mp.param["pre_filter_start"] - 10, :, ] ), 1, ) mi = np.multiply(mirror, input_high_end * 1.7) return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) def ensembling(a, specs): for i in range(1, len(specs)): if i == 1: spec = specs[0] ln = min([spec.shape[2], specs[i].shape[2]]) spec = spec[:, :, :ln] specs[i] = specs[i][:, :, :ln] if "min_mag" == a: spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) if "max_mag" == a: spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) return spec def stft(wave, nfft, hl): wave_left = np.asfortranarray(wave[0]) wave_right = np.asfortranarray(wave[1]) spec_left = librosa.stft(wave_left, n_fft=nfft, hop_length=hl) spec_right = librosa.stft(wave_right, n_fft=nfft, hop_length=hl) spec = np.asfortranarray([spec_left, spec_right]) return spec def istft(spec, hl): spec_left = np.asfortranarray(spec[0]) spec_right = np.asfortranarray(spec[1]) wave_left = librosa.istft(spec_left, hop_length=hl) wave_right = librosa.istft(spec_right, hop_length=hl) wave = np.asfortranarray([wave_left, wave_right]) if __name__ == "__main__": import argparse import time import cv2 from model_param_init import ModelParameters p = argparse.ArgumentParser() p.add_argument( "--algorithm", "-a", type=str, choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"], default="min_mag", ) p.add_argument( "--model_params", "-m", type=str, default=os.path.join("modelparams", "1band_sr44100_hl512.json"), ) p.add_argument("--output_name", "-o", type=str, default="output") p.add_argument("--vocals_only", "-v", action="store_true") p.add_argument("input", nargs="+") args = p.parse_args() start_time = time.time() if args.algorithm.startswith("invert") and len(args.input) != 2: raise ValueError("There should be two input files.") if not args.algorithm.startswith("invert") and len(args.input) < 2: raise ValueError("There must be at least two input files.") wave, specs = {}, {} mp = ModelParameters(args.model_params) for i in range(len(args.input)): spec = {} for d in range(len(mp.param["band"]), 0, -1): bp = mp.param["band"][d] if d == len(mp.param["band"]): # high-end band wave[d], _ = librosa.load( args.input[i], sr=bp["sr"], mono=False, dtype=np.float32, res_type=bp["res_type"], ) if len(wave[d].shape) == 1: # mono to stereo wave[d] = np.array([wave[d], wave[d]]) else: # lower bands wave[d] = librosa.resample( wave[d + 1], orig_sr=mp.param["band"][d + 1]["sr"], target_sr=bp["sr"], res_type=bp["res_type"], ) spec[d] = wave_to_spectrogram( wave[d], bp["hl"], bp["n_fft"], mp.param["mid_side"], mp.param["mid_side_b2"], mp.param["reverse"], ) specs[i] = combine_spectrograms(spec, mp) del wave if args.algorithm == "deep": d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1]) v_spec = d_spec - specs[1] sf.write( os.path.join("{}.wav".format(args.output_name)), cmb_spectrogram_to_wave(v_spec, mp), mp.param["sr"], ) if 
args.algorithm.startswith("invert"): ln = min([specs[0].shape[2], specs[1].shape[2]]) specs[0] = specs[0][:, :, :ln] specs[1] = specs[1][:, :, :ln] if "invert_p" == args.algorithm: X_mag = np.abs(specs[0]) y_mag = np.abs(specs[1]) max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0])) else: specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) v_spec = specs[0] - specs[1] if not args.vocals_only: X_mag = np.abs(specs[0]) y_mag = np.abs(specs[1]) v_mag = np.abs(v_spec) X_image = spectrogram_to_image(X_mag) y_image = spectrogram_to_image(y_mag) v_image = spectrogram_to_image(v_mag) cv2.imwrite("{}_X.png".format(args.output_name), X_image) cv2.imwrite("{}_y.png".format(args.output_name), y_image) cv2.imwrite("{}_v.png".format(args.output_name), v_image) sf.write( "{}_X.wav".format(args.output_name), cmb_spectrogram_to_wave(specs[0], mp), mp.param["sr"], ) sf.write( "{}_y.wav".format(args.output_name), cmb_spectrogram_to_wave(specs[1], mp), mp.param["sr"], ) sf.write( "{}_v.wav".format(args.output_name), cmb_spectrogram_to_wave(v_spec, mp), mp.param["sr"], ) else: if not args.algorithm == "deep": sf.write( os.path.join("ensembled", "{}.wav".format(args.output_name)), cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), mp.param["sr"], ) if args.algorithm == "align": trackalignment = [ { "file1": '"{}"'.format(args.input[0]), "file2": '"{}"'.format(args.input[1]), } ] for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}") # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
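The `istft` helper near the end of spec_utils.py above builds the stereo wave with `np.asfortranarray([wave_left, wave_right])` but falls through without a `return`, so a caller would receive `None`. A minimal self-contained sketch of the intended stereo STFT round trip, mirroring the `stft`/`istft` pair above (function names here are illustrative, not the repo's API):

```python
import librosa
import numpy as np

def stft_stereo(wave, n_fft, hop_length):
    # wave: (2, n_samples) -> (2, n_fft // 2 + 1, n_frames) complex spectrogram
    return np.asfortranarray(
        [librosa.stft(np.asfortranarray(wave[c]), n_fft=n_fft, hop_length=hop_length) for c in (0, 1)]
    )

def istft_stereo(spec, hop_length):
    # inverse of stft_stereo; note the explicit return the original istft omits
    return np.asfortranarray(
        [librosa.istft(np.asfortranarray(spec[c]), hop_length=hop_length) for c in (0, 1)]
    )

if __name__ == "__main__":
    wave = np.random.randn(2, 44100).astype(np.float32)
    spec = stft_stereo(wave, n_fft=2048, hop_length=512)
    out = istft_stereo(spec, hop_length=512)
    print(spec.shape, out.shape)  # (2, 1025, 87) (2, ~44100)
```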
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/model_param_init.py
tools/uvr5/lib/lib_v5/model_param_init.py
import json
import pathlib

default_param = {}
default_param["bins"] = 768
default_param["unstable_bins"] = 9  # training only
default_param["reduction_bins"] = 762  # training only
default_param["sr"] = 44100
default_param["pre_filter_start"] = 757
default_param["pre_filter_stop"] = 768
default_param["band"] = {}

default_param["band"][1] = {
    "sr": 11025,
    "hl": 128,
    "n_fft": 960,
    "crop_start": 0,
    "crop_stop": 245,
    "lpf_start": 61,  # inference only
    "res_type": "polyphase",
}

default_param["band"][2] = {
    "sr": 44100,
    "hl": 512,
    "n_fft": 1536,
    "crop_start": 24,
    "crop_stop": 547,
    "hpf_start": 81,  # inference only
    "res_type": "sinc_best",
}


def int_keys(d):
    # `d` arrives as a list of (key, value) pairs via json's object_pairs_hook
    r = {}
    for k, v in d:
        if k.isdigit():
            k = int(k)
        r[k] = v
    return r


class ModelParameters(object):
    def __init__(self, config_path=""):
        if ".pth" == pathlib.Path(config_path).suffix:
            import zipfile

            with zipfile.ZipFile(config_path, "r") as zip:
                self.param = json.loads(zip.read("param.json"), object_pairs_hook=int_keys)
        elif ".json" == pathlib.Path(config_path).suffix:
            with open(config_path, "r") as f:
                self.param = json.loads(f.read(), object_pairs_hook=int_keys)
        else:
            self.param = default_param

        for k in [
            "mid_side",
            "mid_side_b",
            "mid_side_b2",
            "stereo_w",
            "stereo_n",
            "reverse",
        ]:
            if k not in self.param:
                self.param[k] = False
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
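`int_keys` above exists because JSON object keys are always strings, while the rest of the code indexes the band table with integers (`mp.param["band"][1]`). A small sketch of the round trip through `object_pairs_hook`, using an inline JSON string instead of a real config file:

```python
import json

def int_keys(pairs):
    # json passes each object as a list of (key, value) tuples via object_pairs_hook
    return {int(k) if k.isdigit() else k: v for k, v in pairs}

raw = '{"sr": 44100, "band": {"1": {"hl": 128}, "2": {"hl": 512}}}'
param = json.loads(raw, object_pairs_hook=int_keys)

assert 1 in param["band"] and "1" not in param["band"]
print(param["band"][1]["hl"])  # 128
```

Because the hook runs on every nested object, only the all-digit band keys are converted; names like `sr` pass through unchanged.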
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets_new.py
tools/uvr5/lib/lib_v5/nets_new.py
import torch import torch.nn.functional as F from torch import nn from . import layers_new class BaseNet(nn.Module): def __init__(self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))): super(BaseNet, self).__init__() self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1) self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1) self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1) self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1) self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1) self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm) self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) def __call__(self, x): e1 = self.enc1(x) e2 = self.enc2(e1) e3 = self.enc3(e2) e4 = self.enc4(e3) e5 = self.enc5(e4) h = self.aspp(e5) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = torch.cat([h, self.lstm_dec2(h)], dim=1) h = self.dec1(h, e1) return h class CascadedNet(nn.Module): def __init__(self, n_fft, nout=32, nout_lstm=128): super(CascadedNet, self).__init__() self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.nin_lstm = self.max_bin // 2 self.offset = 64 self.stg1_low_band_net = nn.Sequential( BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0), ) self.stg1_high_band_net = BaseNet(2, nout // 4, self.nin_lstm // 2, nout_lstm // 2) self.stg2_low_band_net = nn.Sequential( BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0), ) self.stg2_high_band_net = BaseNet(nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2) self.stg3_full_band_net = BaseNet(3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm) self.out = nn.Conv2d(nout, 2, 1, bias=False) self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) def forward(self, x): x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 l1_in = x[:, :, :bandw] h1_in = x[:, :, bandw:] l1 = self.stg1_low_band_net(l1_in) h1 = self.stg1_high_band_net(h1_in) aux1 = torch.cat([l1, h1], dim=2) l2_in = torch.cat([l1_in, l1], dim=1) h2_in = torch.cat([h1_in, h1], dim=1) l2 = self.stg2_low_band_net(l2_in) h2 = self.stg2_high_band_net(h2_in) aux2 = torch.cat([l2, h2], dim=2) f3_in = torch.cat([x, aux1, aux2], dim=1) f3 = self.stg3_full_band_net(f3_in) mask = torch.sigmoid(self.out(f3)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux = torch.cat([aux1, aux2], dim=1) aux = torch.sigmoid(self.aux_out(aux)) aux = F.pad( input=aux, pad=(0, 0, 0, self.output_bin - aux.size()[2]), mode="replicate", ) return mask, aux else: return mask def predict_mask(self, x): mask = self.forward(x) if self.offset > 0: mask = mask[:, :, :, self.offset : -self.offset] assert mask.size()[3] > 0 return mask def predict(self, x, aggressiveness=None): mask = self.forward(x) pred_mag = x * mask if self.offset > 0: pred_mag = pred_mag[:, :, :, self.offset : -self.offset] assert pred_mag.size()[3] > 0 return pred_mag
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
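`CascadedNet.predict_mask` crops `self.offset` (64) frames from each end of the time axis and asserts the result is non-empty, so inputs must be longer than `2 * offset` frames. The shape bookkeeping below reproduces that contract with plain torch and an identity stand-in for the cascaded stages (running the real model would require the repo's `layers_new`):

```python
import torch
import torch.nn.functional as F

n_fft, offset = 2048, 64               # offset matches CascadedNet(n_fft).offset
output_bin = n_fft // 2 + 1
max_bin = n_fft // 2

x = torch.rand(1, 2, output_bin, 512)  # (batch, ch, freq bins, frames)
h = x[:, :, :max_bin]                  # forward() only sees the first n_fft // 2 bins
mask = torch.sigmoid(h)                # identity stand-in for the cascaded stages
mask = F.pad(mask, (0, 0, 0, output_bin - mask.size(2)), mode="replicate")
mask = mask[:, :, :, offset:-offset]   # predict_mask trims `offset` frames per side
assert mask.size(3) == 512 - 2 * offset
print(mask.shape)  # torch.Size([1, 2, 1025, 384])
```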
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/dataset.py
tools/uvr5/lib/lib_v5/dataset.py
import os import random import numpy as np import torch import torch.utils.data from tqdm import tqdm from . import spec_utils class VocalRemoverValidationSet(torch.utils.data.Dataset): def __init__(self, patch_list): self.patch_list = patch_list def __len__(self): return len(self.patch_list) def __getitem__(self, idx): path = self.patch_list[idx] data = np.load(path) X, y = data["X"], data["y"] X_mag = np.abs(X) y_mag = np.abs(y) return X_mag, y_mag def make_pair(mix_dir, inst_dir): input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] X_list = sorted( [os.path.join(mix_dir, fname) for fname in os.listdir(mix_dir) if os.path.splitext(fname)[1] in input_exts] ) y_list = sorted( [os.path.join(inst_dir, fname) for fname in os.listdir(inst_dir) if os.path.splitext(fname)[1] in input_exts] ) filelist = list(zip(X_list, y_list)) return filelist def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): if split_mode == "random": filelist = make_pair( os.path.join(dataset_dir, "mixtures"), os.path.join(dataset_dir, "instruments"), ) random.shuffle(filelist) if len(val_filelist) == 0: val_size = int(len(filelist) * val_rate) train_filelist = filelist[:-val_size] val_filelist = filelist[-val_size:] else: train_filelist = [pair for pair in filelist if list(pair) not in val_filelist] elif split_mode == "subdirs": if len(val_filelist) != 0: raise ValueError("The `val_filelist` option is not available in `subdirs` mode") train_filelist = make_pair( os.path.join(dataset_dir, "training/mixtures"), os.path.join(dataset_dir, "training/instruments"), ) val_filelist = make_pair( os.path.join(dataset_dir, "validation/mixtures"), os.path.join(dataset_dir, "validation/instruments"), ) return train_filelist, val_filelist def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): perm = np.random.permutation(len(X)) for i, idx in enumerate(tqdm(perm)): if np.random.uniform() < reduction_rate: y[idx] = spec_utils.reduce_vocal_aggressively(X[idx], y[idx], reduction_mask) if np.random.uniform() < 0.5: # swap channel X[idx] = X[idx, ::-1] y[idx] = y[idx, ::-1] if np.random.uniform() < 0.02: # mono X[idx] = X[idx].mean(axis=0, keepdims=True) y[idx] = y[idx].mean(axis=0, keepdims=True) if np.random.uniform() < 0.02: # inst X[idx] = y[idx] if np.random.uniform() < mixup_rate and i < len(perm) - 1: lam = np.random.beta(mixup_alpha, mixup_alpha) X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] return X, y def make_padding(width, cropsize, offset): left = offset roi_size = cropsize - left * 2 if roi_size == 0: roi_size = cropsize right = roi_size - (width % roi_size) + left return left, right, roi_size def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): len_dataset = patches * len(filelist) X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) for i, (X_path, y_path) in enumerate(tqdm(filelist)): X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) coef = np.max([np.abs(X).max(), np.abs(y).max()]) X, y = X / coef, y / coef l, r, roi_size = make_padding(X.shape[2], cropsize, offset) X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches) ends = starts + cropsize for j in range(patches): idx = i * patches + j X_dataset[idx] = X_pad[:, :, starts[j] : 
ends[j]] y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] return X_dataset, y_dataset def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): patch_list = [] patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(cropsize, sr, hop_length, n_fft, offset) os.makedirs(patch_dir, exist_ok=True) for i, (X_path, y_path) in enumerate(tqdm(filelist)): basename = os.path.splitext(os.path.basename(X_path))[0] X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) coef = np.max([np.abs(X).max(), np.abs(y).max()]) X, y = X / coef, y / coef l, r, roi_size = make_padding(X.shape[2], cropsize, offset) X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") len_dataset = int(np.ceil(X.shape[2] / roi_size)) for j in range(len_dataset): outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) start = j * roi_size if not os.path.exists(outpath): np.savez( outpath, X=X_pad[:, :, start : start + cropsize], y=y_pad[:, :, start : start + cropsize], ) patch_list.append(outpath) return VocalRemoverValidationSet(patch_list)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
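`make_padding` in dataset.py above chooses `(left, right)` so that, after padding, the usable region tiles exactly into `roi_size` windows with `offset` frames of context on each side — which is what lets `make_validation_set` step through patches at `j * roi_size`. A quick check of the arithmetic, copying the function verbatim:

```python
def make_padding(width, cropsize, offset):
    # copied verbatim from dataset.py above
    left = offset
    roi_size = cropsize - left * 2
    if roi_size == 0:
        roi_size = cropsize
    right = roi_size - (width % roi_size) + left
    return left, right, roi_size

width, cropsize, offset = 1000, 256, 64
left, right, roi_size = make_padding(width, cropsize, offset)
print(left, right, roi_size)                  # 64 88 128
padded = width + left + right                 # 1152
assert (padded - 2 * offset) % roi_size == 0  # usable region tiles exactly into roi_size
```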
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets_61968KB.py
tools/uvr5/lib/lib_v5/nets_61968KB.py
import torch import torch.nn.functional as F from torch import nn from . import layers_123821KB as layers class BaseASPPNet(nn.Module): def __init__(self, nin, ch, dilations=(4, 8, 16)): super(BaseASPPNet, self).__init__() self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) def __call__(self, x): h, e1 = self.enc1(x) h, e2 = self.enc2(h) h, e3 = self.enc3(h) h, e4 = self.enc4(h) h = self.aspp(h) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = self.dec1(h, e1) return h class CascadedASPPNet(nn.Module): def __init__(self, n_fft): super(CascadedASPPNet, self).__init__() self.stg1_low_band_net = BaseASPPNet(2, 32) self.stg1_high_band_net = BaseASPPNet(2, 32) self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) self.stg2_full_band_net = BaseASPPNet(16, 32) self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) self.stg3_full_band_net = BaseASPPNet(32, 64) self.out = nn.Conv2d(64, 2, 1, bias=False) self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.offset = 128 def forward(self, x, aggressiveness=None): mix = x.detach() x = x.clone() x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 aux1 = torch.cat( [ self.stg1_low_band_net(x[:, :, :bandw]), self.stg1_high_band_net(x[:, :, bandw:]), ], dim=2, ) h = torch.cat([x, aux1], dim=1) aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) h = torch.cat([x, aux1, aux2], dim=1) h = self.stg3_full_band_net(self.stg3_bridge(h)) mask = torch.sigmoid(self.out(h)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux1 = torch.sigmoid(self.aux1_out(aux1)) aux1 = F.pad( input=aux1, pad=(0, 0, 0, self.output_bin - aux1.size()[2]), mode="replicate", ) aux2 = torch.sigmoid(self.aux2_out(aux2)) aux2 = F.pad( input=aux2, pad=(0, 0, 0, self.output_bin - aux2.size()[2]), mode="replicate", ) return mask * mix, aux1 * mix, aux2 * mix else: if aggressiveness: mask[:, :, : aggressiveness["split_bin"]] = torch.pow( mask[:, :, : aggressiveness["split_bin"]], 1 + aggressiveness["value"] / 3, ) mask[:, :, aggressiveness["split_bin"] :] = torch.pow( mask[:, :, aggressiveness["split_bin"] :], 1 + aggressiveness["value"], ) return mask * mix def predict(self, x_mag, aggressiveness=None): h = self.forward(x_mag, aggressiveness) if self.offset > 0: h = h[:, :, :, self.offset : -self.offset] assert h.size()[3] > 0 return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
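At inference time the `aggressiveness` branch sharpens the sigmoid mask by exponentiation: a gentler exponent `1 + value / 3` below `split_bin` and a stronger `1 + value` above it. Since mask values lie in (0, 1), larger exponents push them toward 0 and suppress more of the mixture. A standalone illustration on a toy 1-D bin axis (the real code applies this along dim 2 of a `(batch, ch, bins, frames)` tensor):

```python
import torch

mask = torch.tensor([[0.2, 0.5, 0.9]])
aggressiveness = {"split_bin": 1, "value": 0.3}

low = mask[:, : aggressiveness["split_bin"]] ** (1 + aggressiveness["value"] / 3)
high = mask[:, aggressiveness["split_bin"] :] ** (1 + aggressiveness["value"])
print(torch.cat([low, high], dim=1))
# ~tensor([[0.1703, 0.4061, 0.8720]]) -- every value shrinks; exponent is larger above split_bin
```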
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/layers_33966KB.py
tools/uvr5/lib/lib_v5/layers_33966KB.py
import torch import torch.nn.functional as F from torch import nn from . import spec_utils class Conv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(Conv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nout, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, bias=False, ), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class SeperableConv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(SeperableConv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nin, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, groups=nin, bias=False, ), nn.Conv2d(nin, nout, kernel_size=1, bias=False), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class Encoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): super(Encoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) def __call__(self, x): skip = self.conv1(x) h = self.conv2(skip) return h, skip class Decoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): super(Decoder, self).__init__() self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def __call__(self, x, skip=None): x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) if skip is not None: skip = spec_utils.crop_center(skip, x) x = torch.cat([x, skip], dim=1) h = self.conv(x) if self.dropout is not None: h = self.dropout(h) return h class ASPPModule(nn.Module): def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): super(ASPPModule, self).__init__() self.conv1 = nn.Sequential( nn.AdaptiveAvgPool2d((1, None)), Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), ) self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.conv6 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.conv7 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) def forward(self, x): _, _, h, w = x.size() feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) feat2 = self.conv2(x) feat3 = self.conv3(x) feat4 = self.conv4(x) feat5 = self.conv5(x) feat6 = self.conv6(x) feat7 = self.conv7(x) out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) bottle = self.bottleneck(out) return bottle
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
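`SeperableConv2DBNActiv` [sic] factorizes a k×k convolution into a depthwise conv (`groups=nin`) followed by a 1×1 pointwise conv, cutting the weight count from `nin * nout * k * k` to `nin * k * k + nin * nout`. A quick parameter count with plain torch:

```python
import torch.nn as nn

nin, nout, k = 64, 64, 3

dense = nn.Conv2d(nin, nout, k, padding=1, bias=False)
depthwise = nn.Conv2d(nin, nin, k, padding=1, groups=nin, bias=False)
pointwise = nn.Conv2d(nin, nout, 1, bias=False)

def count(m):
    return sum(p.numel() for p in m.parameters())

print(count(dense))                         # 36864 = 64 * 64 * 3 * 3
print(count(depthwise) + count(pointwise))  # 4672 = 64 * 3 * 3 + 64 * 64
```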
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets_537227KB.py
tools/uvr5/lib/lib_v5/nets_537227KB.py
import torch import torch.nn.functional as F from torch import nn from . import layers_537238KB as layers class BaseASPPNet(nn.Module): def __init__(self, nin, ch, dilations=(4, 8, 16)): super(BaseASPPNet, self).__init__() self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) def __call__(self, x): h, e1 = self.enc1(x) h, e2 = self.enc2(h) h, e3 = self.enc3(h) h, e4 = self.enc4(h) h = self.aspp(h) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = self.dec1(h, e1) return h class CascadedASPPNet(nn.Module): def __init__(self, n_fft): super(CascadedASPPNet, self).__init__() self.stg1_low_band_net = BaseASPPNet(2, 64) self.stg1_high_band_net = BaseASPPNet(2, 64) self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) self.stg2_full_band_net = BaseASPPNet(32, 64) self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) self.stg3_full_band_net = BaseASPPNet(64, 128) self.out = nn.Conv2d(128, 2, 1, bias=False) self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.offset = 128 def forward(self, x, aggressiveness=None): mix = x.detach() x = x.clone() x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 aux1 = torch.cat( [ self.stg1_low_band_net(x[:, :, :bandw]), self.stg1_high_band_net(x[:, :, bandw:]), ], dim=2, ) h = torch.cat([x, aux1], dim=1) aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) h = torch.cat([x, aux1, aux2], dim=1) h = self.stg3_full_band_net(self.stg3_bridge(h)) mask = torch.sigmoid(self.out(h)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux1 = torch.sigmoid(self.aux1_out(aux1)) aux1 = F.pad( input=aux1, pad=(0, 0, 0, self.output_bin - aux1.size()[2]), mode="replicate", ) aux2 = torch.sigmoid(self.aux2_out(aux2)) aux2 = F.pad( input=aux2, pad=(0, 0, 0, self.output_bin - aux2.size()[2]), mode="replicate", ) return mask * mix, aux1 * mix, aux2 * mix else: if aggressiveness: mask[:, :, : aggressiveness["split_bin"]] = torch.pow( mask[:, :, : aggressiveness["split_bin"]], 1 + aggressiveness["value"] / 3, ) mask[:, :, aggressiveness["split_bin"] :] = torch.pow( mask[:, :, aggressiveness["split_bin"] :], 1 + aggressiveness["value"], ) return mask * mix def predict(self, x_mag, aggressiveness=None): h = self.forward(x_mag, aggressiveness) if self.offset > 0: h = h[:, :, :, self.offset : -self.offset] assert h.size()[3] > 0 return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
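Every CascadedASPPNet variant feeds the network only `max_bin = n_fft // 2` frequency rows, then pads the predicted mask back up to `output_bin = n_fft // 2 + 1` rows with `mode="replicate"`, i.e. by copying the topmost predicted row. Demonstrated in isolation:

```python
import torch
import torch.nn.functional as F

n_fft = 2048
max_bin, output_bin = n_fft // 2, n_fft // 2 + 1

mask = torch.rand(1, 2, max_bin, 64)                 # network output: 1024 bins
mask = F.pad(mask, (0, 0, 0, output_bin - mask.size(2)), mode="replicate")
print(mask.shape)                                    # torch.Size([1, 2, 1025, 64])
assert torch.equal(mask[:, :, -1], mask[:, :, -2])   # top row is a replica
```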
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/layers_537227KB.py
tools/uvr5/lib/lib_v5/layers_537227KB.py
import torch import torch.nn.functional as F from torch import nn from . import spec_utils class Conv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(Conv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nout, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, bias=False, ), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class SeperableConv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(SeperableConv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nin, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, groups=nin, bias=False, ), nn.Conv2d(nin, nout, kernel_size=1, bias=False), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class Encoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): super(Encoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) def __call__(self, x): skip = self.conv1(x) h = self.conv2(skip) return h, skip class Decoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): super(Decoder, self).__init__() self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def __call__(self, x, skip=None): x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) if skip is not None: skip = spec_utils.crop_center(skip, x) x = torch.cat([x, skip], dim=1) h = self.conv(x) if self.dropout is not None: h = self.dropout(h) return h class ASPPModule(nn.Module): def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): super(ASPPModule, self).__init__() self.conv1 = nn.Sequential( nn.AdaptiveAvgPool2d((1, None)), Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), ) self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.conv6 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.conv7 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) def forward(self, x): _, _, h, w = x.size() feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) feat2 = self.conv2(x) feat3 = self.conv3(x) feat4 = self.conv4(x) feat5 = self.conv5(x) feat6 = self.conv6(x) feat7 = self.conv7(x) out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) bottle = self.bottleneck(out) return bottle
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
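The first ASPP branch in these layer files is image pooling: `AdaptiveAvgPool2d((1, None))` averages over the whole frequency axis while keeping time intact, and `forward` bilinearly re-expands the result to the input size before concatenating it with the dilated branches. The shape trace:

```python
import torch
import torch.nn.functional as F

x = torch.rand(1, 8, 512, 64)                       # (batch, ch, bins, frames)
pooled = torch.nn.AdaptiveAvgPool2d((1, None))(x)   # (1, 8, 1, 64): mean over all bins
feat1 = F.interpolate(pooled, size=(512, 64), mode="bilinear", align_corners=True)
print(pooled.shape, feat1.shape)  # every bin now carries the per-frame global context
```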
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets_123812KB.py
tools/uvr5/lib/lib_v5/nets_123812KB.py
import torch import torch.nn.functional as F from torch import nn from . import layers_123821KB as layers class BaseASPPNet(nn.Module): def __init__(self, nin, ch, dilations=(4, 8, 16)): super(BaseASPPNet, self).__init__() self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) def __call__(self, x): h, e1 = self.enc1(x) h, e2 = self.enc2(h) h, e3 = self.enc3(h) h, e4 = self.enc4(h) h = self.aspp(h) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = self.dec1(h, e1) return h class CascadedASPPNet(nn.Module): def __init__(self, n_fft): super(CascadedASPPNet, self).__init__() self.stg1_low_band_net = BaseASPPNet(2, 32) self.stg1_high_band_net = BaseASPPNet(2, 32) self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) self.stg2_full_band_net = BaseASPPNet(16, 32) self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) self.stg3_full_band_net = BaseASPPNet(32, 64) self.out = nn.Conv2d(64, 2, 1, bias=False) self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.offset = 128 def forward(self, x, aggressiveness=None): mix = x.detach() x = x.clone() x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 aux1 = torch.cat( [ self.stg1_low_band_net(x[:, :, :bandw]), self.stg1_high_band_net(x[:, :, bandw:]), ], dim=2, ) h = torch.cat([x, aux1], dim=1) aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) h = torch.cat([x, aux1, aux2], dim=1) h = self.stg3_full_band_net(self.stg3_bridge(h)) mask = torch.sigmoid(self.out(h)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux1 = torch.sigmoid(self.aux1_out(aux1)) aux1 = F.pad( input=aux1, pad=(0, 0, 0, self.output_bin - aux1.size()[2]), mode="replicate", ) aux2 = torch.sigmoid(self.aux2_out(aux2)) aux2 = F.pad( input=aux2, pad=(0, 0, 0, self.output_bin - aux2.size()[2]), mode="replicate", ) return mask * mix, aux1 * mix, aux2 * mix else: if aggressiveness: mask[:, :, : aggressiveness["split_bin"]] = torch.pow( mask[:, :, : aggressiveness["split_bin"]], 1 + aggressiveness["value"] / 3, ) mask[:, :, aggressiveness["split_bin"] :] = torch.pow( mask[:, :, aggressiveness["split_bin"] :], 1 + aggressiveness["value"], ) return mask * mix def predict(self, x_mag, aggressiveness=None): h = self.forward(x_mag, aggressiveness) if self.offset > 0: h = h[:, :, :, self.offset : -self.offset] assert h.size()[3] > 0 return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets.py
tools/uvr5/lib/lib_v5/nets.py
import layers import torch import torch.nn.functional as F from torch import nn class BaseASPPNet(nn.Module): def __init__(self, nin, ch, dilations=(4, 8, 16)): super(BaseASPPNet, self).__init__() self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) def __call__(self, x): h, e1 = self.enc1(x) h, e2 = self.enc2(h) h, e3 = self.enc3(h) h, e4 = self.enc4(h) h = self.aspp(h) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = self.dec1(h, e1) return h class CascadedASPPNet(nn.Module): def __init__(self, n_fft): super(CascadedASPPNet, self).__init__() self.stg1_low_band_net = BaseASPPNet(2, 16) self.stg1_high_band_net = BaseASPPNet(2, 16) self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) self.stg2_full_band_net = BaseASPPNet(8, 16) self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) self.stg3_full_band_net = BaseASPPNet(16, 32) self.out = nn.Conv2d(32, 2, 1, bias=False) self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.offset = 128 def forward(self, x, aggressiveness=None): mix = x.detach() x = x.clone() x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 aux1 = torch.cat( [ self.stg1_low_band_net(x[:, :, :bandw]), self.stg1_high_band_net(x[:, :, bandw:]), ], dim=2, ) h = torch.cat([x, aux1], dim=1) aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) h = torch.cat([x, aux1, aux2], dim=1) h = self.stg3_full_band_net(self.stg3_bridge(h)) mask = torch.sigmoid(self.out(h)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux1 = torch.sigmoid(self.aux1_out(aux1)) aux1 = F.pad( input=aux1, pad=(0, 0, 0, self.output_bin - aux1.size()[2]), mode="replicate", ) aux2 = torch.sigmoid(self.aux2_out(aux2)) aux2 = F.pad( input=aux2, pad=(0, 0, 0, self.output_bin - aux2.size()[2]), mode="replicate", ) return mask * mix, aux1 * mix, aux2 * mix else: if aggressiveness: mask[:, :, : aggressiveness["split_bin"]] = torch.pow( mask[:, :, : aggressiveness["split_bin"]], 1 + aggressiveness["value"] / 3, ) mask[:, :, aggressiveness["split_bin"] :] = torch.pow( mask[:, :, aggressiveness["split_bin"] :], 1 + aggressiveness["value"], ) return mask * mix def predict(self, x_mag, aggressiveness=None): h = self.forward(x_mag, aggressiveness) if self.offset > 0: h = h[:, :, :, self.offset : -self.offset] assert h.size()[3] > 0 return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
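Note that this oldest variant does `import layers` absolutely, so unlike its siblings above it only imports with `tools/uvr5/lib/lib_v5` itself on `sys.path`. Structurally it is the same design: split the spectrogram at `bandw = bins // 2`, run separate low/high band nets, and re-concatenate along the frequency axis before the channel stack that feeds stage 2. The shape bookkeeping with identity stand-ins for the band nets (the real `aux1` would carry 16 channels here, not 2):

```python
import torch

x = torch.rand(1, 2, 1024, 128)       # (batch, ch, bins=max_bin, frames)
bandw = x.size(2) // 2

low = x[:, :, :bandw]                 # -> stg1_low_band_net in the real model
high = x[:, :, bandw:]                # -> stg1_high_band_net
aux1 = torch.cat([low, high], dim=2)  # frequency axis restored
assert aux1.shape == x.shape

h = torch.cat([x, aux1], dim=1)       # channel stack feeding stg2_bridge
print(h.shape)                        # torch.Size([1, 4, 1024, 128])
```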
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets_537238KB.py
tools/uvr5/lib/lib_v5/nets_537238KB.py
import torch import torch.nn.functional as F from torch import nn from . import layers_537238KB as layers class BaseASPPNet(nn.Module): def __init__(self, nin, ch, dilations=(4, 8, 16)): super(BaseASPPNet, self).__init__() self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) def __call__(self, x): h, e1 = self.enc1(x) h, e2 = self.enc2(h) h, e3 = self.enc3(h) h, e4 = self.enc4(h) h = self.aspp(h) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = self.dec1(h, e1) return h class CascadedASPPNet(nn.Module): def __init__(self, n_fft): super(CascadedASPPNet, self).__init__() self.stg1_low_band_net = BaseASPPNet(2, 64) self.stg1_high_band_net = BaseASPPNet(2, 64) self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) self.stg2_full_band_net = BaseASPPNet(32, 64) self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) self.stg3_full_band_net = BaseASPPNet(64, 128) self.out = nn.Conv2d(128, 2, 1, bias=False) self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.offset = 128 def forward(self, x, aggressiveness=None): mix = x.detach() x = x.clone() x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 aux1 = torch.cat( [ self.stg1_low_band_net(x[:, :, :bandw]), self.stg1_high_band_net(x[:, :, bandw:]), ], dim=2, ) h = torch.cat([x, aux1], dim=1) aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) h = torch.cat([x, aux1, aux2], dim=1) h = self.stg3_full_band_net(self.stg3_bridge(h)) mask = torch.sigmoid(self.out(h)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux1 = torch.sigmoid(self.aux1_out(aux1)) aux1 = F.pad( input=aux1, pad=(0, 0, 0, self.output_bin - aux1.size()[2]), mode="replicate", ) aux2 = torch.sigmoid(self.aux2_out(aux2)) aux2 = F.pad( input=aux2, pad=(0, 0, 0, self.output_bin - aux2.size()[2]), mode="replicate", ) return mask * mix, aux1 * mix, aux2 * mix else: if aggressiveness: mask[:, :, : aggressiveness["split_bin"]] = torch.pow( mask[:, :, : aggressiveness["split_bin"]], 1 + aggressiveness["value"] / 3, ) mask[:, :, aggressiveness["split_bin"] :] = torch.pow( mask[:, :, aggressiveness["split_bin"] :], 1 + aggressiveness["value"], ) return mask * mix def predict(self, x_mag, aggressiveness=None): h = self.forward(x_mag, aggressiveness) if self.offset > 0: h = h[:, :, :, self.offset : -self.offset] assert h.size()[3] > 0 return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets_123821KB.py
tools/uvr5/lib/lib_v5/nets_123821KB.py
import torch import torch.nn.functional as F from torch import nn from . import layers_123821KB as layers class BaseASPPNet(nn.Module): def __init__(self, nin, ch, dilations=(4, 8, 16)): super(BaseASPPNet, self).__init__() self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) def __call__(self, x): h, e1 = self.enc1(x) h, e2 = self.enc2(h) h, e3 = self.enc3(h) h, e4 = self.enc4(h) h = self.aspp(h) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = self.dec1(h, e1) return h class CascadedASPPNet(nn.Module): def __init__(self, n_fft): super(CascadedASPPNet, self).__init__() self.stg1_low_band_net = BaseASPPNet(2, 32) self.stg1_high_band_net = BaseASPPNet(2, 32) self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) self.stg2_full_band_net = BaseASPPNet(16, 32) self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) self.stg3_full_band_net = BaseASPPNet(32, 64) self.out = nn.Conv2d(64, 2, 1, bias=False) self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.offset = 128 def forward(self, x, aggressiveness=None): mix = x.detach() x = x.clone() x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 aux1 = torch.cat( [ self.stg1_low_band_net(x[:, :, :bandw]), self.stg1_high_band_net(x[:, :, bandw:]), ], dim=2, ) h = torch.cat([x, aux1], dim=1) aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) h = torch.cat([x, aux1, aux2], dim=1) h = self.stg3_full_band_net(self.stg3_bridge(h)) mask = torch.sigmoid(self.out(h)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux1 = torch.sigmoid(self.aux1_out(aux1)) aux1 = F.pad( input=aux1, pad=(0, 0, 0, self.output_bin - aux1.size()[2]), mode="replicate", ) aux2 = torch.sigmoid(self.aux2_out(aux2)) aux2 = F.pad( input=aux2, pad=(0, 0, 0, self.output_bin - aux2.size()[2]), mode="replicate", ) return mask * mix, aux1 * mix, aux2 * mix else: if aggressiveness: mask[:, :, : aggressiveness["split_bin"]] = torch.pow( mask[:, :, : aggressiveness["split_bin"]], 1 + aggressiveness["value"] / 3, ) mask[:, :, aggressiveness["split_bin"] :] = torch.pow( mask[:, :, aggressiveness["split_bin"] :], 1 + aggressiveness["value"], ) return mask * mix def predict(self, x_mag, aggressiveness=None): h = self.forward(x_mag, aggressiveness) if self.offset > 0: h = h[:, :, :, self.offset : -self.offset] assert h.size()[3] > 0 return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/layers_537238KB.py
tools/uvr5/lib/lib_v5/layers_537238KB.py
import torch import torch.nn.functional as F from torch import nn from . import spec_utils class Conv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(Conv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nout, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, bias=False, ), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class SeperableConv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(SeperableConv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nin, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, groups=nin, bias=False, ), nn.Conv2d(nin, nout, kernel_size=1, bias=False), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class Encoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): super(Encoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) def __call__(self, x): skip = self.conv1(x) h = self.conv2(skip) return h, skip class Decoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): super(Decoder, self).__init__() self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def __call__(self, x, skip=None): x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) if skip is not None: skip = spec_utils.crop_center(skip, x) x = torch.cat([x, skip], dim=1) h = self.conv(x) if self.dropout is not None: h = self.dropout(h) return h class ASPPModule(nn.Module): def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): super(ASPPModule, self).__init__() self.conv1 = nn.Sequential( nn.AdaptiveAvgPool2d((1, None)), Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), ) self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.conv6 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.conv7 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) def forward(self, x): _, _, h, w = x.size() feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) feat2 = self.conv2(x) feat3 = self.conv3(x) feat4 = self.conv4(x) feat5 = self.conv5(x) feat6 = self.conv6(x) feat7 = self.conv7(x) out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) bottle = self.bottleneck(out) return bottle
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/nets_33966KB.py
tools/uvr5/lib/lib_v5/nets_33966KB.py
import torch import torch.nn.functional as F from torch import nn from . import layers_33966KB as layers class BaseASPPNet(nn.Module): def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): super(BaseASPPNet, self).__init__() self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) def __call__(self, x): h, e1 = self.enc1(x) h, e2 = self.enc2(h) h, e3 = self.enc3(h) h, e4 = self.enc4(h) h = self.aspp(h) h = self.dec4(h, e4) h = self.dec3(h, e3) h = self.dec2(h, e2) h = self.dec1(h, e1) return h class CascadedASPPNet(nn.Module): def __init__(self, n_fft): super(CascadedASPPNet, self).__init__() self.stg1_low_band_net = BaseASPPNet(2, 16) self.stg1_high_band_net = BaseASPPNet(2, 16) self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) self.stg2_full_band_net = BaseASPPNet(8, 16) self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) self.stg3_full_band_net = BaseASPPNet(16, 32) self.out = nn.Conv2d(32, 2, 1, bias=False) self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) self.max_bin = n_fft // 2 self.output_bin = n_fft // 2 + 1 self.offset = 128 def forward(self, x, aggressiveness=None): mix = x.detach() x = x.clone() x = x[:, :, : self.max_bin] bandw = x.size()[2] // 2 aux1 = torch.cat( [ self.stg1_low_band_net(x[:, :, :bandw]), self.stg1_high_band_net(x[:, :, bandw:]), ], dim=2, ) h = torch.cat([x, aux1], dim=1) aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) h = torch.cat([x, aux1, aux2], dim=1) h = self.stg3_full_band_net(self.stg3_bridge(h)) mask = torch.sigmoid(self.out(h)) mask = F.pad( input=mask, pad=(0, 0, 0, self.output_bin - mask.size()[2]), mode="replicate", ) if self.training: aux1 = torch.sigmoid(self.aux1_out(aux1)) aux1 = F.pad( input=aux1, pad=(0, 0, 0, self.output_bin - aux1.size()[2]), mode="replicate", ) aux2 = torch.sigmoid(self.aux2_out(aux2)) aux2 = F.pad( input=aux2, pad=(0, 0, 0, self.output_bin - aux2.size()[2]), mode="replicate", ) return mask * mix, aux1 * mix, aux2 * mix else: if aggressiveness: mask[:, :, : aggressiveness["split_bin"]] = torch.pow( mask[:, :, : aggressiveness["split_bin"]], 1 + aggressiveness["value"] / 3, ) mask[:, :, aggressiveness["split_bin"] :] = torch.pow( mask[:, :, aggressiveness["split_bin"] :], 1 + aggressiveness["value"], ) return mask * mix def predict(self, x_mag, aggressiveness=None): h = self.forward(x_mag, aggressiveness) if self.offset > 0: h = h[:, :, :, self.offset : -self.offset] assert h.size()[3] > 0 return h
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/tools/uvr5/lib/lib_v5/layers.py
tools/uvr5/lib/lib_v5/layers.py
import torch import torch.nn.functional as F from torch import nn from . import spec_utils class Conv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(Conv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nout, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, bias=False, ), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class SeperableConv2DBNActiv(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): super(SeperableConv2DBNActiv, self).__init__() self.conv = nn.Sequential( nn.Conv2d( nin, nin, kernel_size=ksize, stride=stride, padding=pad, dilation=dilation, groups=nin, bias=False, ), nn.Conv2d(nin, nout, kernel_size=1, bias=False), nn.BatchNorm2d(nout), activ(), ) def __call__(self, x): return self.conv(x) class Encoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): super(Encoder, self).__init__() self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) def __call__(self, x): skip = self.conv1(x) h = self.conv2(skip) return h, skip class Decoder(nn.Module): def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): super(Decoder, self).__init__() self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) self.dropout = nn.Dropout2d(0.1) if dropout else None def __call__(self, x, skip=None): x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) if skip is not None: skip = spec_utils.crop_center(skip, x) x = torch.cat([x, skip], dim=1) h = self.conv(x) if self.dropout is not None: h = self.dropout(h) return h class ASPPModule(nn.Module): def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): super(ASPPModule, self).__init__() self.conv1 = nn.Sequential( nn.AdaptiveAvgPool2d((1, None)), Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), ) self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) self.bottleneck = nn.Sequential(Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)) def forward(self, x): _, _, h, w = x.size() feat1 = F.interpolate(self.conv1(x), size=(h, w), mode="bilinear", align_corners=True) feat2 = self.conv2(x) feat3 = self.conv3(x) feat4 = self.conv4(x) feat5 = self.conv5(x) out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) bottle = self.bottleneck(out) return bottle
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
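`Decoder` upsamples 2× bilinearly and then crops the encoder skip tensor to match before concatenating channels. `spec_utils.crop_center` itself is not shown in this excerpt, so the sketch below inlines a hypothetical center crop inferred from the call site (the real helper may crop only the time axis):

```python
import torch
import torch.nn.functional as F

def crop_center(skip, x):
    # hypothetical stand-in for spec_utils.crop_center: center-crop to x's height/width
    dh = (skip.size(2) - x.size(2)) // 2
    dw = (skip.size(3) - x.size(3)) // 2
    return skip[:, :, dh : dh + x.size(2), dw : dw + x.size(3)]

x = torch.rand(1, 32, 64, 64)       # decoder input
skip = torch.rand(1, 16, 130, 130)  # encoder skip, slightly larger

x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)  # -> 128x128
skip = crop_center(skip, x)                                                # -> 128x128
out = torch.cat([x, skip], dim=1)
print(out.shape)  # torch.Size([1, 48, 128, 128])
```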
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/export_torch_script_v3v4.py
GPT_SoVITS/export_torch_script_v3v4.py
import os from export_torch_script import ( T2SModel, get_raw_t2s_model, resamplex, spectrogram_torch, ) from f5_tts.model.backbones.dit import DiT from inference_webui import get_phones_and_bert import librosa from module import commons from module.mel_processing import mel_spectrogram_torch from module.models_onnx import CFM, Generator, SynthesizerTrnV3 import numpy as np import torch._dynamo.config import torchaudio import logging import uvicorn import torch import soundfile from librosa.filters import mel as librosa_mel_fn from inference_webui import get_spepc, norm_spec, resample, ssl_model logging.config.dictConfig(uvicorn.config.LOGGING_CONFIG) logger = logging.getLogger("uvicorn") is_half = True device = "cuda" if torch.cuda.is_available() else "cpu" now_dir = os.getcwd() class MelSpectrgram(torch.nn.Module): def __init__( self, dtype, device, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, ): super().__init__() self.hann_window = torch.hann_window(win_size).to(device=device, dtype=dtype) mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) self.mel_basis = torch.from_numpy(mel).to(dtype=dtype, device=device) self.n_fft: int = n_fft self.hop_size: int = hop_size self.win_size: int = win_size self.center: bool = center def forward(self, y): y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.n_fft - self.hop_size) / 2), int((self.n_fft - self.hop_size) / 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_size, win_length=self.win_size, window=self.hann_window, center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=False, ) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) spec = torch.matmul(self.mel_basis, spec) # spec = spectral_normalize_torch(spec) spec = torch.log(torch.clamp(spec, min=1e-5)) return spec class ExportDitBlocks(torch.nn.Module): def __init__(self, dit: DiT): super().__init__() self.transformer_blocks = dit.transformer_blocks self.norm_out = dit.norm_out self.proj_out = dit.proj_out self.depth = dit.depth def forward(self, x, t, mask, rope): for block in self.transformer_blocks: x = block(x, t, mask=mask, rope=(rope, 1.0)) x = self.norm_out(x, t) output = self.proj_out(x) return output class ExportDitEmbed(torch.nn.Module): def __init__(self, dit: DiT): super().__init__() self.time_embed = dit.time_embed self.d_embed = dit.d_embed self.text_embed = dit.text_embed self.input_embed = dit.input_embed self.rotary_embed = dit.rotary_embed self.rotary_embed.inv_freq.to(device) def forward( self, x0: torch.Tensor, # nosied input audio # noqa: F722 cond0: torch.Tensor, # masked cond audio # noqa: F722 x_lens: torch.Tensor, time: torch.Tensor, # time step # noqa: F821 F722 dt_base_bootstrap: torch.Tensor, text0: torch.Tensor, # noqa: F722#####condition feature ): x = x0.transpose(2, 1) cond = cond0.transpose(2, 1) text = text0.transpose(2, 1) mask = commons.sequence_mask(x_lens, max_length=x.size(1)).to(x.device) t = self.time_embed(time) + self.d_embed(dt_base_bootstrap) text_embed = self.text_embed(text, x.shape[1]) rope_t = torch.arange(x.shape[1], device=device) rope, _ = self.rotary_embed(rope_t) x = self.input_embed(x, cond, text_embed) return x, t, mask, rope class ExportDiT(torch.nn.Module): def __init__(self, dit: DiT): super().__init__() if dit != None: self.embed = ExportDitEmbed(dit) self.blocks = ExportDitBlocks(dit) else: self.embed = None self.blocks = None def forward( # x, prompt_x, x_lens, t, style,cond self, # 
d is channel,n is T x0: torch.Tensor, # nosied input audio # noqa: F722 cond0: torch.Tensor, # masked cond audio # noqa: F722 x_lens: torch.Tensor, time: torch.Tensor, # time step # noqa: F821 F722 dt_base_bootstrap: torch.Tensor, text0: torch.Tensor, # noqa: F722#####condition feature ): x, t, mask, rope = self.embed(x0, cond0, x_lens, time, dt_base_bootstrap, text0) output = self.blocks(x, t, mask, rope) return output class ExportCFM(torch.nn.Module): def __init__(self, cfm: CFM): super().__init__() self.cfm = cfm def forward( self, fea_ref: torch.Tensor, fea_todo_chunk: torch.Tensor, mel2: torch.Tensor, sample_steps: torch.LongTensor, ): T_min = fea_ref.size(2) fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1) cfm_res = self.cfm(fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps) cfm_res = cfm_res[:, :, mel2.shape[2] :] mel2 = cfm_res[:, :, -T_min:] fea_ref = fea_todo_chunk[:, :, -T_min:] return cfm_res, fea_ref, mel2 mel_fn = lambda x: mel_spectrogram_torch( x, **{ "n_fft": 1024, "win_size": 1024, "hop_size": 256, "num_mels": 100, "sampling_rate": 24000, "fmin": 0, "fmax": None, "center": False, }, ) mel_fn_v4 = lambda x: mel_spectrogram_torch( x, **{ "n_fft": 1280, "win_size": 1280, "hop_size": 320, "num_mels": 100, "sampling_rate": 32000, "fmin": 0, "fmax": None, "center": False, }, ) spec_min = -12 spec_max = 2 @torch.jit.script def norm_spec(x): spec_min = -12 spec_max = 2 return (x - spec_min) / (spec_max - spec_min) * 2 - 1 def denorm_spec(x): spec_min = -12 spec_max = 2 return (x + 1) / 2 * (spec_max - spec_min) + spec_min class ExportGPTSovitsHalf(torch.nn.Module): def __init__(self, hps, t2s_m: T2SModel, vq_model: SynthesizerTrnV3): super().__init__() self.hps = hps self.t2s_m = t2s_m self.vq_model = vq_model self.mel2 = MelSpectrgram( dtype=torch.float32, device=device, n_fft=1024, num_mels=100, sampling_rate=24000, hop_size=256, win_size=1024, fmin=0, fmax=None, center=False, ) # self.dtype = dtype self.filter_length: int = hps.data.filter_length self.sampling_rate: int = hps.data.sampling_rate self.hop_length: int = hps.data.hop_length self.win_length: int = hps.data.win_length self.hann_window = torch.hann_window(self.win_length, device=device, dtype=torch.float32) def forward( self, ssl_content, ref_audio_32k: torch.FloatTensor, phoneme_ids0, phoneme_ids1, bert1, bert2, top_k, ): refer = spectrogram_torch( self.hann_window, ref_audio_32k, self.filter_length, self.sampling_rate, self.hop_length, self.win_length, center=False, ).to(ssl_content.dtype) codes = self.vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompt = prompt_semantic.unsqueeze(0) # print('extract_latent',codes.shape,datetime.now().strftime("%Y-%m-%d %H:%M:%S")) pred_semantic = self.t2s_m(prompt, phoneme_ids0, phoneme_ids1, bert1, bert2, top_k) # print('t2s_m',pred_semantic.shape,datetime.now().strftime("%Y-%m-%d %H:%M:%S")) ge = self.vq_model.create_ge(refer) # print('create_ge',datetime.now().strftime("%Y-%m-%d %H:%M:%S")) prompt_ = prompt.unsqueeze(0) fea_ref = self.vq_model(prompt_, phoneme_ids0, ge) # print('fea_ref',datetime.now().strftime("%Y-%m-%d %H:%M:%S")) # print(prompt_.shape, phoneme_ids0.shape, ge.shape) # print(fea_ref.shape) ref_24k = resamplex(ref_audio_32k, 32000, 24000) mel2 = norm_spec(self.mel2(ref_24k)).to(ssl_content.dtype) T_min = min(mel2.shape[2], fea_ref.shape[2]) mel2 = mel2[:, :, :T_min] fea_ref = fea_ref[:, :, :T_min] if T_min > 468: mel2 = mel2[:, :, -468:] fea_ref = fea_ref[:, :, -468:] T_min = 468 fea_todo = 
self.vq_model(pred_semantic, phoneme_ids1, ge) # print('fea_todo',datetime.now().strftime("%Y-%m-%d %H:%M:%S")) # print(pred_semantic.shape, phoneme_ids1.shape, ge.shape) # print(fea_todo.shape) return fea_ref, fea_todo, mel2 class ExportGPTSovitsV4Half(torch.nn.Module): def __init__(self, hps, t2s_m: T2SModel, vq_model: SynthesizerTrnV3): super().__init__() self.hps = hps self.t2s_m = t2s_m self.vq_model = vq_model self.mel2 = MelSpectrgram( dtype=torch.float32, device=device, n_fft=1280, num_mels=100, sampling_rate=32000, hop_size=320, win_size=1280, fmin=0, fmax=None, center=False, ) # self.dtype = dtype self.filter_length: int = hps.data.filter_length self.sampling_rate: int = hps.data.sampling_rate self.hop_length: int = hps.data.hop_length self.win_length: int = hps.data.win_length self.hann_window = torch.hann_window(self.win_length, device=device, dtype=torch.float32) def forward( self, ssl_content, ref_audio_32k: torch.FloatTensor, phoneme_ids0, phoneme_ids1, bert1, bert2, top_k, ): refer = spectrogram_torch( self.hann_window, ref_audio_32k, self.filter_length, self.sampling_rate, self.hop_length, self.win_length, center=False, ).to(ssl_content.dtype) codes = self.vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompt = prompt_semantic.unsqueeze(0) # print('extract_latent',codes.shape,datetime.now().strftime("%Y-%m-%d %H:%M:%S")) pred_semantic = self.t2s_m(prompt, phoneme_ids0, phoneme_ids1, bert1, bert2, top_k) # print('t2s_m',pred_semantic.shape,datetime.now().strftime("%Y-%m-%d %H:%M:%S")) ge = self.vq_model.create_ge(refer) # print('create_ge',datetime.now().strftime("%Y-%m-%d %H:%M:%S")) prompt_ = prompt.unsqueeze(0) fea_ref = self.vq_model(prompt_, phoneme_ids0, ge) # print('fea_ref',datetime.now().strftime("%Y-%m-%d %H:%M:%S")) # print(prompt_.shape, phoneme_ids0.shape, ge.shape) # print(fea_ref.shape) ref_32k = ref_audio_32k mel2 = norm_spec(self.mel2(ref_32k)).to(ssl_content.dtype) T_min = min(mel2.shape[2], fea_ref.shape[2]) mel2 = mel2[:, :, :T_min] fea_ref = fea_ref[:, :, :T_min] if T_min > 500: mel2 = mel2[:, :, -500:] fea_ref = fea_ref[:, :, -500:] T_min = 500 fea_todo = self.vq_model(pred_semantic, phoneme_ids1, ge) # print('fea_todo',datetime.now().strftime("%Y-%m-%d %H:%M:%S")) # print(pred_semantic.shape, phoneme_ids1.shape, ge.shape) # print(fea_todo.shape) return fea_ref, fea_todo, mel2 class GPTSoVITSV3(torch.nn.Module): def __init__(self, gpt_sovits_half, cfm, bigvgan): super().__init__() self.gpt_sovits_half = gpt_sovits_half self.cfm = cfm self.bigvgan = bigvgan def forward( self, ssl_content, ref_audio_32k: torch.FloatTensor, phoneme_ids0: torch.LongTensor, phoneme_ids1: torch.LongTensor, bert1, bert2, top_k: torch.LongTensor, sample_steps: torch.LongTensor, ): # current_time = datetime.now() # print("gpt_sovits_half",current_time.strftime("%Y-%m-%d %H:%M:%S")) fea_ref, fea_todo, mel2 = self.gpt_sovits_half( ssl_content, ref_audio_32k, phoneme_ids0, phoneme_ids1, bert1, bert2, top_k ) chunk_len = 934 - fea_ref.shape[2] wav_gen_list = [] idx = 0 fea_todo = fea_todo[:, :, :-5] wav_gen_length = fea_todo.shape[2] * 256 while 1: # current_time = datetime.now() # print("idx:",idx,current_time.strftime("%Y-%m-%d %H:%M:%S")) fea_todo_chunk = fea_todo[:, :, idx : idx + chunk_len] if fea_todo_chunk.shape[-1] == 0: break # 因为导出的模型在不同shape时会重新编译还是怎么的,会卡顿10s这样, # 所以在这里补0让他shape维持不变 # 但是这样会导致生成的音频长度不对,所以在最后截取一下。 # 经过 bigvgan 之后音频长度就是 fea_todo.shape[2] * 256 complete_len = chunk_len - fea_todo_chunk.shape[-1] if complete_len != 0: fea_todo_chunk = 
torch.cat( [ fea_todo_chunk, torch.zeros(1, 512, complete_len).to(fea_todo_chunk.device).to(fea_todo_chunk.dtype), ], 2, ) cfm_res, fea_ref, mel2 = self.cfm(fea_ref, fea_todo_chunk, mel2, sample_steps) idx += chunk_len cfm_res = denorm_spec(cfm_res) bigvgan_res = self.bigvgan(cfm_res) wav_gen_list.append(bigvgan_res) wav_gen = torch.cat(wav_gen_list, 2) return wav_gen[0][0][:wav_gen_length] class GPTSoVITSV4(torch.nn.Module): def __init__(self, gpt_sovits_half, cfm, hifigan): super().__init__() self.gpt_sovits_half = gpt_sovits_half self.cfm = cfm self.hifigan = hifigan def forward( self, ssl_content, ref_audio_32k: torch.FloatTensor, phoneme_ids0: torch.LongTensor, phoneme_ids1: torch.LongTensor, bert1, bert2, top_k: torch.LongTensor, sample_steps: torch.LongTensor, ): # current_time = datetime.now() # print("gpt_sovits_half",current_time.strftime("%Y-%m-%d %H:%M:%S")) fea_ref, fea_todo, mel2 = self.gpt_sovits_half( ssl_content, ref_audio_32k, phoneme_ids0, phoneme_ids1, bert1, bert2, top_k ) chunk_len = 1000 - fea_ref.shape[2] wav_gen_list = [] idx = 0 fea_todo = fea_todo[:, :, :-10] wav_gen_length = fea_todo.shape[2] * 480 while 1: # current_time = datetime.now() # print("idx:",idx,current_time.strftime("%Y-%m-%d %H:%M:%S")) fea_todo_chunk = fea_todo[:, :, idx : idx + chunk_len] if fea_todo_chunk.shape[-1] == 0: break # 因为导出的模型在不同shape时会重新编译还是怎么的,会卡顿10s这样, # 所以在这里补0让他shape维持不变 # 但是这样会导致生成的音频长度不对,所以在最后截取一下。 # 经过 hifigan 之后音频长度就是 fea_todo.shape[2] * 480 complete_len = chunk_len - fea_todo_chunk.shape[-1] if complete_len != 0: fea_todo_chunk = torch.cat( [ fea_todo_chunk, torch.zeros(1, 512, complete_len).to(fea_todo_chunk.device).to(fea_todo_chunk.dtype), ], 2, ) cfm_res, fea_ref, mel2 = self.cfm(fea_ref, fea_todo_chunk, mel2, sample_steps) idx += chunk_len cfm_res = denorm_spec(cfm_res) hifigan_res = self.hifigan(cfm_res) wav_gen_list.append(hifigan_res) wav_gen = torch.cat(wav_gen_list, 2) return wav_gen[0][0][:wav_gen_length] def init_bigvgan(): global bigvgan_model from BigVGAN import bigvgan bigvgan_model = bigvgan.BigVGAN.from_pretrained( "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), use_cuda_kernel=False, ) # if True, RuntimeError: Ninja is required to load C++ extensions # remove weight norm in the model and set to eval mode bigvgan_model.remove_weight_norm() bigvgan_model = bigvgan_model.eval() if is_half == True: bigvgan_model = bigvgan_model.half().to(device) else: bigvgan_model = bigvgan_model.to(device) def init_hifigan(): global hifigan_model, bigvgan_model hifigan_model = Generator( initial_channel=100, resblock="1", resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], upsample_rates=[10, 6, 2, 2, 2], upsample_initial_channel=512, upsample_kernel_sizes=[20, 12, 4, 4, 4], gin_channels=0, is_bias=True, ) hifigan_model.eval() hifigan_model.remove_weight_norm() state_dict_g = torch.load( "%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,), map_location="cpu" ) print("loading vocoder", hifigan_model.load_state_dict(state_dict_g)) if is_half == True: hifigan_model = hifigan_model.half().to(device) else: hifigan_model = hifigan_model.to(device) class Sovits: def __init__(self, vq_model: SynthesizerTrnV3, cfm: CFM, hps): self.vq_model = vq_model self.hps = hps cfm.estimator = ExportDiT(cfm.estimator) self.cfm = cfm class DictToAttrRecursive(dict): def __init__(self, input_dict): super().__init__(input_dict) for key, value in input_dict.items(): if isinstance(value, 
dict): value = DictToAttrRecursive(value) self[key] = value setattr(self, key, value) def __getattr__(self, item): try: return self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") def __setattr__(self, key, value): if isinstance(value, dict): value = DictToAttrRecursive(value) super(DictToAttrRecursive, self).__setitem__(key, value) super().__setattr__(key, value) def __delattr__(self, item): try: del self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new v3v4set = {"v3", "v4"} def get_sovits_weights(sovits_path): path_sovits_v3 = "GPT_SoVITS/pretrained_models/s2Gv3.pth" is_exist_s2gv3 = os.path.exists(path_sovits_v3) version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path) if if_lora_v3 == True and is_exist_s2gv3 == False: logger.info("SoVITS V3 底模缺失,无法加载相应 LoRA 权重") dict_s2 = load_sovits_new(sovits_path) hps = dict_s2["config"] hps = DictToAttrRecursive(hps) hps.model.semantic_frame_rate = "25hz" if "enc_p.text_embedding.weight" not in dict_s2["weight"]: hps.model.version = "v2" # v3model,v2sybomls elif dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322: hps.model.version = "v1" else: hps.model.version = "v2" if model_version in v3v4set: hps.model.version = model_version logger.info(f"hps: {hps}") vq_model = SynthesizerTrnV3( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ) # init_bigvgan() model_version = hps.model.version logger.info(f"模型版本: {model_version}") if is_half == True: vq_model = vq_model.half().to(device) else: vq_model = vq_model.to(device) vq_model.load_state_dict(dict_s2["weight"], strict=False) vq_model.eval() cfm = vq_model.cfm del vq_model.cfm sovits = Sovits(vq_model, cfm, hps) return sovits logger.info(f"torch version {torch.__version__}") # ssl_model = cnhubert.get_model() # if is_half: # ssl_model = ssl_model.half().to(device) # else: # ssl_model = ssl_model.to(device) def export_cfm( e_cfm: ExportCFM, mu: torch.Tensor, x_lens: torch.LongTensor, prompt: torch.Tensor, n_timesteps: torch.IntTensor, temperature=1.0, ): cfm = e_cfm.cfm B, T = mu.size(0), mu.size(1) x = torch.randn([B, cfm.in_channels, T], device=mu.device, dtype=mu.dtype) * temperature print("x:", x.shape, x.dtype) prompt_len = prompt.size(-1) prompt_x = torch.zeros_like(x, dtype=mu.dtype) prompt_x[..., :prompt_len] = prompt[..., :prompt_len] x[..., :prompt_len] = 0.0 mu = mu.transpose(2, 1) ntimestep = int(n_timesteps) t = torch.tensor(0.0, dtype=x.dtype, device=x.device) d = torch.tensor(1.0 / ntimestep, dtype=x.dtype, device=x.device) t_tensor = torch.ones(x.shape[0], device=x.device, dtype=mu.dtype) * t d_tensor = torch.ones(x.shape[0], device=x.device, dtype=mu.dtype) * d print( "cfm input shapes:", x.shape, prompt_x.shape, x_lens.shape, t_tensor.shape, d_tensor.shape, mu.shape, ) print("cfm input dtypes:", x.dtype, prompt_x.dtype, x_lens.dtype, t_tensor.dtype, d_tensor.dtype, mu.dtype) estimator: ExportDiT = torch.jit.trace( cfm.estimator, optimize=True, example_inputs=(x, prompt_x, x_lens, t_tensor, d_tensor, mu), ) estimator.save("onnx/ad/estimator.pt") # torch.onnx.export( # cfm.estimator, # (x, prompt_x, x_lens, t_tensor, d_tensor, mu), # "onnx/ad/dit.onnx", # input_names=["x", "prompt_x", "x_lens", "t", "d", "mu"], # output_names=["output"], # dynamic_axes={ # "x": [2], # "prompt_x": [2], # "mu": [2], # }, # ) print("save estimator ok") 
cfm.estimator = estimator export_cfm = torch.jit.script(e_cfm) export_cfm.save("onnx/ad/cfm.pt") # sovits.cfm = cfm # cfm.save("onnx/ad/cfm.pt") return export_cfm def export_1(ref_wav_path, ref_wav_text, version="v3"): if version == "v3": sovits = get_sovits_weights("GPT_SoVITS/pretrained_models/s2Gv3.pth") init_bigvgan() else: sovits = get_sovits_weights("GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth") init_hifigan() dict_s1 = torch.load("GPT_SoVITS/pretrained_models/s1v3.ckpt") raw_t2s = get_raw_t2s_model(dict_s1).to(device) print("#### get_raw_t2s_model ####") print(raw_t2s.config) if is_half: raw_t2s = raw_t2s.half().to(device) t2s_m = T2SModel(raw_t2s) t2s_m.eval() script_t2s = torch.jit.script(t2s_m).to(device) hps = sovits.hps # ref_wav_path = "onnx/ad/ref.wav" speed = 1.0 sample_steps = 8 dtype = torch.float16 if is_half == True else torch.float32 refer = get_spepc(hps, ref_wav_path).to(device).to(dtype) zero_wav = np.zeros( int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32, ) with torch.no_grad(): wav16k, sr = librosa.load(ref_wav_path, sr=16000) wav16k = torch.from_numpy(wav16k) zero_wav_torch = torch.from_numpy(zero_wav) if is_half == True: wav16k = wav16k.half().to(device) zero_wav_torch = zero_wav_torch.half().to(device) else: wav16k = wav16k.to(device) zero_wav_torch = zero_wav_torch.to(device) wav16k = torch.cat([wav16k, zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() codes = sovits.vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompt = prompt_semantic.unsqueeze(0).to(device) # phones1, bert1, norm_text1 = get_phones_and_bert( # "你这老坏蛋,我找了你这么久,真没想到在这里找到你。他说。", "all_zh", "v3" # ) phones1, bert1, norm_text1 = get_phones_and_bert(ref_wav_text, "auto", "v3") phones2, bert2, norm_text2 = get_phones_and_bert( "这是一个简单的示例,真没想到这么简单就完成了。The King and His Stories.Once there was a king. He likes to write stories, but his stories were not good. 
As people were afraid of him, they all said his stories were good.After reading them, the writer at once turned to the soldiers and said: Take me back to prison, please.", "auto", "v3", ) phoneme_ids0 = torch.LongTensor(phones1).to(device).unsqueeze(0) phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0) # codes = sovits.vq_model.extract_latent(ssl_content) # prompt_semantic = codes[0, 0] # prompts = prompt_semantic.unsqueeze(0) top_k = torch.LongTensor([15]).to(device) print("topk", top_k) bert1 = bert1.T.to(device) bert2 = bert2.T.to(device) print( prompt.dtype, phoneme_ids0.dtype, phoneme_ids1.dtype, bert1.dtype, bert2.dtype, top_k.dtype, ) print( prompt.shape, phoneme_ids0.shape, phoneme_ids1.shape, bert1.shape, bert2.shape, top_k.shape, ) pred_semantic = t2s_m(prompt, phoneme_ids0, phoneme_ids1, bert1, bert2, top_k) ge = sovits.vq_model.create_ge(refer) prompt_ = prompt.unsqueeze(0) torch._dynamo.mark_dynamic(prompt_, 2) torch._dynamo.mark_dynamic(phoneme_ids0, 1) fea_ref = sovits.vq_model(prompt_, phoneme_ids0, ge) inputs = { "forward": (prompt_, phoneme_ids0, ge), "extract_latent": ssl_content, "create_ge": refer, } trace_vq_model = torch.jit.trace_module(sovits.vq_model, inputs, optimize=True) trace_vq_model.save("onnx/ad/vq_model.pt") print(fea_ref.shape, fea_ref.dtype, ge.shape) print(prompt_.shape, phoneme_ids0.shape, ge.shape) # vq_model = torch.jit.trace( # sovits.vq_model, # optimize=True, # # strict=False, # example_inputs=(prompt_, phoneme_ids0, ge), # ) # vq_model = sovits.vq_model vq_model = trace_vq_model if version == "v3": gpt_sovits_half = ExportGPTSovitsHalf(sovits.hps, script_t2s, trace_vq_model) torch.jit.script(gpt_sovits_half).save("onnx/ad/gpt_sovits_v3_half.pt") else: gpt_sovits_half = ExportGPTSovitsV4Half(sovits.hps, script_t2s, trace_vq_model) torch.jit.script(gpt_sovits_half).save("onnx/ad/gpt_sovits_v4_half.pt") ref_audio, sr = torchaudio.load(ref_wav_path) ref_audio = ref_audio.to(device).float() if ref_audio.shape[0] == 2: ref_audio = ref_audio.mean(0).unsqueeze(0) tgt_sr = 24000 if version == "v3" else 32000 if sr != tgt_sr: ref_audio = resample(ref_audio, sr, tgt_sr) # mel2 = mel_fn(ref_audio) mel2 = mel_fn(ref_audio) if version == "v3" else mel_fn_v4(ref_audio) mel2 = norm_spec(mel2) T_min = min(mel2.shape[2], fea_ref.shape[2]) fea_ref = fea_ref[:, :, :T_min] print("fea_ref:", fea_ref.shape, T_min) Tref = 468 if version == "v3" else 500 Tchunk = 934 if version == "v3" else 1000 if T_min > Tref: mel2 = mel2[:, :, -Tref:] fea_ref = fea_ref[:, :, -Tref:] T_min = Tref chunk_len = Tchunk - T_min mel2 = mel2.to(dtype) # fea_todo, ge = sovits.vq_model(pred_semantic,y_lengths, phoneme_ids1, ge) fea_todo = vq_model(pred_semantic, phoneme_ids1, ge) cfm_resss = [] idx = 0 sample_steps = torch.LongTensor([sample_steps]).to(device) export_cfm_ = ExportCFM(sovits.cfm) while 1: print("idx:", idx) fea_todo_chunk = fea_todo[:, :, idx : idx + chunk_len] if fea_todo_chunk.shape[-1] == 0: break print( "export_cfm:", fea_ref.shape, fea_todo_chunk.shape, mel2.shape, sample_steps.shape, ) if idx == 0: fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1) export_cfm_ = export_cfm( export_cfm_, fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, ) # torch.onnx.export( # export_cfm_, # ( # fea_ref, # fea_todo_chunk, # mel2, # sample_steps, # ), # "onnx/ad/cfm.onnx", # input_names=["fea_ref", "fea_todo_chunk", "mel2", "sample_steps"], # output_names=["cfm_res", "fea_ref_", "mel2_"], # dynamic_axes={ # "fea_ref": [2], # "fea_todo_chunk": 
[2], # "mel2": [2], # }, # ) idx += chunk_len cfm_res, fea_ref, mel2 = export_cfm_(fea_ref, fea_todo_chunk, mel2, sample_steps) cfm_resss.append(cfm_res) continue cmf_res = torch.cat(cfm_resss, 2) cmf_res = denorm_spec(cmf_res).to(device) print("cmf_res:", cmf_res.shape, cmf_res.dtype) with torch.inference_mode(): cmf_res_rand = torch.randn(1, 100, 934).to(device).to(dtype) torch._dynamo.mark_dynamic(cmf_res_rand, 2) if version == "v3": bigvgan_model_ = torch.jit.trace(bigvgan_model, optimize=True, example_inputs=(cmf_res_rand,)) bigvgan_model_.save("onnx/ad/bigvgan_model.pt") wav_gen = bigvgan_model(cmf_res) else: hifigan_model_ = torch.jit.trace(hifigan_model, optimize=True, example_inputs=(cmf_res_rand,)) hifigan_model_.save("onnx/ad/hifigan_model.pt") wav_gen = hifigan_model(cmf_res) print("wav_gen:", wav_gen.shape, wav_gen.dtype) audio = wav_gen[0][0].cpu().detach().numpy() sr = 24000 if version == "v3" else 48000 soundfile.write("out.export.wav", (audio * 32768).astype(np.int16), sr) from datetime import datetime def test_export( todo_text, gpt_sovits_v3_half, cfm, bigvgan, output, ): # hps = sovits.hps ref_wav_path = "onnx/ad/ref.wav" speed = 1.0 sample_steps = 8 dtype = torch.float16 if is_half == True else torch.float32 zero_wav = np.zeros( int(16000 * 0.3), dtype=np.float16 if is_half == True else np.float32, ) with torch.no_grad(): wav16k, sr = librosa.load(ref_wav_path, sr=16000) wav16k = torch.from_numpy(wav16k) zero_wav_torch = torch.from_numpy(zero_wav) if is_half == True: wav16k = wav16k.half().to(device) zero_wav_torch = zero_wav_torch.half().to(device) else: wav16k = wav16k.to(device) zero_wav_torch = zero_wav_torch.to(device) wav16k = torch.cat([wav16k, zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() ref_audio_32k, _ = librosa.load(ref_wav_path, sr=32000) ref_audio_32k = torch.from_numpy(ref_audio_32k).unsqueeze(0).to(device).float() phones1, bert1, norm_text1 = get_phones_and_bert( "你这老坏蛋,我找了你这么久,真没想到在这里找到你。他说。", "all_zh", "v3" ) phones2, bert2, norm_text2 = get_phones_and_bert( todo_text, "zh", "v3", ) phoneme_ids0 = torch.LongTensor(phones1).to(device).unsqueeze(0) phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0) bert1 = bert1.T.to(device) bert2 = bert2.T.to(device) top_k = torch.LongTensor([15]).to(device) current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") logger.info("start inference %s", current_time) print( ssl_content.shape, ref_audio_32k.shape, phoneme_ids0.shape,
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
true
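Both wrapper classes in the export script above (`GPTSoVITSV3` and `GPTSoVITSV4`) rely on the same workaround spelled out in their Chinese comments: a traced/exported model stalls (~10 s) whenever its input shape changes, so the final feature chunk is zero-padded up to a fixed `chunk_len` and the surplus audio is trimmed off at the end. Below is a minimal, self-contained sketch of that pattern; `chunked_vocode` and the toy vocoder are illustrative stand-ins, not the repo's real BigVGAN/HiFi-GAN modules.

```python
# Fixed-shape chunked vocoding, mirroring GPTSoVITSV3/V4.forward above.
# `vocoder` is a hypothetical stand-in callable, not the project's real model.
import torch

def chunked_vocode(fea_todo: torch.Tensor, vocoder, chunk_len: int, upsample: int) -> torch.Tensor:
    """fea_todo: (1, C, T) features; vocoder maps (1, C, chunk_len) -> (1, 1, chunk_len * upsample)."""
    true_len = fea_todo.shape[2] * upsample  # samples we actually want to keep
    pieces, idx = [], 0
    while True:
        chunk = fea_todo[:, :, idx : idx + chunk_len]
        if chunk.shape[-1] == 0:
            break
        pad = chunk_len - chunk.shape[-1]
        if pad > 0:  # keep the shape constant so the exported graph is not re-specialized
            zeros = torch.zeros(chunk.shape[0], chunk.shape[1], pad, dtype=chunk.dtype, device=chunk.device)
            chunk = torch.cat([chunk, zeros], dim=2)
        pieces.append(vocoder(chunk))
        idx += chunk_len
    wav = torch.cat(pieces, dim=2)
    return wav[..., :true_len]  # drop the audio synthesized from the zero padding

if __name__ == "__main__":
    toy_vocoder = lambda x: x[:, :1].repeat_interleave(4, dim=2)  # (1, C, L) -> (1, 1, 4L)
    feats = torch.randn(1, 512, 10)
    print(chunked_vocode(feats, toy_vocoder, chunk_len=8, upsample=4).shape)  # torch.Size([1, 1, 40])
```

In the real modules the trim length is `fea_todo.shape[2] * 256` for v3 (24 kHz BigVGAN) and `fea_todo.shape[2] * 480` for v4 (48 kHz HiFi-GAN), matching each vocoder's overall upsampling factor.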
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/inference_webui_fast.py
GPT_SoVITS/inference_webui_fast.py
""" 按中英混合识别 按日英混合识别 多语种启动切分识别语种 全部按中文识别 全部按英文识别 全部按日文识别 """ import psutil import os def set_high_priority(): """把当前 Python 进程设为 HIGH_PRIORITY_CLASS""" if os.name != "nt": return # 仅 Windows 有效 p = psutil.Process(os.getpid()) try: p.nice(psutil.HIGH_PRIORITY_CLASS) print("已将进程优先级设为 High") except psutil.AccessDenied: print("权限不足,无法修改优先级(请用管理员运行)") set_high_priority() import json import logging import os import random import re import sys import torch now_dir = os.getcwd() sys.path.append(now_dir) sys.path.append("%s/GPT_SoVITS" % (now_dir)) logging.getLogger("markdown_it").setLevel(logging.ERROR) logging.getLogger("urllib3").setLevel(logging.ERROR) logging.getLogger("httpcore").setLevel(logging.ERROR) logging.getLogger("httpx").setLevel(logging.ERROR) logging.getLogger("asyncio").setLevel(logging.ERROR) logging.getLogger("charset_normalizer").setLevel(logging.ERROR) logging.getLogger("torchaudio._extension").setLevel(logging.ERROR) infer_ttswebui = os.environ.get("infer_ttswebui", 9872) infer_ttswebui = int(infer_ttswebui) is_share = os.environ.get("is_share", "False") is_share = eval(is_share) if "_CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() gpt_path = os.environ.get("gpt_path", None) sovits_path = os.environ.get("sovits_path", None) cnhubert_base_path = os.environ.get("cnhubert_base_path", None) bert_path = os.environ.get("bert_path", None) version = model_version = os.environ.get("version", "v2") import gradio as gr from TTS_infer_pack.text_segmentation_method import get_method from TTS_infer_pack.TTS import NO_PROMPT_ERROR, TTS, TTS_Config from tools.assets import css, js, top_html from tools.i18n.i18n import I18nAuto, scan_language_list language = os.environ.get("language", "Auto") language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language i18n = I18nAuto(language=language) # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。 if torch.cuda.is_available(): device = "cuda" # elif torch.backends.mps.is_available(): # device = "mps" else: device = "cpu" # is_half = False # device = "cpu" dict_language_v1 = { i18n("中文"): "all_zh", # 全部按中文识别 i18n("英文"): "en", # 全部按英文识别#######不变 i18n("日文"): "all_ja", # 全部按日文识别 i18n("中英混合"): "zh", # 按中英混合识别####不变 i18n("日英混合"): "ja", # 按日英混合识别####不变 i18n("多语种混合"): "auto", # 多语种启动切分识别语种 } dict_language_v2 = { i18n("中文"): "all_zh", # 全部按中文识别 i18n("英文"): "en", # 全部按英文识别#######不变 i18n("日文"): "all_ja", # 全部按日文识别 i18n("粤语"): "all_yue", # 全部按中文识别 i18n("韩文"): "all_ko", # 全部按韩文识别 i18n("中英混合"): "zh", # 按中英混合识别####不变 i18n("日英混合"): "ja", # 按日英混合识别####不变 i18n("粤英混合"): "yue", # 按粤英混合识别####不变 i18n("韩英混合"): "ko", # 按韩英混合识别####不变 i18n("多语种混合"): "auto", # 多语种启动切分识别语种 i18n("多语种混合(粤语)"): "auto_yue", # 多语种启动切分识别语种 } dict_language = dict_language_v1 if version == "v1" else dict_language_v2 cut_method = { i18n("不切"): "cut0", i18n("凑四句一切"): "cut1", i18n("凑50字一切"): "cut2", i18n("按中文句号。切"): "cut3", i18n("按英文句号.切"): "cut4", i18n("按标点符号切"): "cut5", } from config import change_choices, get_weights_names, name2gpt_path, name2sovits_path SoVITS_names, GPT_names = get_weights_names() from config import pretrained_sovits_name path_sovits_v3 = pretrained_sovits_name["v3"] path_sovits_v4 = pretrained_sovits_name["v4"] is_exist_s2gv3 = os.path.exists(path_sovits_v3) is_exist_s2gv4 = os.path.exists(path_sovits_v4) tts_config = TTS_Config("GPT_SoVITS/configs/tts_infer.yaml") tts_config.device = device tts_config.is_half 
= is_half # tts_config.version = version tts_config.update_version(version) if gpt_path is not None: if "!" in gpt_path or "!" in gpt_path: gpt_path = name2gpt_path[gpt_path] tts_config.t2s_weights_path = gpt_path if sovits_path is not None: if "!" in sovits_path or "!" in sovits_path: sovits_path = name2sovits_path[sovits_path] tts_config.vits_weights_path = sovits_path if cnhubert_base_path is not None: tts_config.cnhuhbert_base_path = cnhubert_base_path if bert_path is not None: tts_config.bert_base_path = bert_path print(tts_config) tts_pipeline = TTS(tts_config) gpt_path = tts_config.t2s_weights_path sovits_path = tts_config.vits_weights_path version = tts_config.version def inference( text, text_lang, ref_audio_path, aux_ref_audio_paths, prompt_text, prompt_lang, top_k, top_p, temperature, text_split_method, batch_size, speed_factor, ref_text_free, split_bucket, fragment_interval, seed, keep_random, parallel_infer, repetition_penalty, sample_steps, super_sampling, ): seed = -1 if keep_random else seed actual_seed = seed if seed not in [-1, "", None] else random.randint(0, 2**32 - 1) inputs = { "text": text, "text_lang": dict_language[text_lang], "ref_audio_path": ref_audio_path, "aux_ref_audio_paths": [item.name for item in aux_ref_audio_paths] if aux_ref_audio_paths is not None else [], "prompt_text": prompt_text if not ref_text_free else "", "prompt_lang": dict_language[prompt_lang], "top_k": top_k, "top_p": top_p, "temperature": temperature, "text_split_method": cut_method[text_split_method], "batch_size": int(batch_size), "speed_factor": float(speed_factor), "split_bucket": split_bucket, "return_fragment": False, "fragment_interval": fragment_interval, "seed": actual_seed, "parallel_infer": parallel_infer, "repetition_penalty": repetition_penalty, "sample_steps": int(sample_steps), "super_sampling": super_sampling, } try: for item in tts_pipeline.run(inputs): yield item, actual_seed except NO_PROMPT_ERROR: gr.Warning(i18n("V3不支持无参考文本模式,请填写参考文本!")) def custom_sort_key(s): # 使用正则表达式提取字符串中的数字部分和非数字部分 parts = re.split("(\d+)", s) # 将数字部分转换为整数,非数字部分保持不变 parts = [int(part) if part.isdigit() else part for part in parts] return parts if os.path.exists("./weight.json"): pass else: with open("./weight.json", "w", encoding="utf-8") as file: json.dump({"GPT": {}, "SoVITS": {}}, file) with open("./weight.json", "r", encoding="utf-8") as file: weight_data = file.read() weight_data = json.loads(weight_data) gpt_path = os.environ.get("gpt_path", weight_data.get("GPT", {}).get(version, GPT_names[-1])) sovits_path = os.environ.get("sovits_path", weight_data.get("SoVITS", {}).get(version, SoVITS_names[0])) if isinstance(gpt_path, list): gpt_path = gpt_path[0] if isinstance(sovits_path, list): sovits_path = sovits_path[0] from process_ckpt import get_sovits_version_from_path_fast v3v4set = {"v3", "v4"} def change_sovits_weights(sovits_path, prompt_language=None, text_language=None): if "!" in sovits_path or "!" 
in sovits_path: sovits_path = name2sovits_path[sovits_path] global version, model_version, dict_language, if_lora_v3 version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path) # print(sovits_path,version, model_version, if_lora_v3) is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4 path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 if if_lora_v3 == True and is_exist == False: info = path_sovits + "SoVITS %s" % model_version + i18n("底模缺失,无法加载相应 LoRA 权重") gr.Warning(info) raise FileExistsError(info) dict_language = dict_language_v1 if version == "v1" else dict_language_v2 if prompt_language is not None and text_language is not None: if prompt_language in list(dict_language.keys()): prompt_text_update, prompt_language_update = ( {"__type__": "update"}, {"__type__": "update", "value": prompt_language}, ) else: prompt_text_update = {"__type__": "update", "value": ""} prompt_language_update = {"__type__": "update", "value": i18n("中文")} if text_language in list(dict_language.keys()): text_update, text_language_update = {"__type__": "update"}, {"__type__": "update", "value": text_language} else: text_update = {"__type__": "update", "value": ""} text_language_update = {"__type__": "update", "value": i18n("中文")} if model_version in v3v4set: visible_sample_steps = True visible_inp_refs = False else: visible_sample_steps = False visible_inp_refs = True yield ( {"__type__": "update", "choices": list(dict_language.keys())}, {"__type__": "update", "choices": list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update, {"__type__": "update", "interactive": visible_sample_steps, "value": 32}, {"__type__": "update", "visible": visible_inp_refs}, {"__type__": "update", "interactive": True if model_version not in v3v4set else False}, {"__type__": "update", "value": i18n("模型加载中,请等待"), "interactive": False}, ) tts_pipeline.init_vits_weights(sovits_path) yield ( {"__type__": "update", "choices": list(dict_language.keys())}, {"__type__": "update", "choices": list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update, {"__type__": "update", "interactive": visible_sample_steps, "value": 32}, {"__type__": "update", "visible": visible_inp_refs}, {"__type__": "update", "interactive": True if model_version not in v3v4set else False}, {"__type__": "update", "value": i18n("合成语音"), "interactive": True}, ) with open("./weight.json") as f: data = f.read() data = json.loads(data) data["SoVITS"][version] = sovits_path with open("./weight.json", "w") as f: f.write(json.dumps(data)) def change_gpt_weights(gpt_path): if "!" in gpt_path or "!" in gpt_path: gpt_path = name2gpt_path[gpt_path] tts_pipeline.init_t2s_weights(gpt_path) with gr.Blocks(title="GPT-SoVITS WebUI", analytics_enabled=False, js=js, css=css) as app: gr.HTML( top_html.format( i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.") + i18n("如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 
详见根目录LICENSE.") ), elem_classes="markdown", ) with gr.Column(): # with gr.Group(): gr.Markdown(value=i18n("模型切换")) with gr.Row(): GPT_dropdown = gr.Dropdown( label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, ) SoVITS_dropdown = gr.Dropdown( label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, ) refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary") refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) with gr.Row(): with gr.Column(): gr.Markdown(value=i18n("*请上传并填写参考信息")) with gr.Row(): inp_ref = gr.Audio(label=i18n("主参考音频(请上传3~10秒内参考音频,超过会报错!)"), type="filepath") inp_refs = gr.File( label=i18n("辅参考音频(可选多个,或不选)"), file_count="multiple", visible=True if model_version != "v3" else False, ) prompt_text = gr.Textbox(label=i18n("主参考音频的文本"), value="", lines=2) with gr.Row(): prompt_language = gr.Dropdown( label=i18n("主参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文") ) with gr.Column(): ref_text_free = gr.Checkbox( label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True if model_version != "v3" else False, show_label=True, ) gr.Markdown( i18n("使用无参考文本模式时建议使用微调的GPT") + "<br>" + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。") ) with gr.Column(): gr.Markdown(value=i18n("*请填写需要合成的目标文本和语种模式")) text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=20, max_lines=20) text_language = gr.Dropdown( label=i18n("需要合成的文本的语种"), choices=list(dict_language.keys()), value=i18n("中文") ) with gr.Group(): gr.Markdown(value=i18n("推理设置")) with gr.Row(): with gr.Column(): with gr.Row(): batch_size = gr.Slider( minimum=1, maximum=200, step=1, label=i18n("batch_size"), value=20, interactive=True ) sample_steps = gr.Radio( label=i18n("采样步数(仅对V3/4生效)"), value=32, choices=[4, 8, 16, 32, 64, 128], visible=True ) with gr.Row(): fragment_interval = gr.Slider( minimum=0.01, maximum=1, step=0.01, label=i18n("分段间隔(秒)"), value=0.3, interactive=True ) speed_factor = gr.Slider( minimum=0.6, maximum=1.65, step=0.05, label="语速", value=1.0, interactive=True ) with gr.Row(): top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=15, interactive=True) top_p = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True) with gr.Row(): temperature = gr.Slider( minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True ) repetition_penalty = gr.Slider( minimum=0, maximum=2, step=0.05, label=i18n("重复惩罚"), value=1.35, interactive=True ) with gr.Column(): with gr.Row(): how_to_cut = gr.Dropdown( label=i18n("怎么切"), choices=[ i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ], value=i18n("凑四句一切"), interactive=True, scale=1, ) super_sampling = gr.Checkbox( label=i18n("音频超采样(仅对V3生效))"), value=False, interactive=True, show_label=True ) with gr.Row(): parallel_infer = gr.Checkbox(label=i18n("并行推理"), value=True, interactive=True, show_label=True) split_bucket = gr.Checkbox( label=i18n("数据分桶(并行推理时会降低一点计算量)"), value=True, interactive=True, show_label=True, ) with gr.Row(): seed = gr.Number(label=i18n("随机种子"), value=-1) keep_random = gr.Checkbox(label=i18n("保持随机"), value=True, interactive=True, show_label=True) output = gr.Audio(label=i18n("输出的语音")) with gr.Row(): inference_button = gr.Button(i18n("合成语音"), variant="primary") stop_infer = gr.Button(i18n("终止合成"), variant="primary") inference_button.click( inference, [ text, text_language, inp_ref, 
inp_refs, prompt_text, prompt_language, top_k, top_p, temperature, how_to_cut, batch_size, speed_factor, ref_text_free, split_bucket, fragment_interval, seed, keep_random, parallel_infer, repetition_penalty, sample_steps, super_sampling, ], [output, seed], ) stop_infer.click(tts_pipeline.stop, [], []) SoVITS_dropdown.change( change_sovits_weights, [SoVITS_dropdown, prompt_language, text_language], [ prompt_language, text_language, prompt_text, prompt_language, text, text_language, sample_steps, inp_refs, ref_text_free, inference_button, ], ) # GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], []) with gr.Group(): gr.Markdown( value=i18n( "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。" ) ) with gr.Row(): text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="", lines=4) with gr.Column(): _how_to_cut = gr.Radio( label=i18n("怎么切"), choices=[ i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ], value=i18n("凑四句一切"), interactive=True, ) cut_text = gr.Button(i18n("切分"), variant="primary") def to_cut(text_inp, how_to_cut): if len(text_inp.strip()) == 0 or text_inp == []: return "" method = get_method(cut_method[how_to_cut]) return method(text_inp) text_opt = gr.Textbox(label=i18n("切分后文本"), value="", lines=4) cut_text.click(to_cut, [text_inp, _how_to_cut], [text_opt]) gr.Markdown(value=i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")) if __name__ == "__main__": app.queue().launch( # concurrency_count=511, max_size=1022 server_name="0.0.0.0", inbrowser=True, share=is_share, server_port=infer_ttswebui, # quiet=True, )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
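The `custom_sort_key` helper in `inference_webui_fast.py` performs a natural sort so numbered model filenames order the way humans expect. An isolated demonstration of the same split-on-digit-runs idea (`natural_key` is an illustrative name; note the raw string for the regex, which the original omits):

```python
import re

def natural_key(s: str):
    # Split on digit runs; numeric parts compare as ints, the rest as strings.
    return [int(p) if p.isdigit() else p for p in re.split(r"(\d+)", s)]

names = ["gpt_10.ckpt", "gpt_2.ckpt", "gpt_1.ckpt"]
print(sorted(names))                   # ['gpt_1.ckpt', 'gpt_10.ckpt', 'gpt_2.ckpt']
print(sorted(names, key=natural_key))  # ['gpt_1.ckpt', 'gpt_2.ckpt', 'gpt_10.ckpt']
```

The WebUI applies this when populating its model dropdowns, e.g. `sorted(GPT_names, key=custom_sort_key)`.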
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/s1_train.py
GPT_SoVITS/s1_train.py
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/train_t2s.py import os if "_CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] import argparse import logging import platform from pathlib import Path import torch from AR.data.data_module import Text2SemanticDataModule from AR.models.t2s_lightning_module import Text2SemanticLightningModule from AR.utils.io import load_yaml_config from pytorch_lightning import Trainer, seed_everything from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import TensorBoardLogger # WandbLogger from pytorch_lightning.strategies import DDPStrategy logging.getLogger("numba").setLevel(logging.WARNING) logging.getLogger("matplotlib").setLevel(logging.WARNING) torch.set_float32_matmul_precision("high") from collections import OrderedDict from AR.utils import get_newest_ckpt from process_ckpt import my_save class my_model_ckpt(ModelCheckpoint): def __init__( self, config, if_save_latest, if_save_every_weights, half_weights_save_dir, exp_name, **kwargs, ): super().__init__(**kwargs) self.if_save_latest = if_save_latest self.if_save_every_weights = if_save_every_weights self.half_weights_save_dir = half_weights_save_dir self.exp_name = exp_name self.config = config def on_train_epoch_end(self, trainer, pl_module): # if not self._should_skip_saving_checkpoint(trainer) and self._should_save_on_train_epoch_end(trainer): if self._should_save_on_train_epoch_end(trainer): monitor_candidates = self._monitor_candidates(trainer) if self._every_n_epochs >= 1 and (trainer.current_epoch + 1) % self._every_n_epochs == 0: if ( self.if_save_latest == True ): ####如果设置只保存最后一个ckpt,在保存下一个ckpt后要清理掉之前的所有ckpt to_clean = list(os.listdir(self.dirpath)) self._save_topk_checkpoint(trainer, monitor_candidates) if self.if_save_latest == True: for name in to_clean: try: os.remove("%s/%s" % (self.dirpath, name)) except: pass if self.if_save_every_weights == True: to_save_od = OrderedDict() to_save_od["weight"] = OrderedDict() dictt = trainer.strategy._lightning_module.state_dict() for key in dictt: to_save_od["weight"][key] = dictt[key].half() to_save_od["config"] = self.config to_save_od["info"] = "GPT-e%s" % (trainer.current_epoch + 1) # torch.save( # print(os.environ) if os.environ.get("LOCAL_RANK", "0") == "0": my_save( to_save_od, "%s/%s-e%s.ckpt" % ( self.half_weights_save_dir, self.exp_name, trainer.current_epoch + 1, ), ) self._save_last_checkpoint(trainer, monitor_candidates) def main(args): config = load_yaml_config(args.config_file) output_dir = Path(config["output_dir"]) output_dir.mkdir(parents=True, exist_ok=True) ckpt_dir = output_dir / "ckpt" ckpt_dir.mkdir(parents=True, exist_ok=True) seed_everything(config["train"]["seed"], workers=True) ckpt_callback: ModelCheckpoint = my_model_ckpt( config=config, if_save_latest=config["train"]["if_save_latest"], if_save_every_weights=config["train"]["if_save_every_weights"], half_weights_save_dir=config["train"]["half_weights_save_dir"], exp_name=config["train"]["exp_name"], save_top_k=-1, monitor="top_3_acc", mode="max", save_on_train_epoch_end=True, every_n_epochs=config["train"]["save_every_n_epoch"], dirpath=ckpt_dir, ) logger = TensorBoardLogger(name=output_dir.stem, save_dir=output_dir) os.environ["MASTER_ADDR"] = "localhost" os.environ["USE_LIBUV"] = "0" trainer: Trainer = Trainer( max_epochs=config["train"]["epochs"], accelerator="gpu" if torch.cuda.is_available() else "cpu", # 
val_check_interval=9999999999999999999999,###不要验证 # check_val_every_n_epoch=None, limit_val_batches=0, devices=-1 if torch.cuda.is_available() else 1, benchmark=False, fast_dev_run=False, strategy=DDPStrategy(process_group_backend="nccl" if platform.system() != "Windows" else "gloo") if torch.cuda.is_available() else "auto", precision=config["train"]["precision"], logger=logger, num_sanity_val_steps=0, callbacks=[ckpt_callback], use_distributed_sampler=False, # 非常简单的修改,但解决了采用自定义的 bucket_sampler 下训练步数不一致的问题! ) model: Text2SemanticLightningModule = Text2SemanticLightningModule(config, output_dir) data_module: Text2SemanticDataModule = Text2SemanticDataModule( config, train_semantic_path=config["train_semantic_path"], train_phoneme_path=config["train_phoneme_path"], # dev_semantic_path=args.dev_semantic_path, # dev_phoneme_path=args.dev_phoneme_path ) try: # 使用正则表达式匹配文件名中的数字部分,并按数字大小进行排序 newest_ckpt_name = get_newest_ckpt(os.listdir(ckpt_dir)) ckpt_path = ckpt_dir / newest_ckpt_name except Exception: ckpt_path = None print("ckpt_path:", ckpt_path) trainer.fit(model, data_module, ckpt_path=ckpt_path) # srun --gpus-per-node=1 --ntasks-per-node=1 python train.py --path-to-configuration configurations/default.yaml if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-c", "--config_file", type=str, default="configs/s1longer.yaml", help="path of config file", ) # args for dataset # parser.add_argument('--train_semantic_path',type=str,default='/data/docker/liujing04/gpt-vits/fine_tune_dataset/xuangou/6-name2semantic.tsv') # parser.add_argument('--train_phoneme_path', type=str, default='/data/docker/liujing04/gpt-vits/fine_tune_dataset/xuangou/2-name2text.txt') # parser.add_argument('--dev_semantic_path', type=str, default='dump_mix/semantic_dev.tsv') # parser.add_argument('--dev_phoneme_path', type=str, default='dump_mix/phoneme_dev.npy') # parser.add_argument('--output_dir',type=str,default='/data/docker/liujing04/gpt-vits/fine_tune_dataset/xuangou/logs_s1',help='directory to save the results') # parser.add_argument('--output_dir',type=str,default='/liujing04/gpt_logs/s1/xuangou_ft',help='directory to save the results') args = parser.parse_args() logging.info(str(args)) main(args)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
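The `my_model_ckpt` callback in `s1_train.py` extends Lightning's `ModelCheckpoint` so that, besides the regular training checkpoints, it dumps a half-precision weights-only snapshot each save epoch (and, with `if_save_latest`, deletes older full checkpoints). A hedged, simplified sketch of that pattern — the class name, output path, and payload layout here are illustrative, not the repo's exact `my_save()` format:

```python
# Minimal sketch of a ModelCheckpoint that also exports fp16 weights-only snapshots.
from collections import OrderedDict
import os

import torch
from pytorch_lightning.callbacks import ModelCheckpoint

class HalfWeightsCheckpoint(ModelCheckpoint):
    """Also write a small fp16, weights-only file whenever a training checkpoint is saved."""

    def __init__(self, half_dir: str, exp_name: str, **kwargs):
        super().__init__(**kwargs)
        self.half_dir = half_dir
        self.exp_name = exp_name

    def on_train_epoch_end(self, trainer, pl_module):
        super().on_train_epoch_end(trainer, pl_module)  # normal top-k / last handling
        epoch = trainer.current_epoch + 1
        if self._every_n_epochs < 1 or epoch % self._every_n_epochs != 0:
            return
        if os.environ.get("LOCAL_RANK", "0") != "0":
            return  # under DDP, save from rank 0 only
        half_weights = OrderedDict((k, v.half()) for k, v in pl_module.state_dict().items())
        torch.save(
            {"weight": half_weights, "info": f"GPT-e{epoch}"},
            os.path.join(self.half_dir, f"{self.exp_name}-e{epoch}.ckpt"),
        )
```

The real callback additionally clears every older checkpoint when `if_save_latest` is set, so only the newest full checkpoint survives alongside the small fp16 exports used for inference.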
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/inference_gui.py
GPT_SoVITS/inference_gui.py
import os import sys from PyQt5.QtCore import QEvent from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox import soundfile as sf from tools.i18n.i18n import I18nAuto i18n = I18nAuto() from inference_webui import gpt_path, sovits_path, change_gpt_weights, change_sovits_weights, get_tts_wav class GPTSoVITSGUI(QMainWindow): GPT_Path = gpt_path SoVITS_Path = sovits_path def __init__(self): super().__init__() self.setWindowTitle("GPT-SoVITS GUI") self.setGeometry(800, 450, 950, 850) self.setStyleSheet(""" QWidget { background-color: #a3d3b1; } QTabWidget::pane { background-color: #a3d3b1; } QTabWidget::tab-bar { alignment: left; } QTabBar::tab { background: #8da4bf; color: #ffffff; padding: 8px; } QTabBar::tab:selected { background: #2a3f54; } QLabel { color: #000000; } QPushButton { background-color: #4CAF50; color: white; padding: 8px; border: 1px solid #4CAF50; border-radius: 4px; } QPushButton:hover { background-color: #45a049; border: 1px solid #45a049; box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.1); } """) license_text = ( "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. " "如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." ) license_label = QLabel(license_text) license_label.setWordWrap(True) self.GPT_model_label = QLabel("选择GPT模型:") self.GPT_model_input = QLineEdit() self.GPT_model_input.setPlaceholderText("拖拽或选择文件") self.GPT_model_input.setText(self.GPT_Path) self.GPT_model_input.setReadOnly(True) self.GPT_model_button = QPushButton("选择GPT模型文件") self.GPT_model_button.clicked.connect(self.select_GPT_model) self.SoVITS_model_label = QLabel("选择SoVITS模型:") self.SoVITS_model_input = QLineEdit() self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件") self.SoVITS_model_input.setText(self.SoVITS_Path) self.SoVITS_model_input.setReadOnly(True) self.SoVITS_model_button = QPushButton("选择SoVITS模型文件") self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model) self.ref_audio_label = QLabel("上传参考音频:") self.ref_audio_input = QLineEdit() self.ref_audio_input.setPlaceholderText("拖拽或选择文件") self.ref_audio_input.setReadOnly(True) self.ref_audio_button = QPushButton("选择音频文件") self.ref_audio_button.clicked.connect(self.select_ref_audio) self.ref_text_label = QLabel("参考音频文本:") self.ref_text_input = QLineEdit() self.ref_text_input.setPlaceholderText("直接输入文字或上传文本") self.ref_text_button = QPushButton("上传文本") self.ref_text_button.clicked.connect(self.upload_ref_text) self.ref_language_label = QLabel("参考音频语言:") self.ref_language_combobox = QComboBox() self.ref_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"]) self.ref_language_combobox.setCurrentText("多语种混合") self.target_text_label = QLabel("合成目标文本:") self.target_text_input = QLineEdit() self.target_text_input.setPlaceholderText("直接输入文字或上传文本") self.target_text_button = QPushButton("上传文本") self.target_text_button.clicked.connect(self.upload_target_text) self.target_language_label = QLabel("合成音频语言:") self.target_language_combobox = QComboBox() self.target_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"]) self.target_language_combobox.setCurrentText("多语种混合") self.output_label = QLabel("输出音频路径:") self.output_input = QLineEdit() self.output_input.setPlaceholderText("拖拽或选择文件") self.output_input.setReadOnly(True) self.output_button = QPushButton("选择文件夹") self.output_button.clicked.connect(self.select_output_path) self.output_text = QTextEdit() self.output_text.setReadOnly(True) 
self.add_drag_drop_events( [ self.GPT_model_input, self.SoVITS_model_input, self.ref_audio_input, self.ref_text_input, self.target_text_input, self.output_input, ] ) self.synthesize_button = QPushButton("合成") self.synthesize_button.clicked.connect(self.synthesize) self.clear_output_button = QPushButton("清空输出") self.clear_output_button.clicked.connect(self.clear_output) self.status_bar = QStatusBar() main_layout = QVBoxLayout() input_layout = QGridLayout(self) input_layout.setSpacing(10) input_layout.addWidget(license_label, 0, 0, 1, 3) input_layout.addWidget(self.GPT_model_label, 1, 0) input_layout.addWidget(self.GPT_model_input, 2, 0, 1, 2) input_layout.addWidget(self.GPT_model_button, 2, 2) input_layout.addWidget(self.SoVITS_model_label, 3, 0) input_layout.addWidget(self.SoVITS_model_input, 4, 0, 1, 2) input_layout.addWidget(self.SoVITS_model_button, 4, 2) input_layout.addWidget(self.ref_audio_label, 5, 0) input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2) input_layout.addWidget(self.ref_audio_button, 6, 2) input_layout.addWidget(self.ref_language_label, 7, 0) input_layout.addWidget(self.ref_language_combobox, 8, 0, 1, 1) input_layout.addWidget(self.ref_text_label, 9, 0) input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2) input_layout.addWidget(self.ref_text_button, 10, 2) input_layout.addWidget(self.target_language_label, 11, 0) input_layout.addWidget(self.target_language_combobox, 12, 0, 1, 1) input_layout.addWidget(self.target_text_label, 13, 0) input_layout.addWidget(self.target_text_input, 14, 0, 1, 2) input_layout.addWidget(self.target_text_button, 14, 2) input_layout.addWidget(self.output_label, 15, 0) input_layout.addWidget(self.output_input, 16, 0, 1, 2) input_layout.addWidget(self.output_button, 16, 2) main_layout.addLayout(input_layout) output_layout = QVBoxLayout() output_layout.addWidget(self.output_text) main_layout.addLayout(output_layout) main_layout.addWidget(self.synthesize_button) main_layout.addWidget(self.clear_output_button) main_layout.addWidget(self.status_bar) self.central_widget = QWidget() self.central_widget.setLayout(main_layout) self.setCentralWidget(self.central_widget) def dragEnterEvent(self, event): if event.mimeData().hasUrls(): event.acceptProposedAction() def dropEvent(self, event): if event.mimeData().hasUrls(): file_paths = [url.toLocalFile() for url in event.mimeData().urls()] if len(file_paths) == 1: self.update_ref_audio(file_paths[0]) else: self.update_ref_audio(", ".join(file_paths)) def add_drag_drop_events(self, widgets): for widget in widgets: widget.setAcceptDrops(True) widget.installEventFilter(self) def eventFilter(self, obj, event): if event.type() in (QEvent.DragEnter, QEvent.Drop): mime_data = event.mimeData() if mime_data.hasUrls(): event.acceptProposedAction() return super().eventFilter(obj, event) def select_GPT_model(self): file_path, _ = QFileDialog.getOpenFileName(self, "选择GPT模型文件", "", "GPT Files (*.ckpt)") if file_path: self.GPT_model_input.setText(file_path) def select_SoVITS_model(self): file_path, _ = QFileDialog.getOpenFileName(self, "选择SoVITS模型文件", "", "SoVITS Files (*.pth)") if file_path: self.SoVITS_model_input.setText(file_path) def select_ref_audio(self): file_path, _ = QFileDialog.getOpenFileName(self, "选择参考音频文件", "", "Audio Files (*.wav *.mp3)") if file_path: self.update_ref_audio(file_path) def upload_ref_text(self): file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)") if file_path: with open(file_path, "r", encoding="utf-8") as file: content = file.read() 
self.ref_text_input.setText(content) def upload_target_text(self): file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)") if file_path: with open(file_path, "r", encoding="utf-8") as file: content = file.read() self.target_text_input.setText(content) def select_output_path(self): options = QFileDialog.Options() options |= QFileDialog.DontUseNativeDialog options |= QFileDialog.ShowDirsOnly folder_dialog = QFileDialog() folder_dialog.setOptions(options) folder_dialog.setFileMode(QFileDialog.Directory) if folder_dialog.exec_(): folder_path = folder_dialog.selectedFiles()[0] self.output_input.setText(folder_path) def update_ref_audio(self, file_path): self.ref_audio_input.setText(file_path) def clear_output(self): self.output_text.clear() def synthesize(self): GPT_model_path = self.GPT_model_input.text() SoVITS_model_path = self.SoVITS_model_input.text() ref_audio_path = self.ref_audio_input.text() language_combobox = self.ref_language_combobox.currentText() language_combobox = i18n(language_combobox) ref_text = self.ref_text_input.text() target_language_combobox = self.target_language_combobox.currentText() target_language_combobox = i18n(target_language_combobox) target_text = self.target_text_input.text() output_path = self.output_input.text() if GPT_model_path != self.GPT_Path: change_gpt_weights(gpt_path=GPT_model_path) self.GPT_Path = GPT_model_path if SoVITS_model_path != self.SoVITS_Path: change_sovits_weights(sovits_path=SoVITS_model_path) self.SoVITS_Path = SoVITS_model_path synthesis_result = get_tts_wav( ref_wav_path=ref_audio_path, prompt_text=ref_text, prompt_language=language_combobox, text=target_text, text_language=target_language_combobox, ) result_list = list(synthesis_result) if result_list: last_sampling_rate, last_audio_data = result_list[-1] output_wav_path = os.path.join(output_path, "output.wav") sf.write(output_wav_path, last_audio_data, last_sampling_rate) result = "Audio saved to " + output_wav_path self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000) self.output_text.append("处理结果:\n" + result) if __name__ == "__main__": app = QApplication(sys.argv) mainWin = GPTSoVITSGUI() mainWin.show() sys.exit(app.exec_())
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
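`GPTSoVITSGUI` wires drag-and-drop by calling `installEventFilter` on each input field and accepting `DragEnter`/`Drop` events in `eventFilter`. A minimal runnable sketch of that mechanism follows; `DropFilter` and the placeholder labels are hypothetical, and unlike the GUI above this version writes the dropped path straight into whichever field received it:

```python
# Per-widget drag-and-drop via a shared QObject event filter (PyQt5).
import sys

from PyQt5.QtCore import QEvent, QObject
from PyQt5.QtWidgets import QApplication, QLineEdit, QVBoxLayout, QWidget

class DropFilter(QObject):
    """Accept drags and paste the first dropped file path into the filtered field."""

    def eventFilter(self, obj, event):
        if event.type() == QEvent.DragEnter and event.mimeData().hasUrls():
            event.acceptProposedAction()
            return True
        if event.type() == QEvent.Drop and event.mimeData().hasUrls():
            obj.setText(event.mimeData().urls()[0].toLocalFile())
            return True
        return super().eventFilter(obj, event)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    root = QWidget()
    root.setWindowTitle("drag-drop demo")
    layout = QVBoxLayout(root)
    filt = DropFilter(root)
    for hint in ("drop a model file here", "drop an audio file here"):
        box = QLineEdit()
        box.setPlaceholderText(hint)
        box.setAcceptDrops(True)
        box.installEventFilter(filt)
        layout.addWidget(box)
    root.show()
    sys.exit(app.exec_())
```

The GUI above instead uses the filter only to accept the drag and routes the actual drop through the window-level `dropEvent`; either arrangement works, the per-widget variant just keeps the target field unambiguous.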
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/inference_webui.py
GPT_SoVITS/inference_webui.py
""" 按中英混合识别 按日英混合识别 多语种启动切分识别语种 全部按中文识别 全部按英文识别 全部按日文识别 """ import psutil import os def set_high_priority(): """把当前 Python 进程设为 HIGH_PRIORITY_CLASS""" if os.name != "nt": return # 仅 Windows 有效 p = psutil.Process(os.getpid()) try: p.nice(psutil.HIGH_PRIORITY_CLASS) print("已将进程优先级设为 High") except psutil.AccessDenied: print("权限不足,无法修改优先级(请用管理员运行)") set_high_priority() import json import logging import os import re import sys import traceback import warnings import torch import torchaudio from text.LangSegmenter import LangSegmenter logging.getLogger("markdown_it").setLevel(logging.ERROR) logging.getLogger("urllib3").setLevel(logging.ERROR) logging.getLogger("httpcore").setLevel(logging.ERROR) logging.getLogger("httpx").setLevel(logging.ERROR) logging.getLogger("asyncio").setLevel(logging.ERROR) logging.getLogger("charset_normalizer").setLevel(logging.ERROR) logging.getLogger("torchaudio._extension").setLevel(logging.ERROR) logging.getLogger("multipart.multipart").setLevel(logging.ERROR) warnings.simplefilter(action="ignore", category=FutureWarning) version = model_version = os.environ.get("version", "v2") from config import change_choices, get_weights_names, name2gpt_path, name2sovits_path SoVITS_names, GPT_names = get_weights_names() from config import pretrained_sovits_name path_sovits_v3 = pretrained_sovits_name["v3"] path_sovits_v4 = pretrained_sovits_name["v4"] is_exist_s2gv3 = os.path.exists(path_sovits_v3) is_exist_s2gv4 = os.path.exists(path_sovits_v4) if os.path.exists("./weight.json"): pass else: with open("./weight.json", "w", encoding="utf-8") as file: json.dump({"GPT": {}, "SoVITS": {}}, file) with open("./weight.json", "r", encoding="utf-8") as file: weight_data = file.read() weight_data = json.loads(weight_data) gpt_path = os.environ.get("gpt_path", weight_data.get("GPT", {}).get(version, GPT_names[-1])) sovits_path = os.environ.get("sovits_path", weight_data.get("SoVITS", {}).get(version, SoVITS_names[0])) if isinstance(gpt_path, list): gpt_path = gpt_path[0] if isinstance(sovits_path, list): sovits_path = sovits_path[0] # print(2333333) # print(os.environ["gpt_path"]) # print(gpt_path) # print(GPT_names) # print(weight_data) # print(weight_data.get("GPT", {})) # print(version)###GPT version里没有s2的v2pro # print(weight_data.get("GPT", {}).get(version, GPT_names[-1])) cnhubert_base_path = os.environ.get("cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base") bert_path = os.environ.get("bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large") infer_ttswebui = os.environ.get("infer_ttswebui", 9872) infer_ttswebui = int(infer_ttswebui) is_share = os.environ.get("is_share", "False") is_share = eval(is_share) if "_CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available() # is_half=False punctuation = set(["!", "?", "…", ",", ".", "-", " "]) import gradio as gr import librosa import numpy as np from feature_extractor import cnhubert from transformers import AutoModelForMaskedLM, AutoTokenizer cnhubert.cnhubert_base_path = cnhubert_base_path import random from GPT_SoVITS.module.models import Generator, SynthesizerTrn, SynthesizerTrnV3 def set_seed(seed): if seed == -1: seed = random.randint(0, 1000000) seed = int(seed) random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) # set_seed(42) from time import time as ttime from 
AR.models.t2s_lightning_module import Text2SemanticLightningModule from peft import LoraConfig, get_peft_model from text import cleaned_text_to_sequence from text.cleaner import clean_text from tools.assets import css, js, top_html from tools.i18n.i18n import I18nAuto, scan_language_list language = os.environ.get("language", "Auto") language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language i18n = I18nAuto(language=language) # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。 if torch.cuda.is_available(): device = "cuda" else: device = "cpu" dict_language_v1 = { i18n("中文"): "all_zh", # 全部按中文识别 i18n("英文"): "en", # 全部按英文识别#######不变 i18n("日文"): "all_ja", # 全部按日文识别 i18n("中英混合"): "zh", # 按中英混合识别####不变 i18n("日英混合"): "ja", # 按日英混合识别####不变 i18n("多语种混合"): "auto", # 多语种启动切分识别语种 } dict_language_v2 = { i18n("中文"): "all_zh", # 全部按中文识别 i18n("英文"): "en", # 全部按英文识别#######不变 i18n("日文"): "all_ja", # 全部按日文识别 i18n("粤语"): "all_yue", # 全部按中文识别 i18n("韩文"): "all_ko", # 全部按韩文识别 i18n("中英混合"): "zh", # 按中英混合识别####不变 i18n("日英混合"): "ja", # 按日英混合识别####不变 i18n("粤英混合"): "yue", # 按粤英混合识别####不变 i18n("韩英混合"): "ko", # 按韩英混合识别####不变 i18n("多语种混合"): "auto", # 多语种启动切分识别语种 i18n("多语种混合(粤语)"): "auto_yue", # 多语种启动切分识别语种 } dict_language = dict_language_v1 if version == "v1" else dict_language_v2 tokenizer = AutoTokenizer.from_pretrained(bert_path) bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) if is_half == True: bert_model = bert_model.half().to(device) else: bert_model = bert_model.to(device) def get_bert_feature(text, word2ph): with torch.no_grad(): inputs = tokenizer(text, return_tensors="pt") for i in inputs: inputs[i] = inputs[i].to(device) res = bert_model(**inputs, output_hidden_states=True) res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] assert len(word2ph) == len(text) phone_level_feature = [] for i in range(len(word2ph)): repeat_feature = res[i].repeat(word2ph[i], 1) phone_level_feature.append(repeat_feature) phone_level_feature = torch.cat(phone_level_feature, dim=0) return phone_level_feature.T class DictToAttrRecursive(dict): def __init__(self, input_dict): super().__init__(input_dict) for key, value in input_dict.items(): if isinstance(value, dict): value = DictToAttrRecursive(value) self[key] = value setattr(self, key, value) def __getattr__(self, item): try: return self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") def __setattr__(self, key, value): if isinstance(value, dict): value = DictToAttrRecursive(value) super(DictToAttrRecursive, self).__setitem__(key, value) super().__setattr__(key, value) def __delattr__(self, item): try: del self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") ssl_model = cnhubert.get_model() if is_half == True: ssl_model = ssl_model.half().to(device) else: ssl_model = ssl_model.to(device) ###todo:put them to process_ckpt and modify my_save func (save sovits weights), gpt save weights use my_save in process_ckpt # symbol_version-model_version-if_lora_v3 from process_ckpt import get_sovits_version_from_path_fast, load_sovits_new v3v4set = {"v3", "v4"} def change_sovits_weights(sovits_path, prompt_language=None, text_language=None): if "!" in sovits_path or "!" 
in sovits_path: sovits_path = name2sovits_path[sovits_path] global vq_model, hps, version, model_version, dict_language, if_lora_v3 version, model_version, if_lora_v3 = get_sovits_version_from_path_fast(sovits_path) print(sovits_path, version, model_version, if_lora_v3) is_exist = is_exist_s2gv3 if model_version == "v3" else is_exist_s2gv4 path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 if if_lora_v3 == True and is_exist == False: info = path_sovits + "SoVITS %s" % model_version + i18n("底模缺失,无法加载相应 LoRA 权重") gr.Warning(info) raise FileExistsError(info) dict_language = dict_language_v1 if version == "v1" else dict_language_v2 if prompt_language is not None and text_language is not None: if prompt_language in list(dict_language.keys()): prompt_text_update, prompt_language_update = ( {"__type__": "update"}, {"__type__": "update", "value": prompt_language}, ) else: prompt_text_update = {"__type__": "update", "value": ""} prompt_language_update = {"__type__": "update", "value": i18n("中文")} if text_language in list(dict_language.keys()): text_update, text_language_update = {"__type__": "update"}, {"__type__": "update", "value": text_language} else: text_update = {"__type__": "update", "value": ""} text_language_update = {"__type__": "update", "value": i18n("中文")} if model_version in v3v4set: visible_sample_steps = True visible_inp_refs = False else: visible_sample_steps = False visible_inp_refs = True yield ( {"__type__": "update", "choices": list(dict_language.keys())}, {"__type__": "update", "choices": list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update, { "__type__": "update", "visible": visible_sample_steps, "value": 32 if model_version == "v3" else 8, "choices": [4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], }, {"__type__": "update", "visible": visible_inp_refs}, {"__type__": "update", "value": False, "interactive": True if model_version not in v3v4set else False}, {"__type__": "update", "visible": True if model_version == "v3" else False}, {"__type__": "update", "value": i18n("模型加载中,请等待"), "interactive": False}, ) dict_s2 = load_sovits_new(sovits_path) hps = dict_s2["config"] hps = DictToAttrRecursive(hps) hps.model.semantic_frame_rate = "25hz" if "enc_p.text_embedding.weight" not in dict_s2["weight"]: hps.model.version = "v2" # v3model,v2sybomls elif dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322: hps.model.version = "v1" else: hps.model.version = "v2" version = hps.model.version # print("sovits版本:",hps.model.version) if model_version not in v3v4set: if "Pro" not in model_version: model_version = version else: hps.model.version = model_version vq_model = SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ) else: hps.model.version = model_version vq_model = SynthesizerTrnV3( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ) if "pretrained" not in sovits_path: try: del vq_model.enc_q except: pass if is_half == True: vq_model = vq_model.half().to(device) else: vq_model = vq_model.to(device) vq_model.eval() if if_lora_v3 == False: print("loading sovits_%s" % model_version, vq_model.load_state_dict(dict_s2["weight"], strict=False)) else: path_sovits = path_sovits_v3 if model_version == "v3" else path_sovits_v4 print( "loading sovits_%spretrained_G" % model_version, 
vq_model.load_state_dict(load_sovits_new(path_sovits)["weight"], strict=False), ) lora_rank = dict_s2["lora_rank"] lora_config = LoraConfig( target_modules=["to_k", "to_q", "to_v", "to_out.0"], r=lora_rank, lora_alpha=lora_rank, init_lora_weights=True, ) vq_model.cfm = get_peft_model(vq_model.cfm, lora_config) print("loading sovits_%s_lora%s" % (model_version, lora_rank)) vq_model.load_state_dict(dict_s2["weight"], strict=False) vq_model.cfm = vq_model.cfm.merge_and_unload() # torch.save(vq_model.state_dict(),"merge_win.pth") vq_model.eval() yield ( {"__type__": "update", "choices": list(dict_language.keys())}, {"__type__": "update", "choices": list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update, { "__type__": "update", "visible": visible_sample_steps, "value": 32 if model_version == "v3" else 8, "choices": [4, 8, 16, 32, 64, 128] if model_version == "v3" else [4, 8, 16, 32], }, {"__type__": "update", "visible": visible_inp_refs}, {"__type__": "update", "value": False, "interactive": True if model_version not in v3v4set else False}, {"__type__": "update", "visible": True if model_version == "v3" else False}, {"__type__": "update", "value": i18n("合成语音"), "interactive": True}, ) with open("./weight.json") as f: data = f.read() data = json.loads(data) data["SoVITS"][version] = sovits_path with open("./weight.json", "w") as f: f.write(json.dumps(data)) try: next(change_sovits_weights(sovits_path)) except: pass def change_gpt_weights(gpt_path): if "!" in gpt_path or "!" in gpt_path: gpt_path = name2gpt_path[gpt_path] global hz, max_sec, t2s_model, config hz = 50 dict_s1 = torch.load(gpt_path, map_location="cpu", weights_only=False) config = dict_s1["config"] max_sec = config["data"]["max_sec"] t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) t2s_model.load_state_dict(dict_s1["weight"]) if is_half == True: t2s_model = t2s_model.half() t2s_model = t2s_model.to(device) t2s_model.eval() # total = sum([param.nelement() for param in t2s_model.parameters()]) # print("Number of parameter: %.2fM" % (total / 1e6)) with open("./weight.json") as f: data = f.read() data = json.loads(data) data["GPT"][version] = gpt_path with open("./weight.json", "w") as f: f.write(json.dumps(data)) change_gpt_weights(gpt_path) os.environ["HF_ENDPOINT"] = "https://hf-mirror.com" import torch now_dir = os.getcwd() def clean_hifigan_model(): global hifigan_model if hifigan_model: hifigan_model = hifigan_model.cpu() hifigan_model = None try: torch.cuda.empty_cache() except: pass def clean_bigvgan_model(): global bigvgan_model if bigvgan_model: bigvgan_model = bigvgan_model.cpu() bigvgan_model = None try: torch.cuda.empty_cache() except: pass def clean_sv_cn_model(): global sv_cn_model if sv_cn_model: sv_cn_model.embedding_model = sv_cn_model.embedding_model.cpu() sv_cn_model = None try: torch.cuda.empty_cache() except: pass def init_bigvgan(): global bigvgan_model, hifigan_model, sv_cn_model from BigVGAN import bigvgan bigvgan_model = bigvgan.BigVGAN.from_pretrained( "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), use_cuda_kernel=False, ) # if True, RuntimeError: Ninja is required to load C++ extensions # remove weight norm in the model and set to eval mode bigvgan_model.remove_weight_norm() bigvgan_model = bigvgan_model.eval() clean_hifigan_model() clean_sv_cn_model() if is_half == True: bigvgan_model = bigvgan_model.half().to(device) else: bigvgan_model = bigvgan_model.to(device) def init_hifigan(): 
global hifigan_model, bigvgan_model, sv_cn_model hifigan_model = Generator( initial_channel=100, resblock="1", resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], upsample_rates=[10, 6, 2, 2, 2], upsample_initial_channel=512, upsample_kernel_sizes=[20, 12, 4, 4, 4], gin_channels=0, is_bias=True, ) hifigan_model.eval() hifigan_model.remove_weight_norm() state_dict_g = torch.load( "%s/GPT_SoVITS/pretrained_models/gsv-v4-pretrained/vocoder.pth" % (now_dir,), map_location="cpu", weights_only=False, ) print("loading vocoder", hifigan_model.load_state_dict(state_dict_g)) clean_bigvgan_model() clean_sv_cn_model() if is_half == True: hifigan_model = hifigan_model.half().to(device) else: hifigan_model = hifigan_model.to(device) from sv import SV def init_sv_cn(): global hifigan_model, bigvgan_model, sv_cn_model sv_cn_model = SV(device, is_half) clean_bigvgan_model() clean_hifigan_model() bigvgan_model = hifigan_model = sv_cn_model = None if model_version == "v3": init_bigvgan() if model_version == "v4": init_hifigan() if model_version in {"v2Pro", "v2ProPlus"}: init_sv_cn() resample_transform_dict = {} def resample(audio_tensor, sr0, sr1, device): global resample_transform_dict key = "%s-%s-%s" % (sr0, sr1, str(device)) if key not in resample_transform_dict: resample_transform_dict[key] = torchaudio.transforms.Resample(sr0, sr1).to(device) return resample_transform_dict[key](audio_tensor) def get_spepc(hps, filename, dtype, device, is_v2pro=False): # audio = load_audio(filename, int(hps.data.sampling_rate)) # audio, sampling_rate = librosa.load(filename, sr=int(hps.data.sampling_rate)) # audio = torch.FloatTensor(audio) sr1 = int(hps.data.sampling_rate) audio, sr0 = torchaudio.load(filename) if sr0 != sr1: audio = audio.to(device) if audio.shape[0] == 2: audio = audio.mean(0).unsqueeze(0) audio = resample(audio, sr0, sr1, device) else: audio = audio.to(device) if audio.shape[0] == 2: audio = audio.mean(0).unsqueeze(0) maxx = audio.abs().max() if maxx > 1: audio /= min(2, maxx) spec = spectrogram_torch( audio, hps.data.filter_length, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, center=False, ) spec = spec.to(dtype) if is_v2pro == True: audio = resample(audio, sr1, 16000, device).to(dtype) return spec, audio def clean_text_inf(text, language, version): language = language.replace("all_", "") phones, word2ph, norm_text = clean_text(text, language, version) phones = cleaned_text_to_sequence(phones, version) return phones, word2ph, norm_text dtype = torch.float16 if is_half == True else torch.float32 def get_bert_inf(phones, word2ph, norm_text, language): language = language.replace("all_", "") if language == "zh": bert = get_bert_feature(norm_text, word2ph).to(device) # .to(dtype) else: bert = torch.zeros( (1024, len(phones)), dtype=torch.float16 if is_half == True else torch.float32, ).to(device) return bert splits = { ",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", } def get_first(text): pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]" text = re.split(pattern, text)[0].strip() return text from text import chinese def get_phones_and_bert(text, language, version, final=False): text = re.sub(r' {2,}', ' ', text) textlist = [] langlist = [] if language == "all_zh": for tmp in LangSegmenter.getTexts(text,"zh"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_yue": for tmp in LangSegmenter.getTexts(text,"zh"): if tmp["lang"] == "zh": tmp["lang"] = "yue" langlist.append(tmp["lang"]) 
textlist.append(tmp["text"]) elif language == "all_ja": for tmp in LangSegmenter.getTexts(text,"ja"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "all_ko": for tmp in LangSegmenter.getTexts(text,"ko"): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "en": langlist.append("en") textlist.append(text) elif language == "auto": for tmp in LangSegmenter.getTexts(text): langlist.append(tmp["lang"]) textlist.append(tmp["text"]) elif language == "auto_yue": for tmp in LangSegmenter.getTexts(text): if tmp["lang"] == "zh": tmp["lang"] = "yue" langlist.append(tmp["lang"]) textlist.append(tmp["text"]) else: for tmp in LangSegmenter.getTexts(text): if langlist: if (tmp["lang"] == "en" and langlist[-1] == "en") or (tmp["lang"] != "en" and langlist[-1] != "en"): textlist[-1] += tmp["text"] continue if tmp["lang"] == "en": langlist.append(tmp["lang"]) else: # 因无法区别中日韩文汉字,以用户输入为准 langlist.append(language) textlist.append(tmp["text"]) print(textlist) print(langlist) phones_list = [] bert_list = [] norm_text_list = [] for i in range(len(textlist)): lang = langlist[i] phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version) bert = get_bert_inf(phones, word2ph, norm_text, lang) phones_list.append(phones) norm_text_list.append(norm_text) bert_list.append(bert) bert = torch.cat(bert_list, dim=1) phones = sum(phones_list, []) norm_text = "".join(norm_text_list) if not final and len(phones) < 6: return get_phones_and_bert("." + text, language, version, final=True) return phones, bert.to(dtype), norm_text from module.mel_processing import mel_spectrogram_torch, spectrogram_torch spec_min = -12 spec_max = 2 def norm_spec(x): return (x - spec_min) / (spec_max - spec_min) * 2 - 1 def denorm_spec(x): return (x + 1) / 2 * (spec_max - spec_min) + spec_min mel_fn = lambda x: mel_spectrogram_torch( x, **{ "n_fft": 1024, "win_size": 1024, "hop_size": 256, "num_mels": 100, "sampling_rate": 24000, "fmin": 0, "fmax": None, "center": False, }, ) mel_fn_v4 = lambda x: mel_spectrogram_torch( x, **{ "n_fft": 1280, "win_size": 1280, "hop_size": 320, "num_mels": 100, "sampling_rate": 32000, "fmin": 0, "fmax": None, "center": False, }, ) def merge_short_text_in_array(texts, threshold): if (len(texts)) < 2: return texts result = [] text = "" for ele in texts: text += ele if len(text) >= threshold: result.append(text) text = "" if len(text) > 0: if len(result) == 0: result.append(text) else: result[len(result) - 1] += text return result sr_model = None def audio_sr(audio, sr): global sr_model if sr_model == None: from tools.audio_sr import AP_BWE try: sr_model = AP_BWE(device, DictToAttrRecursive) except FileNotFoundError: gr.Warning(i18n("你没有下载超分模型的参数,因此不进行超分。如想超分请先参照教程把文件下载好")) return audio.cpu().detach().numpy(), sr return sr_model(audio, sr) ##ref_wav_path+prompt_text+prompt_language+text(单个)+text_language+top_k+top_p+temperature # cache_tokens={}#暂未实现清理机制 cache = {} def get_tts_wav( ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free=False, speed=1, if_freeze=False, inp_refs=None, sample_steps=8, if_sr=False, pause_second=0.3, ): global cache if ref_wav_path: pass else: gr.Warning(i18n("请上传参考音频")) if text: pass else: gr.Warning(i18n("请填入推理文本")) t = [] if prompt_text is None or len(prompt_text) == 0: ref_free = True if model_version in v3v4set: ref_free = False # s2v3暂不支持ref_free else: if_sr = False if model_version not in {"v3", "v4", "v2Pro", "v2ProPlus"}: clean_bigvgan_model() 
clean_hifigan_model() clean_sv_cn_model() t0 = ttime() prompt_language = dict_language[prompt_language] text_language = dict_language[text_language] if not ref_free: prompt_text = prompt_text.strip("\n") if prompt_text[-1] not in splits: prompt_text += "。" if prompt_language != "en" else "." print(i18n("实际输入的参考文本:"), prompt_text) text = text.strip("\n") # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text print(i18n("实际输入的目标文本:"), text) zero_wav = np.zeros( int(hps.data.sampling_rate * pause_second), dtype=np.float16 if is_half == True else np.float32, ) zero_wav_torch = torch.from_numpy(zero_wav) if is_half == True: zero_wav_torch = zero_wav_torch.half().to(device) else: zero_wav_torch = zero_wav_torch.to(device) if not ref_free: with torch.no_grad(): wav16k, sr = librosa.load(ref_wav_path, sr=16000) if wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000: gr.Warning(i18n("参考音频在3~10秒范围外,请更换!")) raise OSError(i18n("参考音频在3~10秒范围外,请更换!")) wav16k = torch.from_numpy(wav16k) if is_half == True: wav16k = wav16k.half().to(device) else: wav16k = wav16k.to(device) wav16k = torch.cat([wav16k, zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() codes = vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompt = prompt_semantic.unsqueeze(0).to(device) t1 = ttime() t.append(t1 - t0) if how_to_cut == i18n("凑四句一切"): text = cut1(text) elif how_to_cut == i18n("凑50字一切"): text = cut2(text) elif how_to_cut == i18n("按中文句号。切"): text = cut3(text) elif how_to_cut == i18n("按英文句号.切"): text = cut4(text) elif how_to_cut == i18n("按标点符号切"): text = cut5(text) while "\n\n" in text: text = text.replace("\n\n", "\n") print(i18n("实际输入的目标文本(切句后):"), text) texts = text.split("\n") texts = process_text(texts) texts = merge_short_text_in_array(texts, 5) audio_opt = [] ###s2v3暂不支持ref_free if not ref_free: phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language, version) for i_text, text in enumerate(texts): # 解决输入目标文本的空行导致报错的问题 if len(text.strip()) == 0: continue if text[-1] not in splits: text += "。" if text_language != "en" else "." 
print(i18n("实际输入的目标文本(每句):"), text) phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language, version) print(i18n("前端处理后的文本(每句):"), norm_text2) if not ref_free: bert = torch.cat([bert1, bert2], 1) all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0) else: bert = bert2 all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0) bert = bert.to(device).unsqueeze(0) all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device) t2 = ttime() # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature) # print(cache.keys(),if_freeze) if i_text in cache and if_freeze == True: pred_semantic = cache[i_text] else: with torch.no_grad(): pred_semantic, idx = t2s_model.model.infer_panel( all_phoneme_ids, all_phoneme_len, None if ref_free else prompt, bert, # prompt_phone_len=ph_offset, top_k=top_k, top_p=top_p, temperature=temperature, early_stop_num=hz * max_sec, ) pred_semantic = pred_semantic[:, -idx:].unsqueeze(0) cache[i_text] = pred_semantic t3 = ttime() is_v2pro = model_version in {"v2Pro", "v2ProPlus"} # print(23333,is_v2pro,model_version) ###v3不存在以下逻辑和inp_refs if model_version not in v3v4set: refers = [] if is_v2pro: sv_emb = [] if sv_cn_model == None: init_sv_cn() if inp_refs: for path in inp_refs: try: #####这里加上提取sv的逻辑,要么一堆sv一堆refer,要么单个sv单个refer refer, audio_tensor = get_spepc(hps, path.name, dtype, device, is_v2pro) refers.append(refer) if is_v2pro: sv_emb.append(sv_cn_model.compute_embedding3(audio_tensor)) except: traceback.print_exc() if len(refers) == 0: refers, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device, is_v2pro) refers = [refers] if is_v2pro: sv_emb = [sv_cn_model.compute_embedding3(audio_tensor)] if is_v2pro: audio = vq_model.decode( pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed, sv_emb=sv_emb )[0][0] else: audio = vq_model.decode( pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers, speed=speed )[0][0] else: refer, audio_tensor = get_spepc(hps, ref_wav_path, dtype, device) phoneme_ids0 = torch.LongTensor(phones1).to(device).unsqueeze(0) phoneme_ids1 = torch.LongTensor(phones2).to(device).unsqueeze(0) fea_ref, ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer) ref_audio, sr = torchaudio.load(ref_wav_path) ref_audio = ref_audio.to(device).float() if ref_audio.shape[0] == 2: ref_audio = ref_audio.mean(0).unsqueeze(0) tgt_sr = 24000 if model_version == "v3" else 32000 if sr != tgt_sr: ref_audio = resample(ref_audio, sr, tgt_sr, device) # print("ref_audio",ref_audio.abs().mean()) mel2 = mel_fn(ref_audio) if model_version == "v3" else mel_fn_v4(ref_audio) mel2 = norm_spec(mel2) T_min = min(mel2.shape[2], fea_ref.shape[2]) mel2 = mel2[:, :, :T_min] fea_ref = fea_ref[:, :, :T_min] Tref = 468 if model_version == "v3" else 500
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
true
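The inference code above caches one torchaudio `Resample` transform per (source rate, target rate, device) triple in `resample_transform_dict` instead of rebuilding the interpolation kernel for every utterance. A minimal standalone sketch of that same caching pattern (the function and dict names here are illustrative, not the module's own):

```python
import torch
import torchaudio

# Cache of resamplers keyed by (source_rate, target_rate, device),
# mirroring the resample_transform_dict pattern in inference_webui.py.
_resamplers = {}


def cached_resample(audio: torch.Tensor, sr_in: int, sr_out: int, device: str) -> torch.Tensor:
    key = (sr_in, sr_out, device)
    if key not in _resamplers:
        # Constructing a Resample transform computes a sinc interpolation
        # kernel once; reusing it avoids repeating that work per call.
        _resamplers[key] = torchaudio.transforms.Resample(sr_in, sr_out).to(device)
    return _resamplers[key](audio)


if __name__ == "__main__":
    wav = torch.randn(1, 32000)  # one second of fake 32 kHz audio
    out = cached_resample(wav, 32000, 16000, "cpu")
    print(out.shape)  # torch.Size([1, 16000])
```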
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/process_ckpt.py
GPT_SoVITS/process_ckpt.py
import traceback
from collections import OrderedDict
from time import time as ttime
import shutil
import os
import torch
from tools.i18n.i18n import I18nAuto

i18n = I18nAuto()


def my_save(fea, path):  #####fix issue: torch.save doesn't support chinese path
    dir = os.path.dirname(path)
    name = os.path.basename(path)
    tmp_path = "%s.pth" % (ttime())
    torch.save(fea, tmp_path)
    shutil.move(tmp_path, "%s/%s" % (dir, name))


from io import BytesIO

model_version2byte = {
    "v3": b"03",
    "v4": b"04",
    "v2Pro": b"05",
    "v2ProPlus": b"06",
}


def my_save2(fea, path, model_version):
    bio = BytesIO()
    torch.save(fea, bio)
    bio.seek(0)
    data = bio.getvalue()
    byte = model_version2byte[model_version]
    data = byte + data[2:]
    with open(path, "wb") as f:
        f.write(data)


def savee(ckpt, name, epoch, steps, hps, model_version=None, lora_rank=None):
    try:
        opt = OrderedDict()
        opt["weight"] = {}
        for key in ckpt.keys():
            if "enc_q" in key:
                continue
            opt["weight"][key] = ckpt[key].half()
        opt["config"] = hps
        opt["info"] = "%sepoch_%siteration" % (epoch, steps)
        if lora_rank:
            opt["lora_rank"] = lora_rank
            my_save2(opt, "%s/%s.pth" % (hps.save_weight_dir, name), model_version)
        elif model_version != None and "Pro" in model_version:
            my_save2(opt, "%s/%s.pth" % (hps.save_weight_dir, name), model_version)
        else:
            my_save(opt, "%s/%s.pth" % (hps.save_weight_dir, name))
        return "Success."
    except:
        return traceback.format_exc()


"""
00:v1
01:v2
02:v3
03:v3lora
04:v4lora
05:v2Pro
06:v2ProPlus
"""
head2version = {
    b"00": ["v1", "v1", False],
    b"01": ["v2", "v2", False],
    b"02": ["v2", "v3", False],
    b"03": ["v2", "v3", True],
    b"04": ["v2", "v4", True],
    b"05": ["v2", "v2Pro", False],
    b"06": ["v2", "v2ProPlus", False],
}
hash_pretrained_dict = {
    "dc3c97e17592963677a4a1681f30c653": ["v2", "v2", False],  # s2G488k.pth#sovits_v1_pretrained
    "43797be674a37c1c83ee81081941ed0f": ["v2", "v3", False],  # s2Gv3.pth#sovits_v3_pretrained
    "6642b37f3dbb1f76882b69937c95a5f3": ["v2", "v2", False],  # s2G2333K.pth#sovits_v2_pretrained
    "4f26b9476d0c5033e04162c486074374": ["v2", "v4", False],  # s2Gv4.pth#sovits_v4_pretrained
    "c7e9fce2223f3db685cdfa1e6368728a": ["v2", "v2Pro", False],  # s2Gv2Pro.pth#sovits_v2Pro_pretrained
    "66b313e39455b57ab1b0bc0b239c9d0a": ["v2", "v2ProPlus", False],  # s2Gv2ProPlus.pth#sovits_v2ProPlus_pretrained
}
import hashlib


def get_hash_from_file(sovits_path):
    with open(sovits_path, "rb") as f:
        data = f.read(8192)
    hash_md5 = hashlib.md5()
    hash_md5.update(data)
    return hash_md5.hexdigest()


def get_sovits_version_from_path_fast(sovits_path):
    ###1-if it is pretrained sovits models, by hash
    hash = get_hash_from_file(sovits_path)
    if hash in hash_pretrained_dict:
        return hash_pretrained_dict[hash]
    ###2-new weights, by head
    with open(sovits_path, "rb") as f:
        version = f.read(2)
    if version != b"PK":
        return head2version[version]
    ###3-old weights, by file size
    if_lora_v3 = False
    size = os.path.getsize(sovits_path)
    """
    v1weights:about 82942KB
        half thr:82978KB
    v2weights:about 83014KB
    v3weights:about 750MB
    """
    if size < 82978 * 1024:
        model_version = version = "v1"
    elif size < 700 * 1024 * 1024:
        model_version = version = "v2"
    else:
        version = "v2"
        model_version = "v3"
    return version, model_version, if_lora_v3


def load_sovits_new(sovits_path):
    f = open(sovits_path, "rb")
    meta = f.read(2)
    if meta != b"PK":
        data = b"PK" + f.read()
        bio = BytesIO()
        bio.write(data)
        bio.seek(0)
        return torch.load(bio, map_location="cpu", weights_only=False)
    return torch.load(sovits_path, map_location="cpu", weights_only=False)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
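process_ckpt.py distinguishes checkpoint variants by overwriting the first two bytes of the saved zip archive (normally the b"PK" magic) with a version tag, then restoring b"PK" before loading, as in my_save2/load_sovits_new above. A minimal round-trip sketch of that trick, using a toy payload and an illustrative file name:

```python
import io
import torch

VERSION_TAG = b"05"  # e.g. the v2Pro tag from model_version2byte


def save_tagged(obj, path):
    buf = io.BytesIO()
    torch.save(obj, buf)                 # a torch checkpoint is a zip file starting with b"PK"
    data = buf.getvalue()
    with open(path, "wb") as f:
        f.write(VERSION_TAG + data[2:])  # stamp the version tag over the zip magic


def load_tagged(path):
    with open(path, "rb") as f:
        head = f.read(2)
        if head == b"PK":                # untagged: a plain torch checkpoint
            return None, torch.load(path, map_location="cpu", weights_only=False)
        body = io.BytesIO(b"PK" + f.read())  # restore the zip magic, then load
        return head, torch.load(body, map_location="cpu", weights_only=False)


if __name__ == "__main__":
    save_tagged({"weight": torch.zeros(3)}, "demo.pth")
    tag, ckpt = load_tagged("demo.pth")
    print(tag, ckpt["weight"].shape)  # b'05' torch.Size([3])
```

The tag costs nothing at load time: only two bytes are inspected, which is why get_sovits_version_from_path_fast can dispatch on it before falling back to the file-size heuristic.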
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/inference_cli.py
GPT_SoVITS/inference_cli.py
import argparse
import os

import soundfile as sf

from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav

i18n = I18nAuto()


def synthesize(
    GPT_model_path,
    SoVITS_model_path,
    ref_audio_path,
    ref_text_path,
    ref_language,
    target_text_path,
    target_language,
    output_path,
):
    # Read reference text
    with open(ref_text_path, "r", encoding="utf-8") as file:
        ref_text = file.read()

    # Read target text
    with open(target_text_path, "r", encoding="utf-8") as file:
        target_text = file.read()

    # Change model weights
    change_gpt_weights(gpt_path=GPT_model_path)
    change_sovits_weights(sovits_path=SoVITS_model_path)

    # Synthesize audio
    synthesis_result = get_tts_wav(
        ref_wav_path=ref_audio_path,
        prompt_text=ref_text,
        prompt_language=i18n(ref_language),
        text=target_text,
        text_language=i18n(target_language),
        top_p=1,
        temperature=1,
    )

    result_list = list(synthesis_result)

    if result_list:
        last_sampling_rate, last_audio_data = result_list[-1]
        output_wav_path = os.path.join(output_path, "output.wav")
        sf.write(output_wav_path, last_audio_data, last_sampling_rate)
        print(f"Audio saved to {output_wav_path}")


def main():
    parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool")
    parser.add_argument("--gpt_model", required=True, help="Path to the GPT model file")
    parser.add_argument("--sovits_model", required=True, help="Path to the SoVITS model file")
    parser.add_argument("--ref_audio", required=True, help="Path to the reference audio file")
    parser.add_argument("--ref_text", required=True, help="Path to the reference text file")
    parser.add_argument(
        "--ref_language", required=True, choices=["中文", "英文", "日文"], help="Language of the reference audio"
    )
    parser.add_argument("--target_text", required=True, help="Path to the target text file")
    parser.add_argument(
        "--target_language",
        required=True,
        choices=["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"],
        help="Language of the target text",
    )
    parser.add_argument("--output_path", required=True, help="Path to the output directory")

    args = parser.parse_args()

    synthesize(
        args.gpt_model,
        args.sovits_model,
        args.ref_audio,
        args.ref_text,
        args.ref_language,
        args.target_text,
        args.target_language,
        args.output_path,
    )


if __name__ == "__main__":
    main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
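The CLI above can also be driven programmatically by calling its synthesize() helper directly, which is occasionally handy for batch jobs. A hedged sketch of such a call; every path below is a placeholder that must point at real files in your setup, and importing the module pulls in the full inference stack:

```python
# Hypothetical programmatic invocation of inference_cli.synthesize();
# all paths are placeholders, not files shipped with the repo.
from GPT_SoVITS.inference_cli import synthesize

synthesize(
    GPT_model_path="GPT_weights/demo-e15.ckpt",
    SoVITS_model_path="SoVITS_weights/demo_e8_s216.pth",
    ref_audio_path="ref.wav",        # 3-10 s reference clip
    ref_text_path="ref.txt",         # transcript of the reference clip
    ref_language="中文",
    target_text_path="target.txt",   # text to synthesize
    target_language="中英混合",
    output_path="output",            # directory; output.wav is written inside
)
```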
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/stream_v2pro.py
GPT_SoVITS/stream_v2pro.py
# 这是一个实验性质的实现,旨在探索 stream infer 的可能性。(xiao hai xie zhe wan de) from typing import List from export_torch_script import ExportERes2NetV2, SSLModel, T2SModel, VitsModel, get_raw_t2s_model, init_sv_cn, resamplex, sample, spectrogram_torch import export_torch_script from my_utils import load_audio import torch from torch import LongTensor, Tensor, nn from torch.nn import functional as F import soundfile from inference_webui import get_phones_and_bert import matplotlib.pyplot as plt class StreamT2SModel(nn.Module): def __init__(self, t2s: T2SModel): super(StreamT2SModel, self).__init__() self.t2s = t2s @torch.jit.export def pre_infer( self, prompts: LongTensor, ref_seq: LongTensor, text_seq: LongTensor, ref_bert: torch.Tensor, text_bert: torch.Tensor, top_k: int, ) -> tuple[int, Tensor, Tensor, List[Tensor], List[Tensor]]: bert = torch.cat([ref_bert.T, text_bert.T], 1) all_phoneme_ids = torch.cat([ref_seq, text_seq], 1) bert = bert.unsqueeze(0) x = self.t2s.ar_text_embedding(all_phoneme_ids) x = x + self.t2s.bert_proj(bert.transpose(1, 2)) x: torch.Tensor = self.t2s.ar_text_position(x) # [1,N,512] [1,N] # y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) y = prompts # x_example = x[:,:,0] * 0.0 x_len = x.shape[1] x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool) y_emb = self.t2s.ar_audio_embedding(y) y_len: int = y_emb.shape[1] prefix_len = y.shape[1] y_pos = self.t2s.ar_audio_position(y_emb) xy_pos = torch.concat([x, y_pos], dim=1) bsz = x.shape[0] src_len = x_len + y_len x_attn_mask_pad = F.pad( x_attn_mask, (0, y_len), ###xx的纯0扩展到xx纯0+xy纯1,(x,x+y) value=True, ) y_attn_mask = F.pad( ###yy的右上1扩展到左边xy的0,(y,x+y) torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1), (x_len, 0), value=False, ) xy_attn_mask = ( torch.concat([x_attn_mask_pad, y_attn_mask], dim=0) .unsqueeze(0) .expand(bsz * self.t2s.num_head, -1, -1) .view(bsz, self.t2s.num_head, src_len, src_len) .to(device=x.device, dtype=torch.bool) ) xy_dec, k_cache, v_cache = self.t2s.t2s_transformer.process_prompt( xy_pos, xy_attn_mask, None ) logits = self.t2s.ar_predict_layer(xy_dec[:, -1]) logits = logits[:, :-1] samples = sample( logits, y, top_k=top_k, top_p=1, repetition_penalty=1.35, temperature=1.0 )[0] y = torch.concat([y, samples], dim=1) y_emb: Tensor = self.t2s.ar_audio_embedding(y[:, -1:]) xy_pos: Tensor = ( y_emb * self.t2s.ar_audio_position.x_scale + self.t2s.ar_audio_position.alpha * self.t2s.ar_audio_position.pe[:, y_len].to( dtype=y_emb.dtype, device=y_emb.device ) ) return y_len, y, xy_pos, k_cache, v_cache @torch.jit.export def decode_next_token( self, idx: int, # 记住从1开始 到1500 top_k: int, y_len: int, y: Tensor, xy_pos: Tensor, k_cache: List[Tensor], v_cache: List[Tensor], ) -> tuple[Tensor, Tensor, int, List[Tensor], List[Tensor]]: # [1, N] [N_layer, N, 1, 512] [N_layer, N, 1, 512] [1, N, 512] [1] [1, N, 512] [1, N] # y, k, v, y_emb, logits, samples = self.stage_decoder(y, k, v, y_emb, x_example) xy_dec, k_cache, v_cache = self.t2s.t2s_transformer.decode_next_token( xy_pos, k_cache, v_cache ) logits = self.t2s.ar_predict_layer(xy_dec[:, -1]) if idx < 11: ###至少预测出10个token不然不给停止(0.4s) logits = logits[:, :-1] samples = sample( logits, y, top_k=top_k, top_p=1, repetition_penalty=1.35, temperature=1.0 )[0] y = torch.concat([y, samples], dim=1) last_token = int(samples[0, 0]) # if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: # stop = True if torch.argmax(logits, dim=-1)[0] == self.t2s.EOS or samples[0, 0] == self.t2s.EOS: return y[:,:-1], xy_pos, self.t2s.EOS, 
k_cache, v_cache # if stop: # if y.shape[1] == 0: # y = torch.concat([y, torch.zeros_like(samples)], dim=1) # break y_emb = self.t2s.ar_audio_embedding(y[:, -1:]) xy_pos = ( y_emb * self.t2s.ar_audio_position.x_scale + self.t2s.ar_audio_position.alpha * self.t2s.ar_audio_position.pe[:, y_len + idx].to( dtype=y_emb.dtype, device=y_emb.device ) ) return y, xy_pos, last_token, k_cache, v_cache def forward( self, idx: int, # 记住从1开始 到1500 top_k: int, y_len: int, y: Tensor, xy_pos: Tensor, k_cache: List[Tensor], v_cache: List[Tensor], ): return self.decode_next_token(idx,top_k,y_len,y,xy_pos,k_cache,v_cache) class StepVitsModel(nn.Module): def __init__(self, vits: VitsModel,sv_model:ExportERes2NetV2): super().__init__() self.hps = vits.hps self.vq_model = vits.vq_model self.hann_window = vits.hann_window self.sv = sv_model def ref_handle(self, ref_audio_32k): refer = spectrogram_torch( self.hann_window, ref_audio_32k.float(), self.hps.data.filter_length, self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length, center=False, ) refer = refer.to(ref_audio_32k.dtype) ref_audio_16k = resamplex(ref_audio_32k, 32000, 16000).to(ref_audio_32k.dtype).to(ref_audio_32k.device) sv_emb = self.sv(ref_audio_16k) return refer, sv_emb def extract_latent(self, ssl_content): codes = self.vq_model.extract_latent(ssl_content) return codes[0] def forward(self, pred_semantic, text_seq, refer, sv_emb=None): return self.vq_model( pred_semantic, text_seq, refer, speed=1.0, sv_emb=sv_emb )[0, 0] @torch.jit.script def find_best_audio_offset_fast(reference_audio: Tensor, search_audio: Tensor): ref_len = len(reference_audio) search_len = len(search_audio) if search_len < ref_len: raise ValueError( f"搜索音频长度 ({search_len}) 必须大于等于参考音频长度 ({ref_len})" ) # 使用F.conv1d计算原始互相关 reference_flipped = reference_audio.unsqueeze(0).unsqueeze(0) search_padded = search_audio.unsqueeze(0).unsqueeze(0) # 计算点积 dot_products = F.conv1d(search_padded, reference_flipped).squeeze() if len(dot_products.shape) == 0: dot_products = dot_products.unsqueeze(0) # 计算参考音频的平方和 ref_squared_sum = torch.sum(reference_audio**2) # 计算搜索音频每个位置的平方和(滑动窗口) search_squared = search_audio**2 search_squared_padded = search_squared.unsqueeze(0).unsqueeze(0) ones_kernel = torch.ones( 1, 1, ref_len, dtype=search_audio.dtype, device=search_audio.device ) segment_squared_sums = F.conv1d(search_squared_padded, ones_kernel).squeeze() if len(segment_squared_sums.shape) == 0: segment_squared_sums = segment_squared_sums.unsqueeze(0) # 计算归一化因子 ref_norm = torch.sqrt(ref_squared_sum) segment_norms = torch.sqrt(segment_squared_sums) # 避免除零 epsilon = 1e-8 normalization_factor = ref_norm * segment_norms + epsilon # 归一化互相关 correlation_scores = dot_products / normalization_factor best_offset = torch.argmax(correlation_scores).item() return best_offset, correlation_scores import time def test_stream( gpt_path, vits_path, version, ref_audio_path, ref_text, output_path, device="cpu", is_half=True, ): if export_torch_script.sv_cn_model == None: init_sv_cn(device,is_half) ref_audio = torch.tensor([load_audio(ref_audio_path, 16000)]).float() ssl = SSLModel() print(f"device: {device}") ref_seq_id, ref_bert_T, ref_norm_text = get_phones_and_bert( ref_text, "all_zh", "v2" ) ref_seq = torch.LongTensor([ref_seq_id]).to(device) ref_bert = ref_bert_T.T if is_half: ref_bert = ref_bert.half() ref_bert = ref_bert.to(ref_seq.device) text_seq_id, text_bert_T, norm_text = get_phones_and_bert( "这是一个简单的示例,真没想到这么简单就完成了,真的神奇,接下来我们说说狐狸,可能这就是狐狸吧.它有长长的尾巴,尖尖的耳朵,传说中还有九条尾巴。你觉得狐狸神奇吗?", "auto", "v2" 
) text_seq = torch.LongTensor([text_seq_id]).to(device) text_bert = text_bert_T.T if is_half: text_bert = text_bert.half() text_bert = text_bert.to(text_seq.device) ssl_content = ssl(ref_audio) if is_half: ssl_content = ssl_content.half() ssl_content = ssl_content.to(device) sv_model = ExportERes2NetV2(export_torch_script.sv_cn_model) # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth" vits = VitsModel(vits_path, version,is_half=is_half,device=device) vits.eval() # gpt_path = "GPT_weights_v2/xw-e15.ckpt" # dict_s1 = torch.load(gpt_path, map_location=device) dict_s1 = torch.load(gpt_path, weights_only=False) raw_t2s = get_raw_t2s_model(dict_s1).to(device) print("#### get_raw_t2s_model ####") print(raw_t2s.config) if is_half: raw_t2s = raw_t2s.half() t2s_m = T2SModel(raw_t2s) t2s_m.eval() # t2s = torch.jit.script(t2s_m).to(device) t2s = t2s_m print("#### script t2s_m ####") print("vits.hps.data.sampling_rate:", vits.hps.data.sampling_rate) stream_t2s = StreamT2SModel(t2s).to(device) stream_t2s = torch.jit.script(stream_t2s) ref_audio_sr = resamplex(ref_audio, 16000, 32000) if is_half: ref_audio_sr = ref_audio_sr.half() ref_audio_sr = ref_audio_sr.to(device) top_k = 15 codes = vits.vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompts = prompt_semantic.unsqueeze(0) audio_16k = resamplex(ref_audio_sr, 32000, 16000).to(ref_audio_sr.dtype) sv_emb = sv_model(audio_16k) print("text_seq",text_seq.shape) refer = spectrogram_torch( vits.hann_window, ref_audio_sr, vits.hps.data.filter_length, vits.hps.data.sampling_rate, vits.hps.data.hop_length, vits.hps.data.win_length, center=False, ) st = time.time() et = time.time() y_len, y, xy_pos, k_cache, v_cache = stream_t2s.pre_infer(prompts, ref_seq, text_seq, ref_bert, text_bert, top_k) idx = 1 last_idx = 0 audios = [] raw_audios = [] last_audio_ret = None offset_index = [] full_audios = [] print("y.shape:", y.shape) cut_id = 0 while True: y, xy_pos, last_token, k_cache, v_cache = stream_t2s(idx, top_k, y_len, y, xy_pos, k_cache, v_cache) # print("y.shape:", y.shape) stop = last_token==t2s.EOS print('idx:',idx , 'y.shape:', y.shape, y.shape[1]-idx) if last_token < 50 and idx-last_idx > (len(audios)+1) * 25 and idx > cut_id: cut_id = idx + 7 print('trigger:',idx, last_idx, y[:,-idx+last_idx:], y[:,-idx+last_idx:].shape) # y = torch.cat([y, y[:,-1:]], dim=1) # idx+=1 if stop : idx -=1 print('stop') print(idx, y[:,-idx+last_idx:]) print(idx,last_idx, y.shape) print(y[:,-idx:-idx+20]) # 玄学这档子事说不清楚 if idx == cut_id or stop: print(f"idx: {idx}, last_idx: {last_idx}, cut_id: {cut_id}, stop: {stop}") audio = vits.vq_model(y[:,-idx:].unsqueeze(0), text_seq, refer, speed=1.0, sv_emb=sv_emb)[0, 0] full_audios.append(audio) if last_idx == 0: last_audio_ret = audio[-1280*8:-1280*8+256] audio = audio[:-1280*8] raw_audios.append(audio) et = time.time() else: if stop: audio_ = audio[last_idx*1280 -1280*8:] raw_audios.append(audio_) i, x = find_best_audio_offset_fast(last_audio_ret, audio_[:1280]) offset_index.append(i) audio = audio_[i:] else: audio_ = audio[last_idx*1280 -1280*8:-1280*8] raw_audios.append(audio_) i, x = find_best_audio_offset_fast(last_audio_ret, audio_[:1280]) offset_index.append(i) last_audio_ret = audio[-1280*8:-1280*8+256] audio = audio_[i:] last_idx = idx # print(f'write {output_path}/out_{audio_index}') # soundfile.write(f"{output_path}/out_{audio_index}.wav", audio.float().detach().cpu().numpy(), 32000) audios.append(audio) # print(idx,'/',1500 , y.shape, y[0,-1].item(), stop) if idx>1500: break if stop: break idx+=1 at = 
time.time() for (i,a) in enumerate(audios): print(f'write {output_path}/out_{i}') soundfile.write(f"{output_path}/out_{i}.wav", a.float().detach().cpu().numpy(), 32000) print(f"frist token: {et - st:.4f} seconds") print(f"all token: {at - st:.4f} seconds") audio = vits.vq_model(y[:,-idx:].unsqueeze(0), text_seq, refer, speed=1.0, sv_emb=sv_emb)[0, 0] soundfile.write(f"{output_path}/out_final.wav", audio.float().detach().cpu().numpy(), 32000) audio = torch.cat(audios, dim=0) soundfile.write(f"{output_path}/out.wav", audio.float().detach().cpu().numpy(), 32000) audio_raw = torch.cat(raw_audios, dim=0) soundfile.write(f"{output_path}/out.raw.wav", audio_raw.float().detach().cpu().numpy(), 32000) colors = ['red', 'green', 'blue', 'orange', 'purple', 'cyan', 'magenta', 'yellow'] max_duration = full_audios[-1].shape[0] plt.xlim(0, max_duration) last_line = 0 for i,a in enumerate(full_audios): plt.plot((a+2.0*i).float().detach().cpu().numpy(), color=colors[i], alpha=0.5, label=f"Audio {i}") # plt.axvline(x=last_line, color=colors[i], linestyle='--') last_line = a.shape[0]-8*1280 plt.axvline(x=last_line, color=colors[i], linestyle='--') plt.plot((audio-2.0).float().detach().cpu().numpy(), color='black', label='Final Audio') plt.plot((audio_raw-4.0).float().detach().cpu().numpy(), color='cyan', label='Raw Audio') print("offset_index:", offset_index) plt.show() def export_prov2( gpt_path, vits_path, version, ref_audio_path, ref_text, output_path, device="cpu", is_half=True, lang="auto", ): if export_torch_script.sv_cn_model == None: init_sv_cn(device,is_half) ref_audio = torch.tensor([load_audio(ref_audio_path, 16000)]).float() ssl = SSLModel() print(f"device: {device}") ref_seq_id, ref_bert_T, ref_norm_text = get_phones_and_bert( ref_text, lang, "v2" ) ref_seq = torch.LongTensor([ref_seq_id]).to(device) ref_bert = ref_bert_T.T if is_half: ref_bert = ref_bert.half() ref_bert = ref_bert.to(ref_seq.device) text_seq_id, text_bert_T, norm_text = get_phones_and_bert( "这是一个简单的示例,真没想到这么简单就完成了.The King and His Stories.Once there was a king.He likes to write stories, but his stories were not good.", "auto", "v2" ) text_seq = torch.LongTensor([text_seq_id]).to(device) text_bert = text_bert_T.T if is_half: text_bert = text_bert.half() text_bert = text_bert.to(text_seq.device) ssl_content = ssl(ref_audio) if is_half: ssl_content = ssl_content.half() ssl_content = ssl_content.to(device) sv_model = ExportERes2NetV2(export_torch_script.sv_cn_model) # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth" vits = VitsModel(vits_path, version,is_half=is_half,device=device) vits.eval() vits = StepVitsModel(vits, sv_model) # gpt_path = "GPT_weights_v2/xw-e15.ckpt" # dict_s1 = torch.load(gpt_path, map_location=device) dict_s1 = torch.load(gpt_path, weights_only=False) raw_t2s = get_raw_t2s_model(dict_s1).to(device) print("#### get_raw_t2s_model ####") print(raw_t2s.config) if is_half: raw_t2s = raw_t2s.half() t2s_m = T2SModel(raw_t2s) t2s_m.eval() # t2s = torch.jit.script(t2s_m).to(device) t2s = t2s_m print("#### script t2s_m ####") print("vits.hps.data.sampling_rate:", vits.hps.data.sampling_rate) stream_t2s = StreamT2SModel(t2s).to(device) stream_t2s = torch.jit.script(stream_t2s) ref_audio_sr = resamplex(ref_audio, 16000, 32000) ref_audio_sr = ref_audio_sr.to(device) if is_half: ref_audio_sr = ref_audio_sr.half() top_k = 15 prompts = vits.extract_latent(ssl_content) audio_16k = resamplex(ref_audio_sr, 32000, 16000).to(ref_audio_sr.dtype) sv_emb = sv_model(audio_16k) print("text_seq",text_seq.shape) # torch.jit.trace() 
refer,sv_emb = vits.ref_handle(ref_audio_sr) st = time.time() et = time.time() y_len, y, xy_pos, k_cache, v_cache = stream_t2s.pre_infer(prompts, ref_seq, text_seq, ref_bert, text_bert, top_k) idx = 1 print("y.shape:", y.shape) while True: y, xy_pos, last_token, k_cache, v_cache = stream_t2s(idx, top_k, y_len, y, xy_pos, k_cache, v_cache) # print("y.shape:", y.shape) idx+=1 # print(idx,'/',1500 , y.shape, y[0,-1].item(), stop) if idx>1500: break if last_token == t2s.EOS: break at = time.time() print("EOS:",t2s.EOS) print(f"frist token: {et - st:.4f} seconds") print(f"all token: {at - st:.4f} seconds") print("sv_emb", sv_emb.shape) print("refer",refer.shape) y = y[:,-idx:].unsqueeze(0) print("y", y.shape) audio = vits(y, text_seq, refer, sv_emb) soundfile.write(f"{output_path}/out_final.wav", audio.float().detach().cpu().numpy(), 32000) torch._dynamo.mark_dynamic(ssl_content, 2) torch._dynamo.mark_dynamic(ref_audio_sr, 1) torch._dynamo.mark_dynamic(ref_seq, 1) torch._dynamo.mark_dynamic(text_seq, 1) torch._dynamo.mark_dynamic(ref_bert, 0) torch._dynamo.mark_dynamic(text_bert, 0) torch._dynamo.mark_dynamic(refer, 2) torch._dynamo.mark_dynamic(y, 2) inputs = { "forward": (y, text_seq, refer, sv_emb), "extract_latent": ssl_content, "ref_handle": ref_audio_sr, } stream_t2s.save(f"{output_path}/t2s.pt") torch.jit.trace_module(vits, inputs=inputs, optimize=True).save(f"{output_path}/vits.pt") torch.jit.script(find_best_audio_offset_fast, optimize=True).save(f"{output_path}/find_best_audio_offset_fast.pt") import argparse import os if __name__ == "__main__": parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool") parser.add_argument("--gpt_model", required=True, help="Path to the GPT model file") parser.add_argument( "--sovits_model", required=True, help="Path to the SoVITS model file" ) parser.add_argument( "--ref_audio", required=True, help="Path to the reference audio file" ) parser.add_argument( "--ref_text", required=True, help="Path to the reference text file" ) parser.add_argument( "--output_path", required=True, help="Path to the output directory" ) parser.add_argument("--device", help="Device to use", default="cuda" if torch.cuda.is_available() else "cpu") parser.add_argument("--version", help="version of the model", default="v2Pro") parser.add_argument("--no-half", action="store_true", help = "Do not use half precision for model weights") parser.add_argument("--lang", default="auto", help="Language for text processing (default: auto)") args = parser.parse_args() if not os.path.exists(args.output_path): os.makedirs(args.output_path) is_half = not args.no_half with torch.no_grad(): export_prov2( gpt_path=args.gpt_model, vits_path=args.sovits_model, version=args.version, ref_audio_path=args.ref_audio, ref_text=args.ref_text, output_path=args.output_path, device=args.device, is_half=is_half, lang=args.lang, )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
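The streaming script stitches overlapping audio chunks by sliding a short reference snippet over the start of the next chunk and choosing the offset with the highest normalized cross-correlation, computed in one shot with F.conv1d. A compact standalone version of that search, simplified from find_best_audio_offset_fast (the function name here is illustrative):

```python
import torch
import torch.nn.functional as F


def best_offset(reference: torch.Tensor, search: torch.Tensor) -> int:
    """Return the offset in `search` where `reference` aligns best (NCC via conv1d)."""
    ref_len = reference.numel()
    # Dot product of the reference against every sliding window of `search`.
    dots = F.conv1d(search.view(1, 1, -1), reference.view(1, 1, -1)).flatten()
    # Energy of each window, computed with a ones-kernel convolution.
    ones = torch.ones(1, 1, ref_len)
    window_energy = F.conv1d((search**2).view(1, 1, -1), ones).flatten()
    # Normalize so the score is scale-invariant; epsilon guards against division by zero.
    ncc = dots / (reference.norm() * window_energy.sqrt() + 1e-8)
    return int(torch.argmax(ncc))


if __name__ == "__main__":
    ref = torch.randn(256)
    sig = torch.cat([torch.randn(100), ref, torch.randn(100)])
    print(best_offset(ref, sig))  # expected: 100
```

Expressing both the sliding dot product and the per-window energy as convolutions keeps the whole search on-device and vectorized, which matters when it runs once per streamed chunk.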
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/s2_train_v3_lora.py
GPT_SoVITS/s2_train_v3_lora.py
import warnings warnings.filterwarnings("ignore") import os import utils hps = utils.get_hparams(stage=2) os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",") import logging import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.cuda.amp import GradScaler, autocast from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm logging.getLogger("matplotlib").setLevel(logging.INFO) logging.getLogger("h5py").setLevel(logging.INFO) logging.getLogger("numba").setLevel(logging.INFO) from collections import OrderedDict as od from random import randint from module import commons from module.data_utils import ( DistributedBucketSampler, TextAudioSpeakerCollateV3, TextAudioSpeakerLoaderV3, TextAudioSpeakerCollateV4, TextAudioSpeakerLoaderV4, ) from module.models import ( SynthesizerTrnV3 as SynthesizerTrn, ) from peft import LoraConfig, get_peft_model from process_ckpt import savee torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = False ###反正A100fp32更快,那试试tf32吧 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就快一丁点),对于结果造成不了影响 # from config import pretrained_s2G,pretrained_s2D global_step = 0 device = "cpu" # cuda以外的设备,等mps优化后加入 def main(): if torch.cuda.is_available(): n_gpus = torch.cuda.device_count() else: n_gpus = 1 os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) mp.spawn( run, nprocs=n_gpus, args=( n_gpus, hps, ), ) def run(rank, n_gpus, hps): global global_step, no_grad_names, save_root, lora_rank if rank == 0: logger = utils.get_logger(hps.data.exp_dir) logger.info(hps) # utils.check_git_hash(hps.s2_ckpt_dir) writer = SummaryWriter(log_dir=hps.s2_ckpt_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) dist.init_process_group( backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", init_method="env://?use_libuv=False", world_size=n_gpus, rank=rank, ) torch.manual_seed(hps.train.seed) if torch.cuda.is_available(): torch.cuda.set_device(rank) TextAudioSpeakerLoader = TextAudioSpeakerLoaderV3 if hps.model.version == "v3" else TextAudioSpeakerLoaderV4 TextAudioSpeakerCollate = TextAudioSpeakerCollateV3 if hps.model.version == "v3" else TextAudioSpeakerCollateV4 train_dataset = TextAudioSpeakerLoader(hps.data) ######## train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [ 32, 300, 400, 500, 600, 700, 800, 900, 1000, # 1100, # 1200, # 1300, # 1400, # 1500, # 1600, # 1700, # 1800, # 1900, ], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=5, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=3, ) save_root = "%s/logs_s2_%s_lora_%s" % (hps.data.exp_dir, hps.model.version, hps.train.lora_rank) os.makedirs(save_root, exist_ok=True) lora_rank = int(hps.train.lora_rank) lora_config = LoraConfig( target_modules=["to_k", "to_q", "to_v", "to_out.0"], r=lora_rank, lora_alpha=lora_rank, init_lora_weights=True, ) def get_model(hps): return SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ) def get_optim(net_g): return torch.optim.AdamW( 
filter(lambda p: p.requires_grad, net_g.parameters()), ###默认所有层lr一致 hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) def model2cuda(net_g, rank): if torch.cuda.is_available(): net_g = DDP(net_g.cuda(rank), device_ids=[rank], find_unused_parameters=True) else: net_g = net_g.to(device) return net_g try: # 如果能加载自动resume net_g = get_model(hps) net_g.cfm = get_peft_model(net_g.cfm, lora_config) net_g = model2cuda(net_g, rank) optim_g = get_optim(net_g) # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(save_root, "G_*.pth"), net_g, optim_g, ) epoch_str += 1 global_step = (epoch_str - 1) * len(train_loader) except: # 如果首次不能加载,加载pretrain # traceback.print_exc() epoch_str = 1 global_step = 0 net_g = get_model(hps) if ( hps.train.pretrained_s2G != "" and hps.train.pretrained_s2G != None and os.path.exists(hps.train.pretrained_s2G) ): if rank == 0: logger.info("loaded pretrained %s" % hps.train.pretrained_s2G) print( "loaded pretrained %s" % hps.train.pretrained_s2G, net_g.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ), ) net_g.cfm = get_peft_model(net_g.cfm, lora_config) net_g = model2cuda(net_g, rank) optim_g = get_optim(net_g) no_grad_names = set() for name, param in net_g.named_parameters(): if not param.requires_grad: no_grad_names.add(name.replace("module.", "")) # print(name, "not requires_grad") # print(no_grad_names) # os._exit(233333) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=-1) for _ in range(epoch_str): scheduler_g.step() scaler = GradScaler(enabled=hps.train.fp16_run) net_d = optim_d = scheduler_d = None print("start training from epoch %s" % epoch_str) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, # [train_loader, eval_loader], logger, [writer, writer_eval]) [train_loader, None], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None, ) scheduler_g.step() print("training done") def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): net_g, net_d = nets optim_g, optim_d = optims # scheduler_g, scheduler_d = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate( tqdm(train_loader) ): if torch.cuda.is_available(): spec, spec_lengths = ( spec.cuda( rank, non_blocking=True, ), spec_lengths.cuda( rank, non_blocking=True, ), ) mel, mel_lengths = mel.cuda(rank, non_blocking=True), mel_lengths.cuda(rank, non_blocking=True) ssl = ssl.cuda(rank, non_blocking=True) ssl.requires_grad = False text, text_lengths = ( text.cuda( rank, non_blocking=True, ), text_lengths.cuda( rank, non_blocking=True, ), ) else: spec, spec_lengths = spec.to(device), spec_lengths.to(device) mel, mel_lengths = mel.to(device), mel_lengths.to(device) ssl = ssl.to(device) ssl.requires_grad = False text, text_lengths = text.to(device), text_lengths.to(device) with autocast(enabled=hps.train.fp16_run): 
cfm_loss = net_g( ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths, use_grad_ckpt=hps.train.grad_ckpt, ) loss_gen_all = cfm_loss optim_g.zero_grad() scaler.scale(loss_gen_all).backward() scaler.unscale_(optim_g) grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) scaler.step(optim_g) scaler.update() if rank == 0: if global_step % hps.train.log_interval == 0: lr = optim_g.param_groups[0]["lr"] losses = [cfm_loss] logger.info("Train Epoch: {} [{:.0f}%]".format(epoch, 100.0 * batch_idx / len(train_loader))) logger.info([x.item() for x in losses] + [global_step, lr]) scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g} utils.summarize( writer=writer, global_step=global_step, scalars=scalar_dict, ) global_step += 1 if epoch % hps.train.save_every_epoch == 0 and rank == 0: if hps.train.if_save_latest == 0: utils.save_checkpoint( net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(save_root, "G_{}.pth".format(global_step)), ) else: utils.save_checkpoint( net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(save_root, "G_{}.pth".format(233333333333)), ) if rank == 0 and hps.train.if_save_every_weights == True: if hasattr(net_g, "module"): ckpt = net_g.module.state_dict() else: ckpt = net_g.state_dict() sim_ckpt = od() for key in ckpt: # if "cfm"not in key: # print(key) if key not in no_grad_names: sim_ckpt[key] = ckpt[key].half().cpu() logger.info( "saving ckpt %s_e%s:%s" % ( hps.name, epoch, savee( sim_ckpt, hps.name + "_e%s_s%s_l%s" % (epoch, global_step, lora_rank), epoch, global_step, hps, model_version=hps.model.version, lora_rank=lora_rank, ), ) ) if rank == 0: logger.info("====> Epoch: {}".format(epoch)) if __name__ == "__main__": main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
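The v3/v4 LoRA fine-tuning script wraps only the attention projections of the CFM module with peft, setting lora_alpha equal to the rank, and later folds the adapters back in with merge_and_unload. A minimal sketch of the same wrapping on a toy module; the ToyAttention block is invented for illustration, while the target_modules list and the r == lora_alpha choice match the script's:

```python
import torch
from torch import nn
from peft import LoraConfig, get_peft_model


# Toy attention-like block whose projection names match the ones the training
# script targets ("to_q", "to_k", "to_v", "to_out.0"); the block itself is illustrative.
class ToyAttention(nn.Module):
    def __init__(self, dim=64):
        super().__init__()
        self.to_q = nn.Linear(dim, dim)
        self.to_k = nn.Linear(dim, dim)
        self.to_v = nn.Linear(dim, dim)
        self.to_out = nn.Sequential(nn.Linear(dim, dim))

    def forward(self, x):
        attn = torch.softmax(self.to_q(x) @ self.to_k(x).transpose(-1, -2) / 8.0, dim=-1)
        return self.to_out(attn @ self.to_v(x))


lora_rank = 32
lora_config = LoraConfig(
    target_modules=["to_k", "to_q", "to_v", "to_out.0"],
    r=lora_rank,
    lora_alpha=lora_rank,  # the script keeps alpha equal to the rank
    init_lora_weights=True,
)
model = get_peft_model(ToyAttention(), lora_config)
model.print_trainable_parameters()  # only the LoRA A/B matrices require grad

# After training, fold the adapters back into the base weights, as
# change_sovits_weights does with vq_model.cfm.merge_and_unload().
merged = model.merge_and_unload()
```

Because only the injected A/B matrices require gradients, the optimizer in the training script is built over filter(lambda p: p.requires_grad, ...) and the saved checkpoint skips the frozen base parameters via the no_grad_names set.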
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/s2_train.py
GPT_SoVITS/s2_train.py
import warnings warnings.filterwarnings("ignore") import os import utils hps = utils.get_hparams(stage=2) os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",") import logging import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.cuda.amp import GradScaler, autocast from torch.nn import functional as F from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm logging.getLogger("matplotlib").setLevel(logging.INFO) logging.getLogger("h5py").setLevel(logging.INFO) logging.getLogger("numba").setLevel(logging.INFO) from random import randint from module import commons from module.data_utils import ( DistributedBucketSampler, TextAudioSpeakerCollate, TextAudioSpeakerLoader, ) from module.losses import discriminator_loss, feature_loss, generator_loss, kl_loss from module.mel_processing import mel_spectrogram_torch, spec_to_mel_torch from module.models import ( MultiPeriodDiscriminator, SynthesizerTrn, ) from process_ckpt import savee torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = False ###反正A100fp32更快,那试试tf32吧 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就快一丁点),对于结果造成不了影响 # from config import pretrained_s2G,pretrained_s2D global_step = 0 device = "cpu" # cuda以外的设备,等mps优化后加入 def main(): if torch.cuda.is_available(): n_gpus = torch.cuda.device_count() else: n_gpus = 1 os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) mp.spawn( run, nprocs=n_gpus, args=( n_gpus, hps, ), ) def run(rank, n_gpus, hps): global global_step if rank == 0: logger = utils.get_logger(hps.data.exp_dir) logger.info(hps) # utils.check_git_hash(hps.s2_ckpt_dir) writer = SummaryWriter(log_dir=hps.s2_ckpt_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) dist.init_process_group( backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", init_method="env://?use_libuv=False", world_size=n_gpus, rank=rank, ) torch.manual_seed(hps.train.seed) if torch.cuda.is_available(): torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data, version=hps.model.version) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [ 32, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, ], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate(version=hps.model.version) train_loader = DataLoader( train_dataset, num_workers=5, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=3, ) # if rank == 0: # eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True) # eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, # batch_size=1, pin_memory=True, # drop_last=False, collate_fn=collate_fn) net_g = ( SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ).cuda(rank) if torch.cuda.is_available() else SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ).to(device) ) net_d = ( MultiPeriodDiscriminator(hps.model.use_spectral_norm, version=hps.model.version).cuda(rank) if 
torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm, version=hps.model.version).to(device) ) for name, param in net_g.named_parameters(): if not param.requires_grad: print(name, "not requires_grad") te_p = list(map(id, net_g.enc_p.text_embedding.parameters())) et_p = list(map(id, net_g.enc_p.encoder_text.parameters())) mrte_p = list(map(id, net_g.enc_p.mrte.parameters())) base_params = filter( lambda p: id(p) not in te_p + et_p + mrte_p and p.requires_grad, net_g.parameters(), ) # te_p=net_g.enc_p.text_embedding.parameters() # et_p=net_g.enc_p.encoder_text.parameters() # mrte_p=net_g.enc_p.mrte.parameters() optim_g = torch.optim.AdamW( # filter(lambda p: p.requires_grad, net_g.parameters()),###默认所有层lr一致 [ {"params": base_params, "lr": hps.train.learning_rate}, { "params": net_g.enc_p.text_embedding.parameters(), "lr": hps.train.learning_rate * hps.train.text_low_lr_rate, }, { "params": net_g.enc_p.encoder_text.parameters(), "lr": hps.train.learning_rate * hps.train.text_low_lr_rate, }, { "params": net_g.enc_p.mrte.parameters(), "lr": hps.train.learning_rate * hps.train.text_low_lr_rate, }, ], hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if torch.cuda.is_available(): net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) else: net_g = net_g.to(device) net_d = net_d.to(device) try: # 如果能加载自动resume _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_*.pth"), net_d, optim_d, ) # D多半加载没事 if rank == 0: logger.info("loaded D") # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_*.pth"), net_g, optim_g, ) epoch_str += 1 global_step = (epoch_str - 1) * len(train_loader) # epoch_str = 1 # global_step = 0 except: # 如果首次不能加载,加载pretrain # traceback.print_exc() epoch_str = 1 global_step = 0 if ( hps.train.pretrained_s2G != "" and hps.train.pretrained_s2G != None and os.path.exists(hps.train.pretrained_s2G) ): if rank == 0: logger.info("loaded pretrained %s" % hps.train.pretrained_s2G) print( "loaded pretrained %s" % hps.train.pretrained_s2G, net_g.module.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ) if torch.cuda.is_available() else net_g.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ), ) ##测试不加载优化器 if ( hps.train.pretrained_s2D != "" and hps.train.pretrained_s2D != None and os.path.exists(hps.train.pretrained_s2D) ): if rank == 0: logger.info("loaded pretrained %s" % hps.train.pretrained_s2D) print( "loaded pretrained %s" % hps.train.pretrained_s2D, net_d.module.load_state_dict( torch.load(hps.train.pretrained_s2D, map_location="cpu", weights_only=False)["weight"], strict=False ) if torch.cuda.is_available() else net_d.load_state_dict( torch.load(hps.train.pretrained_s2D, map_location="cpu", weights_only=False)["weight"], ), ) # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, 
gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=-1, ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=-1, ) for _ in range(epoch_str): scheduler_g.step() scheduler_d.step() scaler = GradScaler(enabled=hps.train.fp16_run) print("start training from epoch %s" % epoch_str) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, # [train_loader, eval_loader], logger, [writer, writer_eval]) [train_loader, None], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() print("training done") def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): net_g, net_d = nets optim_g, optim_d = optims # scheduler_g, scheduler_d = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() for batch_idx, data in enumerate(tqdm(train_loader)): if hps.model.version in {"v2Pro", "v2ProPlus"}: ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths, sv_emb = data else: ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths = data if torch.cuda.is_available(): spec, spec_lengths = ( spec.cuda( rank, non_blocking=True, ), spec_lengths.cuda( rank, non_blocking=True, ), ) y, y_lengths = ( y.cuda( rank, non_blocking=True, ), y_lengths.cuda( rank, non_blocking=True, ), ) ssl = ssl.cuda(rank, non_blocking=True) ssl.requires_grad = False # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) text, text_lengths = ( text.cuda( rank, non_blocking=True, ), text_lengths.cuda( rank, non_blocking=True, ), ) if hps.model.version in {"v2Pro", "v2ProPlus"}: sv_emb = sv_emb.cuda(rank, non_blocking=True) else: spec, spec_lengths = spec.to(device), spec_lengths.to(device) y, y_lengths = y.to(device), y_lengths.to(device) ssl = ssl.to(device) ssl.requires_grad = False # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) text, text_lengths = text.to(device), text_lengths.to(device) if hps.model.version in {"v2Pro", "v2ProPlus"}: sv_emb = sv_emb.to(device) with autocast(enabled=hps.train.fp16_run): if hps.model.version in {"v2Pro", "v2ProPlus"}: (y_hat, kl_ssl, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), stats_ssl) = net_g( ssl, spec, spec_lengths, text, text_lengths, sv_emb ) else: ( y_hat, kl_ssl, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), stats_ssl, ) = net_g(ssl, spec, spec_lengths, text, text_lengths) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, 
losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g, ) loss_disc_all = loss_disc optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) with autocast(enabled=False): loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g) loss_gen, losses_gen = generator_loss(y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + kl_ssl * 1 + loss_kl optim_g.zero_grad() scaler.scale(loss_gen_all).backward() scaler.unscale_(optim_g) grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) scaler.step(optim_g) scaler.update() if rank == 0: if global_step % hps.train.log_interval == 0: lr = optim_g.param_groups[0]["lr"] losses = [loss_disc, loss_gen, loss_fm, loss_mel, kl_ssl, loss_kl] logger.info( "Train Epoch: {} [{:.0f}%]".format( epoch, 100.0 * batch_idx / len(train_loader), ) ) logger.info([x.item() for x in losses] + [global_step, lr]) scalar_dict = { "loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g, } scalar_dict.update( { "loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl_ssl": kl_ssl, "loss/g/kl": loss_kl, } ) # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) image_dict = None try: ###Some people installed the wrong version of matplotlib. 
image_dict = { "slice/mel_org": utils.plot_spectrogram_to_numpy( y_mel[0].data.cpu().numpy(), ), "slice/mel_gen": utils.plot_spectrogram_to_numpy( y_hat_mel[0].data.cpu().numpy(), ), "all/mel": utils.plot_spectrogram_to_numpy( mel[0].data.cpu().numpy(), ), "all/stats_ssl": utils.plot_spectrogram_to_numpy( stats_ssl[0].data.cpu().numpy(), ), } except: pass if image_dict: utils.summarize( writer=writer, global_step=global_step, images=image_dict, scalars=scalar_dict, ) else: utils.summarize( writer=writer, global_step=global_step, scalars=scalar_dict, ) global_step += 1 if epoch % hps.train.save_every_epoch == 0 and rank == 0: if hps.train.if_save_latest == 0: utils.save_checkpoint( net_g, optim_g, hps.train.learning_rate, epoch, os.path.join( "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(global_step), ), ) utils.save_checkpoint( net_d, optim_d, hps.train.learning_rate, epoch, os.path.join( "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_{}.pth".format(global_step), ), ) else: utils.save_checkpoint( net_g, optim_g, hps.train.learning_rate, epoch, os.path.join( "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(233333333333), ), ) utils.save_checkpoint( net_d, optim_d, hps.train.learning_rate, epoch, os.path.join( "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_{}.pth".format(233333333333), ), ) if rank == 0 and hps.train.if_save_every_weights == True: if hasattr(net_g, "module"): ckpt = net_g.module.state_dict() else: ckpt = net_g.state_dict() logger.info( "saving ckpt %s_e%s:%s" % ( hps.name, epoch, savee( ckpt, hps.name + "_e%s_s%s" % (epoch, global_step), epoch, global_step, hps, model_version=None if hps.model.version not in {"v2Pro", "v2ProPlus"} else hps.model.version, ), ) ) if rank == 0: logger.info("====> Epoch: {}".format(epoch)) def evaluate(hps, generator, eval_loader, writer_eval): generator.eval() image_dict = {} audio_dict = {} print("Evaluating ...") with torch.no_grad(): for batch_idx, ( ssl, ssl_lengths, spec, spec_lengths, y, y_lengths, text, text_lengths, ) in enumerate(eval_loader): print(111) if torch.cuda.is_available(): spec, spec_lengths = spec.cuda(), spec_lengths.cuda() y, y_lengths = y.cuda(), y_lengths.cuda() ssl = ssl.cuda() text, text_lengths = text.cuda(), text_lengths.cuda() else: spec, spec_lengths = spec.to(device), spec_lengths.to(device) y, y_lengths = y.to(device), y_lengths.to(device) ssl = ssl.to(device) text, text_lengths = text.to(device), text_lengths.to(device) for test in [0, 1]: y_hat, mask, *_ = ( generator.module.infer( ssl, spec, spec_lengths, text, text_lengths, test=test, ) if torch.cuda.is_available() else generator.infer( ssl, spec, spec_lengths, text, text_lengths, test=test, ) ) y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1).float(), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) image_dict.update( { f"gen/mel_{batch_idx}_{test}": utils.plot_spectrogram_to_numpy( y_hat_mel[0].cpu().numpy(), ), } ) audio_dict.update( { f"gen/audio_{batch_idx}_{test}": y_hat[0, :, : y_hat_lengths[0]], }, ) image_dict.update( { f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy()), }, ) audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, : 
y_lengths[0]]})
            # y_hat, mask, *_ = generator.module.infer(ssl, spec_lengths, speakers, y=None)
            # audio_dict.update({
            #     f"gen/audio_{batch_idx}_style_pred": y_hat[0, :, :]
            # })
    utils.summarize(
        writer=writer_eval,
        global_step=global_step,
        images=image_dict,
        audios=audio_dict,
        audio_sampling_rate=hps.data.sampling_rate,
    )
    generator.train()


if __name__ == "__main__":
    main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
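A note on the stage-2 trainer above: it runs generator and discriminator under DDP with fp16 autocast, and gives the text-facing submodules (enc_p.text_embedding, enc_p.encoder_text, enc_p.mrte) a reduced learning rate by filtering their parameter ids out of the base AdamW group. A minimal standalone sketch of that id-filtering pattern, with hypothetical module names and rates:

```python
import torch

# Toy stand-in: "text_embedding" plays the role of the low-LR submodules.
class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.text_embedding = torch.nn.Embedding(10, 8)
        self.body = torch.nn.Linear(8, 8)

net = Toy()
base_lr, text_low_lr_rate = 1e-4, 0.4  # hypothetical values; the real ones come from hps.train
low_lr_ids = set(map(id, net.text_embedding.parameters()))
base_params = [p for p in net.parameters() if id(p) not in low_lr_ids and p.requires_grad]

optim = torch.optim.AdamW(
    [
        {"params": base_params, "lr": base_lr},
        {"params": net.text_embedding.parameters(), "lr": base_lr * text_low_lr_rate},
    ],
    base_lr,  # default LR for any group that omits "lr"
)
print([g["lr"] for g in optim.param_groups])  # [0.0001, 4e-05]
```

Collecting id(p) into a set keeps the membership test O(1) per parameter without comparing tensor data, which is why the trainer uses ids rather than the parameters themselves.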
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/s2_train_v3.py
GPT_SoVITS/s2_train_v3.py
import warnings warnings.filterwarnings("ignore") import os import utils hps = utils.get_hparams(stage=2) os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",") import logging import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.cuda.amp import GradScaler, autocast from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm logging.getLogger("matplotlib").setLevel(logging.INFO) logging.getLogger("h5py").setLevel(logging.INFO) logging.getLogger("numba").setLevel(logging.INFO) from random import randint from module import commons from module.data_utils import ( DistributedBucketSampler, ) from module.data_utils import ( TextAudioSpeakerCollateV3 as TextAudioSpeakerCollate, ) from module.data_utils import ( TextAudioSpeakerLoaderV3 as TextAudioSpeakerLoader, ) from module.models import ( SynthesizerTrnV3 as SynthesizerTrn, ) from process_ckpt import savee torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = False ###反正A100fp32更快,那试试tf32吧 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就快一丁点),对于结果造成不了影响 # from config import pretrained_s2G,pretrained_s2D global_step = 0 device = "cpu" # cuda以外的设备,等mps优化后加入 def main(): if torch.cuda.is_available(): n_gpus = torch.cuda.device_count() else: n_gpus = 1 os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) mp.spawn( run, nprocs=n_gpus, args=( n_gpus, hps, ), ) def run(rank, n_gpus, hps): global global_step if rank == 0: logger = utils.get_logger(hps.data.exp_dir) logger.info(hps) # utils.check_git_hash(hps.s2_ckpt_dir) writer = SummaryWriter(log_dir=hps.s2_ckpt_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) dist.init_process_group( backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", init_method="env://?use_libuv=False", world_size=n_gpus, rank=rank, ) torch.manual_seed(hps.train.seed) if torch.cuda.is_available(): torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data) ######## train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [ 32, 300, 400, 500, 600, 700, 800, 900, 1000, # 1100, # 1200, # 1300, # 1400, # 1500, # 1600, # 1700, # 1800, # 1900, ], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=5, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=3, ) # if rank == 0: # eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True) # eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, # batch_size=1, pin_memory=True, # drop_last=False, collate_fn=collate_fn) net_g = ( SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ).cuda(rank) if torch.cuda.is_available() else SynthesizerTrn( hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, ).to(device) ) # net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to(device) # for name, param in 
net_g.named_parameters(): # if not param.requires_grad: # print(name, "not requires_grad") optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), ###默认所有层lr一致 hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) # optim_d = torch.optim.AdamW( # net_d.parameters(), # hps.train.learning_rate, # betas=hps.train.betas, # eps=hps.train.eps, # ) if torch.cuda.is_available(): net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) else: net_g = net_g.to(device) # net_d = net_d.to(device) try: # 如果能加载自动resume # _, _, _, epoch_str = utils.load_checkpoint( # utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir,hps.model.version), "D_*.pth"), # net_d, # optim_d, # ) # D多半加载没事 # if rank == 0: # logger.info("loaded D") # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) _, _, _, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_*.pth"), net_g, optim_g, ) epoch_str += 1 global_step = (epoch_str - 1) * len(train_loader) # epoch_str = 1 # global_step = 0 except: # 如果首次不能加载,加载pretrain # traceback.print_exc() epoch_str = 1 global_step = 0 if ( hps.train.pretrained_s2G != "" and hps.train.pretrained_s2G != None and os.path.exists(hps.train.pretrained_s2G) ): if rank == 0: logger.info("loaded pretrained %s" % hps.train.pretrained_s2G) print( "loaded pretrained %s" % hps.train.pretrained_s2G, net_g.module.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ) if torch.cuda.is_available() else net_g.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu", weights_only=False)["weight"], strict=False, ), ) ##测试不加载优化器 # if hps.train.pretrained_s2D != ""and hps.train.pretrained_s2D != None and os.path.exists(hps.train.pretrained_s2D): # if rank == 0: # logger.info("loaded pretrained %s" % hps.train.pretrained_s2D) # print( # net_d.module.load_state_dict( # torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] # ) if torch.cuda.is_available() else net_d.load_state_dict( # torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] # ) # ) # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=-1) # scheduler_d = torch.optim.lr_scheduler.ExponentialLR( # optim_d, gamma=hps.train.lr_decay, last_epoch=-1 # ) for _ in range(epoch_str): scheduler_g.step() # scheduler_d.step() scaler = GradScaler(enabled=hps.train.fp16_run) net_d = optim_d = scheduler_d = None print("start training from epoch %s" % epoch_str) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, # [train_loader, eval_loader], logger, [writer, writer_eval]) [train_loader, None], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, None], None, None, ) scheduler_g.step() # scheduler_d.step() print("training done") def train_and_evaluate( rank, epoch, hps, nets, optims, 
schedulers, scaler, loaders, logger, writers, ): net_g, net_d = nets optim_g, optim_d = optims # scheduler_g, scheduler_d = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() # net_d.train() # for batch_idx, ( # ssl, # ssl_lengths, # spec, # spec_lengths, # y, # y_lengths, # text, # text_lengths, # ) in enumerate(tqdm(train_loader)): for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate( tqdm(train_loader) ): if torch.cuda.is_available(): spec, spec_lengths = ( spec.cuda( rank, non_blocking=True, ), spec_lengths.cuda( rank, non_blocking=True, ), ) mel, mel_lengths = mel.cuda(rank, non_blocking=True), mel_lengths.cuda(rank, non_blocking=True) ssl = ssl.cuda(rank, non_blocking=True) ssl.requires_grad = False # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) text, text_lengths = ( text.cuda( rank, non_blocking=True, ), text_lengths.cuda( rank, non_blocking=True, ), ) else: spec, spec_lengths = spec.to(device), spec_lengths.to(device) mel, mel_lengths = mel.to(device), mel_lengths.to(device) ssl = ssl.to(device) ssl.requires_grad = False # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) text, text_lengths = text.to(device), text_lengths.to(device) with autocast(enabled=hps.train.fp16_run): cfm_loss = net_g( ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths, use_grad_ckpt=hps.train.grad_ckpt, ) loss_gen_all = cfm_loss optim_g.zero_grad() scaler.scale(loss_gen_all).backward() scaler.unscale_(optim_g) grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) scaler.step(optim_g) scaler.update() if rank == 0: if global_step % hps.train.log_interval == 0: lr = optim_g.param_groups[0]["lr"] # losses = [commit_loss,cfm_loss,mel_loss,loss_disc, loss_gen, loss_fm, loss_mel, loss_kl] losses = [cfm_loss] logger.info( "Train Epoch: {} [{:.0f}%]".format( epoch, 100.0 * batch_idx / len(train_loader), ) ) logger.info([x.item() for x in losses] + [global_step, lr]) scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g} # image_dict = { # "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), # "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), # "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), # "all/stats_ssl": utils.plot_spectrogram_to_numpy(stats_ssl[0].data.cpu().numpy()), # } utils.summarize( writer=writer, global_step=global_step, # images=image_dict, scalars=scalar_dict, ) # if global_step % hps.train.eval_interval == 0: # # evaluate(hps, net_g, eval_loader, writer_eval) # utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,os.path.join(hps.s2_ckpt_dir, "G_{}.pth".format(global_step)),scaler) # # utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,os.path.join(hps.s2_ckpt_dir, "D_{}.pth".format(global_step)),scaler) # # keep_ckpts = getattr(hps.train, 'keep_ckpts', 3) # # if keep_ckpts > 0: # # utils.clean_checkpoints(path_to_models=hps.s2_ckpt_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) global_step += 1 if epoch % hps.train.save_every_epoch == 0 and rank == 0: if hps.train.if_save_latest == 0: utils.save_checkpoint( net_g, optim_g, hps.train.learning_rate, epoch, os.path.join( "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(global_step), ), ) # utils.save_checkpoint( # net_d, # optim_d, # 
hps.train.learning_rate, # epoch, # os.path.join( # "%s/logs_s2_%s" % (hps.data.exp_dir,hps.model.version), "D_{}.pth".format(global_step) # ), # ) else: utils.save_checkpoint( net_g, optim_g, hps.train.learning_rate, epoch, os.path.join( "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(233333333333), ), ) # utils.save_checkpoint( # net_d, # optim_d, # hps.train.learning_rate, # epoch, # os.path.join( # "%s/logs_s2_%s" % (hps.data.exp_dir,hps.model.version), "D_{}.pth".format(233333333333) # ), # ) if rank == 0 and hps.train.if_save_every_weights == True: if hasattr(net_g, "module"): ckpt = net_g.module.state_dict() else: ckpt = net_g.state_dict() logger.info( "saving ckpt %s_e%s:%s" % ( hps.name, epoch, savee( ckpt, hps.name + "_e%s_s%s" % (epoch, global_step), epoch, global_step, hps, ), ) ) if rank == 0: logger.info("====> Epoch: {}".format(epoch)) if __name__ == "__main__": main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
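The v3 trainer above drops the discriminator entirely and optimizes the generator against a single CFM loss. One detail worth isolating is how it restores the LR schedule on resume: it rebuilds ExponentialLR with last_epoch=-1 and replays one step per already-trained epoch rather than passing last_epoch directly. A standalone sketch of that replay, with hypothetical numbers:

```python
import torch

model = torch.nn.Linear(4, 4)
optim = torch.optim.AdamW(model.parameters(), lr=1e-4)
sched = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.999, last_epoch=-1)

epoch_str = 5  # pretend we resumed from a checkpoint saved after epoch 4
for _ in range(epoch_str):
    # replay the decay; PyTorch warns about stepping before optimizer.step(), harmless here
    sched.step()

print(optim.param_groups[0]["lr"])  # 1e-4 * 0.999**5, roughly 9.95e-05
```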
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/utils.py
GPT_SoVITS/utils.py
import argparse import glob import json import logging import os import subprocess import sys import traceback import librosa import numpy as np import torch logging.getLogger("numba").setLevel(logging.ERROR) logging.getLogger("matplotlib").setLevel(logging.ERROR) logging.getLogger("httpx").setLevel(logging.ERROR) MATPLOTLIB_FLAG = False logging.basicConfig(stream=sys.stdout, level=logging.INFO) logger = logging def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): assert os.path.isfile(checkpoint_path) checkpoint_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=False) iteration = checkpoint_dict["iteration"] learning_rate = checkpoint_dict["learning_rate"] if optimizer is not None and not skip_optimizer and checkpoint_dict["optimizer"] is not None: optimizer.load_state_dict(checkpoint_dict["optimizer"]) saved_state_dict = checkpoint_dict["model"] if hasattr(model, "module"): state_dict = model.module.state_dict() else: state_dict = model.state_dict() new_state_dict = {} for k, v in state_dict.items(): try: # assert "quantizer" not in k # print("load", k) new_state_dict[k] = saved_state_dict[k] assert saved_state_dict[k].shape == v.shape, ( saved_state_dict[k].shape, v.shape, ) except: traceback.print_exc() print("error, %s is not in the checkpoint" % k) # shape不对也会,比如text_embedding当cleaner修改时 new_state_dict[k] = v if hasattr(model, "module"): model.module.load_state_dict(new_state_dict) else: model.load_state_dict(new_state_dict) print("load ") logger.info( "Loaded checkpoint '{}' (iteration {})".format( checkpoint_path, iteration, ) ) return model, optimizer, learning_rate, iteration import shutil from time import time as ttime def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path dir = os.path.dirname(path) name = os.path.basename(path) tmp_path = "%s.pth" % (ttime()) torch.save(fea, tmp_path) shutil.move(tmp_path, "%s/%s" % (dir, name)) def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): logger.info("Saving model and optimizer state at iteration {} to {}".format(iteration, checkpoint_path)) if hasattr(model, "module"): state_dict = model.module.state_dict() else: state_dict = model.state_dict() # torch.save( my_save( { "model": state_dict, "iteration": iteration, "optimizer": optimizer.state_dict(), "learning_rate": learning_rate, }, checkpoint_path, ) def summarize( writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050, ): for k, v in scalars.items(): writer.add_scalar(k, v, global_step) for k, v in histograms.items(): writer.add_histogram(k, v, global_step) for k, v in images.items(): writer.add_image(k, v, global_step, dataformats="HWC") for k, v in audios.items(): writer.add_audio(k, v, global_step, audio_sampling_rate) def latest_checkpoint_path(dir_path, regex="G_*.pth"): f_list = glob.glob(os.path.join(dir_path, regex)) f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) x = f_list[-1] print(x) return x def plot_spectrogram_to_numpy(spectrogram): global MATPLOTLIB_FLAG if not MATPLOTLIB_FLAG: import matplotlib matplotlib.use("Agg") MATPLOTLIB_FLAG = True mpl_logger = logging.getLogger("matplotlib") mpl_logger.setLevel(logging.WARNING) import matplotlib.pylab as plt fig, ax = plt.subplots(figsize=(10, 2)) im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") plt.colorbar(im, ax=ax) plt.xlabel("Frames") plt.ylabel("Channels") plt.tight_layout() fig.canvas.draw() data = 
np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) plt.close() return data def plot_alignment_to_numpy(alignment, info=None): global MATPLOTLIB_FLAG if not MATPLOTLIB_FLAG: import matplotlib matplotlib.use("Agg") MATPLOTLIB_FLAG = True mpl_logger = logging.getLogger("matplotlib") mpl_logger.setLevel(logging.WARNING) import matplotlib.pylab as plt fig, ax = plt.subplots(figsize=(6, 4)) im = ax.imshow( alignment.transpose(), aspect="auto", origin="lower", interpolation="none", ) fig.colorbar(im, ax=ax) xlabel = "Decoder timestep" if info is not None: xlabel += "\n\n" + info plt.xlabel(xlabel) plt.ylabel("Encoder timestep") plt.tight_layout() fig.canvas.draw() data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) plt.close() return data def load_wav_to_torch(full_path): data, sampling_rate = librosa.load(full_path, sr=None) return torch.FloatTensor(data), sampling_rate def load_filepaths_and_text(filename, split="|"): with open(filename, encoding="utf-8") as f: filepaths_and_text = [line.strip().split(split) for line in f] return filepaths_and_text def get_hparams(init=True, stage=1): parser = argparse.ArgumentParser() parser.add_argument( "-c", "--config", type=str, default="./configs/s2.json", help="JSON file for configuration", ) parser.add_argument("-p", "--pretrain", type=str, required=False, default=None, help="pretrain dir") parser.add_argument( "-rs", "--resume_step", type=int, required=False, default=None, help="resume step", ) # parser.add_argument('-e', '--exp_dir', type=str, required=False,default=None,help='experiment directory') # parser.add_argument('-g', '--pretrained_s2G', type=str, required=False,default=None,help='pretrained sovits gererator weights') # parser.add_argument('-d', '--pretrained_s2D', type=str, required=False,default=None,help='pretrained sovits discriminator weights') args = parser.parse_args() config_path = args.config with open(config_path, "r") as f: data = f.read() config = json.loads(data) hparams = HParams(**config) hparams.pretrain = args.pretrain hparams.resume_step = args.resume_step # hparams.data.exp_dir = args.exp_dir if stage == 1: model_dir = hparams.s1_ckpt_dir else: model_dir = hparams.s2_ckpt_dir config_save_path = os.path.join(model_dir, "config.json") if not os.path.exists(model_dir): os.makedirs(model_dir) with open(config_save_path, "w") as f: f.write(data) return hparams def clean_checkpoints(path_to_models="logs/44k/", n_ckpts_to_keep=2, sort_by_time=True): """Freeing up space by deleting saved ckpts Arguments: path_to_models -- Path to the model directory n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth sort_by_time -- True -> chronologically delete ckpts False -> lexicographically delete ckpts """ import re ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] name_key = lambda _f: int(re.compile("._(\d+)\.pth").match(_f).group(1)) time_key = lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)) sort_key = time_key if sort_by_time else name_key x_sorted = lambda _x: sorted( [f for f in ckpts_files if f.startswith(_x) and not f.endswith("_0.pth")], key=sort_key, ) to_del = [ os.path.join(path_to_models, fn) for fn in (x_sorted("G")[:-n_ckpts_to_keep] + x_sorted("D")[:-n_ckpts_to_keep]) ] del_info = lambda fn: logger.info(f".. 
Free up space by deleting ckpt {fn}") del_routine = lambda x: [os.remove(x), del_info(x)] rs = [del_routine(fn) for fn in to_del] def get_hparams_from_dir(model_dir): config_save_path = os.path.join(model_dir, "config.json") with open(config_save_path, "r") as f: data = f.read() config = json.loads(data) hparams = HParams(**config) hparams.model_dir = model_dir return hparams def get_hparams_from_file(config_path): with open(config_path, "r") as f: data = f.read() config = json.loads(data) hparams = HParams(**config) return hparams def check_git_hash(model_dir): source_dir = os.path.dirname(os.path.realpath(__file__)) if not os.path.exists(os.path.join(source_dir, ".git")): logger.warning( "{} is not a git repository, therefore hash value comparison will be ignored.".format( source_dir, ) ) return cur_hash = subprocess.getoutput("git rev-parse HEAD") path = os.path.join(model_dir, "githash") if os.path.exists(path): saved_hash = open(path).read() if saved_hash != cur_hash: logger.warning( "git hash values are different. {}(saved) != {}(current)".format( saved_hash[:8], cur_hash[:8], ) ) else: open(path, "w").write(cur_hash) def get_logger(model_dir, filename="train.log"): global logger logger = logging.getLogger(os.path.basename(model_dir)) logger.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") if not os.path.exists(model_dir): os.makedirs(model_dir) h = logging.FileHandler(os.path.join(model_dir, filename)) h.setLevel(logging.INFO) h.setFormatter(formatter) logger.addHandler(h) return logger class HParams: def __init__(self, **kwargs): for k, v in kwargs.items(): if type(v) == dict: v = HParams(**v) self[k] = v def keys(self): return self.__dict__.keys() def items(self): return self.__dict__.items() def values(self): return self.__dict__.values() def __len__(self): return len(self.__dict__) def __getitem__(self, key): return getattr(self, key) def __setitem__(self, key, value): return setattr(self, key, value) def __contains__(self, key): return key in self.__dict__ def __repr__(self): return self.__dict__.__repr__() if __name__ == "__main__": print( load_wav_to_torch( "/home/fish/wenetspeech/dataset_vq/Y0000022499_wHFSeHEx9CM/S00261.flac", ) )
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
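One fragile spot in utils.py above: both plot helpers convert the rendered canvas with np.fromstring(fig.canvas.tostring_rgb(), ...). np.fromstring on binary data has long been deprecated in NumPy, and tostring_rgb has been removed from recent Matplotlib, which is likely what the "Some people installed the wrong version of matplotlib" comment in the trainer alludes to. A version-tolerant sketch of just that conversion step:

```python
import numpy as np
import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(10, 2))
ax.imshow(np.random.rand(80, 100), aspect="auto", origin="lower", interpolation="none")
fig.canvas.draw()

# buffer_rgba() is the stable Agg accessor; frombuffer replaces fromstring.
data = np.frombuffer(fig.canvas.buffer_rgba(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (4,))[..., :3]  # drop alpha -> HWC RGB
plt.close(fig)
print(data.shape)  # (height, width, 3), ready for SummaryWriter.add_image(dataformats="HWC")
```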
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/download.py
GPT_SoVITS/download.py
import os
import sys

now_dir = os.getcwd()
sys.path.insert(0, now_dir)

from text.g2pw import G2PWPinyin

# Constructing G2PWPinyin loads (and, per this file's name, pre-downloads)
# the G2PW polyphone model assets so later inference starts warm.
g2pw = G2PWPinyin(
    model_dir="GPT_SoVITS/text/G2PWModel",
    model_source="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
    v_to_u=False,
    neutral_tone_with_five=True,
)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/export_torch_script.py
GPT_SoVITS/export_torch_script.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py # reference: https://github.com/lifeiteng/vall-e import argparse from io import BytesIO from typing import Optional from my_utils import load_audio import torch import torchaudio from torch import IntTensor, LongTensor, Tensor, nn from torch.nn import functional as F from transformers import AutoModelForMaskedLM, AutoTokenizer from feature_extractor import cnhubert from AR.models.t2s_lightning_module import Text2SemanticLightningModule from module.models_onnx import SynthesizerTrn from inference_webui import get_phones_and_bert from sv import SV import kaldi as Kaldi import os import soundfile default_config = { "embedding_dim": 512, "hidden_dim": 512, "num_head": 8, "num_layers": 12, "num_codebook": 8, "p_dropout": 0.0, "vocab_size": 1024 + 1, "phoneme_vocab_size": 512, "EOS": 1024, } sv_cn_model = None def init_sv_cn(device, is_half): global sv_cn_model sv_cn_model = SV(device, is_half) def load_sovits_new(sovits_path): f = open(sovits_path, "rb") meta = f.read(2) if meta != b"PK": data = b"PK" + f.read() bio = BytesIO() bio.write(data) bio.seek(0) return torch.load(bio, map_location="cpu", weights_only=False) return torch.load(sovits_path, map_location="cpu", weights_only=False) def get_raw_t2s_model(dict_s1) -> Text2SemanticLightningModule: config = dict_s1["config"] config["model"]["dropout"] = float(config["model"]["dropout"]) t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) t2s_model.load_state_dict(dict_s1["weight"]) t2s_model = t2s_model.eval() return t2s_model @torch.jit.script def logits_to_probs( logits, previous_tokens: Optional[torch.Tensor] = None, temperature: float = 1.0, top_k: Optional[int] = None, top_p: Optional[int] = None, repetition_penalty: float = 1.0, ): # if previous_tokens is not None: # previous_tokens = previous_tokens.squeeze() # print(logits.shape,previous_tokens.shape) # pdb.set_trace() if previous_tokens is not None and repetition_penalty != 1.0: previous_tokens = previous_tokens.long() score = torch.gather(logits, dim=1, index=previous_tokens) score = torch.where(score < 0, score * repetition_penalty, score / repetition_penalty) logits.scatter_(dim=1, index=previous_tokens, src=score) if top_p is not None and top_p < 1.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cum_probs > top_p sorted_indices_to_remove[:, 0] = False # keep at least one option indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove) logits = logits.masked_fill(indices_to_remove, -float("Inf")) logits = logits / max(temperature, 1e-5) if top_k is not None: v, _ = torch.topk(logits, min(top_k, logits.size(-1))) pivot = v[:, -1].unsqueeze(-1) logits = torch.where(logits < pivot, -float("Inf"), logits) probs = torch.nn.functional.softmax(logits, dim=-1) return probs @torch.jit.script def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization q = torch.empty_like(probs_sort).exponential_(1.0) return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int) @torch.jit.script def sample( logits, previous_tokens, temperature: float = 1.0, top_k: Optional[int] = None, top_p: Optional[int] = None, repetition_penalty: float = 1.35, ): probs = logits_to_probs( logits=logits, previous_tokens=previous_tokens, 
temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, ) idx_next = multinomial_sample_one_no_sync(probs) return idx_next, probs @torch.jit.script def spectrogram_torch( hann_window: Tensor, y: Tensor, n_fft: int, sampling_rate: int, hop_size: int, win_size: int, center: bool = False ): # hann_window = torch.hann_window(win_size, device=y.device, dtype=y.dtype) y = torch.nn.functional.pad( y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window, center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=False, ) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) return spec class DictToAttrRecursive(dict): def __init__(self, input_dict): super().__init__(input_dict) for key, value in input_dict.items(): if isinstance(value, dict): value = DictToAttrRecursive(value) self[key] = value setattr(self, key, value) def __getattr__(self, item): try: return self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") def __setattr__(self, key, value): if isinstance(value, dict): value = DictToAttrRecursive(value) super(DictToAttrRecursive, self).__setitem__(key, value) super().__setattr__(key, value) def __delattr__(self, item): try: del self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") @torch.jit.script class T2SMLP: def __init__(self, w1, b1, w2, b2): self.w1 = w1 self.b1 = b1 self.w2 = w2 self.b2 = b2 def forward(self, x): x = F.relu(F.linear(x, self.w1, self.b1)) x = F.linear(x, self.w2, self.b2) return x @torch.jit.script class T2SBlock: def __init__( self, num_heads: int, hidden_dim: int, mlp: T2SMLP, qkv_w, qkv_b, out_w, out_b, norm_w1, norm_b1, norm_eps1: float, norm_w2, norm_b2, norm_eps2: float, ): self.num_heads = num_heads self.mlp = mlp self.hidden_dim: int = hidden_dim self.qkv_w = qkv_w self.qkv_b = qkv_b self.out_w = out_w self.out_b = out_b self.norm_w1 = norm_w1 self.norm_b1 = norm_b1 self.norm_eps1 = norm_eps1 self.norm_w2 = norm_w2 self.norm_b2 = norm_b2 self.norm_eps2 = norm_eps2 self.false = torch.tensor(False, dtype=torch.bool) @torch.jit.ignore def to_mask(self, x: torch.Tensor, padding_mask: Optional[torch.Tensor]): if padding_mask is None: return x if padding_mask.dtype == torch.bool: return x.masked_fill(padding_mask, 0) else: return x * padding_mask def process_prompt(self, x: torch.Tensor, attn_mask: torch.Tensor, padding_mask: Optional[torch.Tensor] = None): q, k, v = F.linear(self.to_mask(x, padding_mask), self.qkv_w, self.qkv_b).chunk(3, dim=-1) batch_size = q.shape[0] q_len = q.shape[1] kv_len = k.shape[1] q = self.to_mask(q, padding_mask) k_cache = self.to_mask(k, padding_mask) v_cache = self.to_mask(v, padding_mask) q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2) k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2) v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2) attn = F.scaled_dot_product_attention(q, k, v, ~attn_mask) # attn = attn.permute(2, 0, 1, 3).reshape(batch_size * q_len, self.hidden_dim) # attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0) attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1) attn = F.linear(self.to_mask(attn, padding_mask), self.out_w, self.out_b) x = x + attn x = F.layer_norm(x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1) x = x + self.mlp.forward(x) x = F.layer_norm( x, [self.hidden_dim], 
self.norm_w2, self.norm_b2, self.norm_eps2, ) return x, k_cache, v_cache def decode_next_token(self, x: torch.Tensor, k_cache: torch.Tensor, v_cache: torch.Tensor): q, k, v = F.linear(x, self.qkv_w, self.qkv_b).chunk(3, dim=-1) k_cache = torch.cat([k_cache, k], dim=1) v_cache = torch.cat([v_cache, v], dim=1) batch_size = q.shape[0] q_len = q.shape[1] kv_len = k_cache.shape[1] q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2) k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2) v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2) attn = F.scaled_dot_product_attention(q, k, v) # attn = attn.permute(2, 0, 1, 3).reshape(batch_size * q_len, self.hidden_dim) # attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0) attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1) attn = F.linear(attn, self.out_w, self.out_b) x = x + attn x = F.layer_norm(x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1) x = x + self.mlp.forward(x) x = F.layer_norm( x, [self.hidden_dim], self.norm_w2, self.norm_b2, self.norm_eps2, ) return x, k_cache, v_cache @torch.jit.script class T2STransformer: def __init__(self, num_blocks: int, blocks: list[T2SBlock]): self.num_blocks: int = num_blocks self.blocks = blocks def process_prompt(self, x: torch.Tensor, attn_mask: torch.Tensor, padding_mask: Optional[torch.Tensor] = None): k_cache: list[torch.Tensor] = [] v_cache: list[torch.Tensor] = [] for i in range(self.num_blocks): x, k_cache_, v_cache_ = self.blocks[i].process_prompt(x, attn_mask, padding_mask) k_cache.append(k_cache_) v_cache.append(v_cache_) return x, k_cache, v_cache def decode_next_token(self, x: torch.Tensor, k_cache: list[torch.Tensor], v_cache: list[torch.Tensor]): for i in range(self.num_blocks): x, k_cache[i], v_cache[i] = self.blocks[i].decode_next_token(x, k_cache[i], v_cache[i]) return x, k_cache, v_cache class VitsModel(nn.Module): def __init__(self, vits_path, version=None, is_half=True, device="cpu"): super().__init__() # dict_s2 = torch.load(vits_path,map_location="cpu") dict_s2 = load_sovits_new(vits_path) self.hps = dict_s2["config"] if version is None: if dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322: self.hps["model"]["version"] = "v1" else: self.hps["model"]["version"] = "v2" else: if version in ["v1", "v2", "v3", "v4", "v2Pro", "v2ProPlus"]: self.hps["model"]["version"] = version else: raise ValueError(f"Unsupported version: {version}") self.hps = DictToAttrRecursive(self.hps) self.hps.model.semantic_frame_rate = "25hz" self.vq_model = SynthesizerTrn( self.hps.data.filter_length // 2 + 1, self.hps.train.segment_size // self.hps.data.hop_length, n_speakers=self.hps.data.n_speakers, **self.hps.model, ) self.vq_model.load_state_dict(dict_s2["weight"], strict=False) self.vq_model.dec.remove_weight_norm() if is_half: self.vq_model = self.vq_model.half() self.vq_model = self.vq_model.to(device) self.vq_model.eval() self.hann_window = torch.hann_window( self.hps.data.win_length, device=device, dtype=torch.float16 if is_half else torch.float32 ) def forward(self, text_seq, pred_semantic, ref_audio, speed=1.0, sv_emb=None): refer = spectrogram_torch( self.hann_window, ref_audio, self.hps.data.filter_length, self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length, center=False, ) return self.vq_model(pred_semantic, text_seq, refer, speed=speed, sv_emb=sv_emb)[0, 0] class T2SModel(nn.Module): def __init__(self, raw_t2s: Text2SemanticLightningModule): super(T2SModel, 
self).__init__() self.model_dim = raw_t2s.model.model_dim self.embedding_dim = raw_t2s.model.embedding_dim self.num_head = raw_t2s.model.num_head self.num_layers = raw_t2s.model.num_layers self.vocab_size = raw_t2s.model.vocab_size self.phoneme_vocab_size = raw_t2s.model.phoneme_vocab_size # self.p_dropout = float(raw_t2s.model.p_dropout) self.EOS: int = int(raw_t2s.model.EOS) self.norm_first = raw_t2s.model.norm_first assert self.EOS == self.vocab_size - 1 self.hz = 50 self.bert_proj = raw_t2s.model.bert_proj self.ar_text_embedding = raw_t2s.model.ar_text_embedding self.ar_text_position = raw_t2s.model.ar_text_position self.ar_audio_embedding = raw_t2s.model.ar_audio_embedding self.ar_audio_position = raw_t2s.model.ar_audio_position # self.t2s_transformer = T2STransformer(self.num_layers, blocks) # self.t2s_transformer = raw_t2s.model.t2s_transformer blocks = [] h = raw_t2s.model.h for i in range(self.num_layers): layer = h.layers[i] t2smlp = T2SMLP(layer.linear1.weight, layer.linear1.bias, layer.linear2.weight, layer.linear2.bias) block = T2SBlock( self.num_head, self.model_dim, t2smlp, layer.self_attn.in_proj_weight, layer.self_attn.in_proj_bias, layer.self_attn.out_proj.weight, layer.self_attn.out_proj.bias, layer.norm1.weight, layer.norm1.bias, layer.norm1.eps, layer.norm2.weight, layer.norm2.bias, layer.norm2.eps, ) blocks.append(block) self.t2s_transformer = T2STransformer(self.num_layers, blocks) # self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False) self.ar_predict_layer = raw_t2s.model.ar_predict_layer # self.loss_fct = nn.CrossEntropyLoss(reduction="sum") self.max_sec = raw_t2s.config["data"]["max_sec"] self.top_k = int(raw_t2s.config["inference"]["top_k"]) self.early_stop_num = torch.LongTensor([self.hz * self.max_sec]) def forward( self, prompts: LongTensor, ref_seq: LongTensor, text_seq: LongTensor, ref_bert: torch.Tensor, text_bert: torch.Tensor, top_k: LongTensor, ): bert = torch.cat([ref_bert.T, text_bert.T], 1) all_phoneme_ids = torch.cat([ref_seq, text_seq], 1) bert = bert.unsqueeze(0) x = self.ar_text_embedding(all_phoneme_ids) # avoid dtype inconsistency when exporting bert = bert.to(dtype=self.bert_proj.weight.dtype) x = x + self.bert_proj(bert.transpose(1, 2)) x: torch.Tensor = self.ar_text_position(x) early_stop_num = self.early_stop_num # [1,N,512] [1,N] # y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) y = prompts # x_example = x[:,:,0] * 0.0 x_len = x.shape[1] x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool) y_emb = self.ar_audio_embedding(y) y_len = y_emb.shape[1] prefix_len = y.shape[1] y_pos = self.ar_audio_position(y_emb) xy_pos = torch.concat([x, y_pos], dim=1) bsz = x.shape[0] src_len = x_len + y_len x_attn_mask_pad = F.pad( x_attn_mask, (0, y_len), ###xx的纯0扩展到xx纯0+xy纯1,(x,x+y) value=True, ) y_attn_mask = F.pad( ###yy的右上1扩展到左边xy的0,(y,x+y) torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1), (x_len, 0), value=False, ) xy_attn_mask = ( torch.concat([x_attn_mask_pad, y_attn_mask], dim=0) .unsqueeze(0) .expand(bsz * self.num_head, -1, -1) .view(bsz, self.num_head, src_len, src_len) .to(device=x.device, dtype=torch.bool) ) idx = 0 top_k = int(top_k) xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, None) logits = self.ar_predict_layer(xy_dec[:, -1]) logits = logits[:, :-1] samples = sample(logits, y, top_k=top_k, top_p=1, repetition_penalty=1.35, temperature=1.0)[0] y = torch.concat([y, samples], dim=1) y_emb = self.ar_audio_embedding(y[:, -1:]) xy_pos = y_emb 
* self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[ :, y_len + idx ].to(dtype=y_emb.dtype, device=y_emb.device) stop = False # for idx in range(1, 50): for idx in range(1, 1500): # [1, N] [N_layer, N, 1, 512] [N_layer, N, 1, 512] [1, N, 512] [1] [1, N, 512] [1, N] # y, k, v, y_emb, logits, samples = self.stage_decoder(y, k, v, y_emb, x_example) xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache) logits = self.ar_predict_layer(xy_dec[:, -1]) if idx < 11: ###至少预测出10个token不然不给停止(0.4s) logits = logits[:, :-1] samples = sample(logits, y, top_k=top_k, top_p=1, repetition_penalty=1.35, temperature=1.0)[0] y = torch.concat([y, samples], dim=1) if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: stop = True if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS: stop = True if stop: if y.shape[1] == 0: y = torch.concat([y, torch.zeros_like(samples)], dim=1) break y_emb = self.ar_audio_embedding(y[:, -1:]) xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[ :, y_len + idx ].to(dtype=y_emb.dtype, device=y_emb.device) y[0, -1] = 0 return y[:, -idx:].unsqueeze(0) bert_path = os.environ.get("bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large") cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base" cnhubert.cnhubert_base_path = cnhubert_base_path @torch.jit.script def build_phone_level_feature(res: Tensor, word2ph: IntTensor): phone_level_feature = [] for i in range(word2ph.shape[0]): repeat_feature = res[i].repeat(word2ph[i].item(), 1) phone_level_feature.append(repeat_feature) phone_level_feature = torch.cat(phone_level_feature, dim=0) # [sum(word2ph), 1024] return phone_level_feature class MyBertModel(torch.nn.Module): def __init__(self, bert_model): super(MyBertModel, self).__init__() self.bert = bert_model def forward( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, word2ph: IntTensor ): outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids) # res = torch.cat(outputs["hidden_states"][-3:-2], -1)[0][1:-1] res = torch.cat(outputs[1][-3:-2], -1)[0][1:-1] return build_phone_level_feature(res, word2ph) class SSLModel(torch.nn.Module): def __init__(self): super().__init__() self.ssl = cnhubert.get_model().model def forward(self, ref_audio_16k) -> torch.Tensor: ssl_content = self.ssl(ref_audio_16k)["last_hidden_state"].transpose(1, 2) return ssl_content class ExportSSLModel(torch.nn.Module): def __init__(self, ssl: SSLModel): super().__init__() self.ssl = ssl def forward(self, ref_audio: torch.Tensor): return self.ssl(ref_audio) @torch.jit.export def resample(self, ref_audio: torch.Tensor, src_sr: int, dst_sr: int) -> torch.Tensor: audio = resamplex(ref_audio, src_sr, dst_sr).float() return audio def export_bert(output_path): tokenizer = AutoTokenizer.from_pretrained(bert_path) text = "叹息声一声接着一声传出,木兰对着房门织布.听不见织布机织布的声音,只听见木兰在叹息.问木兰在想什么?问木兰在惦记什么?木兰答道,我也没有在想什么,也没有在惦记什么." 
ref_bert_inputs = tokenizer(text, return_tensors="pt") word2ph = [] for c in text: if c in [",", "。", ":", "?", ",", ".", "?"]: word2ph.append(1) else: word2ph.append(2) ref_bert_inputs["word2ph"] = torch.Tensor(word2ph).int() bert_model = AutoModelForMaskedLM.from_pretrained(bert_path, output_hidden_states=True, torchscript=True) my_bert_model = MyBertModel(bert_model) ref_bert_inputs = { "input_ids": ref_bert_inputs["input_ids"], "attention_mask": ref_bert_inputs["attention_mask"], "token_type_ids": ref_bert_inputs["token_type_ids"], "word2ph": ref_bert_inputs["word2ph"], } torch._dynamo.mark_dynamic(ref_bert_inputs["input_ids"], 1) torch._dynamo.mark_dynamic(ref_bert_inputs["attention_mask"], 1) torch._dynamo.mark_dynamic(ref_bert_inputs["token_type_ids"], 1) torch._dynamo.mark_dynamic(ref_bert_inputs["word2ph"], 0) my_bert_model = torch.jit.trace(my_bert_model, example_kwarg_inputs=ref_bert_inputs) output_path = os.path.join(output_path, "bert_model.pt") my_bert_model.save(output_path) print("#### exported bert ####") def export(gpt_path, vits_path, ref_audio_path, ref_text, output_path, export_bert_and_ssl=False, device="cpu"): if not os.path.exists(output_path): os.makedirs(output_path) print(f"目录已创建: {output_path}") else: print(f"目录已存在: {output_path}") ref_audio = torch.tensor([load_audio(ref_audio_path, 16000)]).float() ssl = SSLModel() if export_bert_and_ssl: s = ExportSSLModel(torch.jit.trace(ssl, example_inputs=(ref_audio))) ssl_path = os.path.join(output_path, "ssl_model.pt") torch.jit.script(s).save(ssl_path) print("#### exported ssl ####") export_bert(output_path) else: s = ExportSSLModel(ssl) print(f"device: {device}") ref_seq_id, ref_bert_T, ref_norm_text = get_phones_and_bert(ref_text, "all_zh", "v2") ref_seq = torch.LongTensor([ref_seq_id]).to(device) ref_bert = ref_bert_T.T.to(ref_seq.device) text_seq_id, text_bert_T, norm_text = get_phones_and_bert( "这是一个简单的示例,真没想到这么简单就完成了。The King and His Stories.Once there was a king. He likes to write stories, but his stories were not good. 
As people were afraid of him, they all said his stories were good.After reading them, the writer at once turned to the soldiers and said: Take me back to prison, please.", "auto", "v2", ) text_seq = torch.LongTensor([text_seq_id]).to(device) text_bert = text_bert_T.T.to(text_seq.device) ssl_content = ssl(ref_audio).to(device) # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth" vits = VitsModel(vits_path, device=device, is_half=False) vits.eval() # gpt_path = "GPT_weights_v2/xw-e15.ckpt" # dict_s1 = torch.load(gpt_path, map_location=device) dict_s1 = torch.load(gpt_path, weights_only=False) raw_t2s = get_raw_t2s_model(dict_s1).to(device) print("#### get_raw_t2s_model ####") print(raw_t2s.config) t2s_m = T2SModel(raw_t2s) t2s_m.eval() t2s = torch.jit.script(t2s_m).to(device) print("#### script t2s_m ####") print("vits.hps.data.sampling_rate:", vits.hps.data.sampling_rate) gpt_sovits = GPT_SoVITS(t2s, vits).to(device) gpt_sovits.eval() ref_audio_sr = s.resample(ref_audio, 16000, 32000).to(device) torch._dynamo.mark_dynamic(ssl_content, 2) torch._dynamo.mark_dynamic(ref_audio_sr, 1) torch._dynamo.mark_dynamic(ref_seq, 1) torch._dynamo.mark_dynamic(text_seq, 1) torch._dynamo.mark_dynamic(ref_bert, 0) torch._dynamo.mark_dynamic(text_bert, 0) top_k = torch.LongTensor([5]).to(device) with torch.no_grad(): gpt_sovits_export = torch.jit.trace( gpt_sovits, example_inputs=(ssl_content, ref_audio_sr, ref_seq, text_seq, ref_bert, text_bert, top_k) ) gpt_sovits_path = os.path.join(output_path, "gpt_sovits_model.pt") gpt_sovits_export.save(gpt_sovits_path) print("#### exported gpt_sovits ####") def export_prov2( gpt_path, vits_path, version, ref_audio_path, ref_text, output_path, export_bert_and_ssl=False, device="cpu", is_half=True, ): if sv_cn_model == None: init_sv_cn(device, is_half) if not os.path.exists(output_path): os.makedirs(output_path) print(f"目录已创建: {output_path}") else: print(f"目录已存在: {output_path}") ref_audio = torch.tensor([load_audio(ref_audio_path, 16000)]).float() ssl = SSLModel() if export_bert_and_ssl: s = ExportSSLModel(torch.jit.trace(ssl, example_inputs=(ref_audio))) ssl_path = os.path.join(output_path, "ssl_model.pt") torch.jit.script(s).save(ssl_path) print("#### exported ssl ####") export_bert(output_path) else: s = ExportSSLModel(ssl) print(f"device: {device}") ref_seq_id, ref_bert_T, ref_norm_text = get_phones_and_bert(ref_text, "all_zh", "v2") ref_seq = torch.LongTensor([ref_seq_id]).to(device) ref_bert = ref_bert_T.T if is_half: ref_bert = ref_bert.half() ref_bert = ref_bert.to(ref_seq.device) text_seq_id, text_bert_T, norm_text = get_phones_and_bert( "这是一个简单的示例,真没想到这么简单就完成了。The King and His Stories.Once there was a king. He likes to write stories, but his stories were not good. 
As people were afraid of him, they all said his stories were good.After reading them, the writer at once turned to the soldiers and said: Take me back to prison, please.", "auto", "v2", ) text_seq = torch.LongTensor([text_seq_id]).to(device) text_bert = text_bert_T.T if is_half: text_bert = text_bert.half() text_bert = text_bert.to(text_seq.device) ssl_content = ssl(ref_audio) if is_half: ssl_content = ssl_content.half() ssl_content = ssl_content.to(device) sv_model = ExportERes2NetV2(sv_cn_model) # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth" vits = VitsModel(vits_path, version, is_half=is_half, device=device) vits.eval() # gpt_path = "GPT_weights_v2/xw-e15.ckpt" # dict_s1 = torch.load(gpt_path, map_location=device) dict_s1 = torch.load(gpt_path, weights_only=False) raw_t2s = get_raw_t2s_model(dict_s1).to(device) print("#### get_raw_t2s_model ####") print(raw_t2s.config) if is_half: raw_t2s = raw_t2s.half() t2s_m = T2SModel(raw_t2s) t2s_m.eval() t2s = torch.jit.script(t2s_m).to(device) print("#### script t2s_m ####") print("vits.hps.data.sampling_rate:", vits.hps.data.sampling_rate) gpt_sovits = GPT_SoVITS_V2Pro(t2s, vits, sv_model).to(device) gpt_sovits.eval() ref_audio_sr = s.resample(ref_audio, 16000, 32000) if is_half: ref_audio_sr = ref_audio_sr.half() ref_audio_sr = ref_audio_sr.to(device) torch._dynamo.mark_dynamic(ssl_content, 2) torch._dynamo.mark_dynamic(ref_audio_sr, 1) torch._dynamo.mark_dynamic(ref_seq, 1) torch._dynamo.mark_dynamic(text_seq, 1) torch._dynamo.mark_dynamic(ref_bert, 0) torch._dynamo.mark_dynamic(text_bert, 0) # torch._dynamo.mark_dynamic(sv_emb, 0) top_k = torch.LongTensor([5]).to(device) # 先跑一遍 sv_model 让它加载 cache,详情见 L880 gpt_sovits.sv_model(ref_audio_sr) with torch.no_grad(): gpt_sovits_export = torch.jit.trace( gpt_sovits, example_inputs=( ssl_content, ref_audio_sr, ref_seq, text_seq, ref_bert, text_bert, top_k, ), ) gpt_sovits_path = os.path.join(output_path, "gpt_sovits_model.pt") gpt_sovits_export.save(gpt_sovits_path) print("#### exported gpt_sovits ####") audio = gpt_sovits_export(ssl_content, ref_audio_sr, ref_seq, text_seq, ref_bert, text_bert, top_k) print("start write wav") soundfile.write("out.wav", audio.float().detach().cpu().numpy(), 32000) @torch.jit.script def parse_audio(ref_audio): ref_audio_16k = torchaudio.functional.resample(ref_audio, 48000, 16000).float() # .to(ref_audio.device) ref_audio_sr = torchaudio.functional.resample(ref_audio, 48000, 32000).float() # .to(ref_audio.device) return ref_audio_16k, ref_audio_sr @torch.jit.script def resamplex(ref_audio: torch.Tensor, src_sr: int, dst_sr: int) -> torch.Tensor: return torchaudio.functional.resample(ref_audio, src_sr, dst_sr).float() class GPT_SoVITS(nn.Module): def __init__(self, t2s: T2SModel, vits: VitsModel): super().__init__() self.t2s = t2s self.vits = vits def forward( self, ssl_content: torch.Tensor, ref_audio_sr: torch.Tensor, ref_seq: Tensor, text_seq: Tensor, ref_bert: Tensor, text_bert: Tensor, top_k: LongTensor, speed=1.0, ): codes = self.vits.vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompts = prompt_semantic.unsqueeze(0) pred_semantic = self.t2s(prompts, ref_seq, text_seq, ref_bert, text_bert, top_k) audio = self.vits(text_seq, pred_semantic, ref_audio_sr, speed) return audio class ExportERes2NetV2(nn.Module): def __init__(self, sv_cn_model: SV): super(ExportERes2NetV2, self).__init__() self.bn1 = sv_cn_model.embedding_model.bn1 self.conv1 = sv_cn_model.embedding_model.conv1 self.layer1 = sv_cn_model.embedding_model.layer1 self.layer2 = 
sv_cn_model.embedding_model.layer2 self.layer3 = sv_cn_model.embedding_model.layer3 self.layer4 = sv_cn_model.embedding_model.layer4 self.layer3_ds = sv_cn_model.embedding_model.layer3_ds self.fuse34 = sv_cn_model.embedding_model.fuse34 # audio_16k.shape: [1,N] def forward(self, audio_16k): # 这个 fbank 函数有一个 cache, 不过不要紧,它跟 audio_16k 的长度无关 # 只跟 device 和 dtype 有关 x = Kaldi.fbank(audio_16k, num_mel_bins=80, sample_frequency=16000, dither=0) x = torch.stack([x]) x = x.permute(0, 2, 1) # (B,T,F) => (B,F,T) x = x.unsqueeze_(1) out = F.relu(self.bn1(self.conv1(x))) out1 = self.layer1(out) out2 = self.layer2(out1) out3 = self.layer3(out2) out4 = self.layer4(out3) out3_ds = self.layer3_ds(out3) fuse_out34 = self.fuse34(out4, out3_ds) return fuse_out34.flatten(start_dim=1, end_dim=2).mean(-1) class GPT_SoVITS_V2Pro(nn.Module): def __init__(self, t2s: T2SModel, vits: VitsModel, sv_model: ExportERes2NetV2): super().__init__() self.t2s = t2s self.vits = vits self.sv_model = sv_model def forward( self, ssl_content: torch.Tensor, ref_audio_sr: torch.Tensor, ref_seq: Tensor, text_seq: Tensor, ref_bert: Tensor, text_bert: Tensor, top_k: LongTensor, speed=1.0, ): codes = self.vits.vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] prompts = prompt_semantic.unsqueeze(0)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
true
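export_torch_script.py above compiles the autoregressive text-to-semantic stack with torch.jit.script (the sampling helpers and KV-cache blocks all carry @torch.jit.script) and then traces the assembled GPT_SoVITS module with torch.jit.trace, after tagging each variable-length example tensor via torch._dynamo.mark_dynamic. A minimal sketch of that mark-trace-save round trip on a toy module (names hypothetical):

```python
import torch

class Tiny(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(x).mean(dim=1)

m = Tiny().eval()
example = torch.randn(1, 37, 8)
torch._dynamo.mark_dynamic(example, 1)  # mirror the exporter: dim 1 (sequence length) varies

with torch.no_grad():
    traced = torch.jit.trace(m, example_inputs=(example,))
traced.save("tiny_model.pt")

# The saved artifact reloads without the Python class being importable:
restored = torch.jit.load("tiny_model.pt")
print(restored(torch.randn(2, 5, 8)).shape)  # torch.Size([2, 8])
```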
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/sv.py
GPT_SoVITS/sv.py
import sys
import os

import torch

sys.path.append(f"{os.getcwd()}/GPT_SoVITS/eres2net")
sv_path = "GPT_SoVITS/pretrained_models/sv/pretrained_eres2netv2w24s4ep4.ckpt"
from ERes2NetV2 import ERes2NetV2
import kaldi as Kaldi


class SV:
    """Speaker-verification embedding extractor backed by a pretrained ERes2NetV2."""

    def __init__(self, device, is_half):
        pretrained_state = torch.load(sv_path, map_location="cpu", weights_only=False)
        embedding_model = ERes2NetV2(baseWidth=24, scale=4, expansion=4)
        embedding_model.load_state_dict(pretrained_state)
        embedding_model.eval()
        self.embedding_model = embedding_model
        if not is_half:
            self.embedding_model = self.embedding_model.to(device)
        else:
            self.embedding_model = self.embedding_model.half().to(device)
        self.is_half = is_half

    def compute_embedding3(self, wav):
        # wav: (batch, samples) waveforms at 16 kHz -> (batch, dim) speaker embeddings
        with torch.no_grad():
            if self.is_half:
                wav = wav.half()
            feat = torch.stack(
                [Kaldi.fbank(wav0.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) for wav0 in wav]
            )
            sv_emb = self.embedding_model.forward3(feat)
        return sv_emb
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
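SV.compute_embedding3 above expects a batch of raw 16 kHz waveforms, stacks per-utterance 80-bin Kaldi fbank features, and feeds them to ERes2NetV2.forward3. Assuming the vendored kaldi module matches torchaudio.compliance.kaldi's fbank signature (the call sites suggest it does), the feature step can be previewed standalone:

```python
import torch
import torchaudio.compliance.kaldi as kaldi

batch = torch.randn(2, 16000)  # two 1-second utterances at 16 kHz
feat = torch.stack(
    [kaldi.fbank(w.unsqueeze(0), num_mel_bins=80, sample_frequency=16000, dither=0) for w in batch]
)
print(feat.shape)  # torch.Size([2, 98, 80]) -- (batch, frames, mel bins)
```

dither=0 keeps the features deterministic, which matters when the same reference audio is embedded repeatedly.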
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/onnx_export.py
GPT_SoVITS/onnx_export.py
import torch import torchaudio from AR.models.t2s_lightning_module_onnx import Text2SemanticLightningModule from feature_extractor import cnhubert from module.models_onnx import SynthesizerTrn, symbols_v1, symbols_v2 from torch import nn cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base" cnhubert.cnhubert_base_path = cnhubert_base_path ssl_model = cnhubert.get_model() import json import os import soundfile from text import cleaned_text_to_sequence def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): hann_window = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) y = torch.nn.functional.pad( y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window, center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=False, ) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) return spec class DictToAttrRecursive(dict): def __init__(self, input_dict): super().__init__(input_dict) for key, value in input_dict.items(): if isinstance(value, dict): value = DictToAttrRecursive(value) self[key] = value setattr(self, key, value) def __getattr__(self, item): try: return self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") def __setattr__(self, key, value): if isinstance(value, dict): value = DictToAttrRecursive(value) super(DictToAttrRecursive, self).__setitem__(key, value) super().__setattr__(key, value) def __delattr__(self, item): try: del self[item] except KeyError: raise AttributeError(f"Attribute {item} not found") class T2SEncoder(nn.Module): def __init__(self, t2s, vits): super().__init__() self.encoder = t2s.onnx_encoder self.vits = vits def forward(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content): codes = self.vits.extract_latent(ssl_content) prompt_semantic = codes[0, 0] bert = torch.cat([ref_bert.transpose(0, 1), text_bert.transpose(0, 1)], 1) all_phoneme_ids = torch.cat([ref_seq, text_seq], 1) bert = bert.unsqueeze(0) prompt = prompt_semantic.unsqueeze(0) return self.encoder(all_phoneme_ids, bert), prompt class T2SModel(nn.Module): def __init__(self, t2s_path, vits_model): super().__init__() dict_s1 = torch.load(t2s_path, map_location="cpu") self.config = dict_s1["config"] self.t2s_model = Text2SemanticLightningModule(self.config, "ojbk", is_train=False) self.t2s_model.load_state_dict(dict_s1["weight"]) self.t2s_model.eval() self.vits_model = vits_model.vq_model self.hz = 50 self.max_sec = self.config["data"]["max_sec"] self.t2s_model.model.top_k = torch.LongTensor([self.config["inference"]["top_k"]]) self.t2s_model.model.early_stop_num = torch.LongTensor([self.hz * self.max_sec]) self.t2s_model = self.t2s_model.model self.t2s_model.init_onnx() self.onnx_encoder = T2SEncoder(self.t2s_model, self.vits_model) self.first_stage_decoder = self.t2s_model.first_stage_decoder self.stage_decoder = self.t2s_model.stage_decoder # self.t2s_model = torch.jit.script(self.t2s_model) def forward(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content): early_stop_num = self.t2s_model.early_stop_num # [1,N] [1,N] [N, 1024] [N, 1024] [1, 768, N] x, prompts = self.onnx_encoder(ref_seq, text_seq, ref_bert, text_bert, ssl_content) prefix_len = prompts.shape[1] # [1,N,512] [1,N] y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) stop = False for idx in range(1, 1500): # [1, N] [N_layer, N, 1, 512] [N_layer, N, 1, 512] [1, N, 512] [1] [1, N, 
512] [1, N] enco = self.stage_decoder(y, k, v, y_emb, x_example) y, k, v, y_emb, logits, samples = enco if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: stop = True if torch.argmax(logits, dim=-1)[0] == self.t2s_model.EOS or samples[0, 0] == self.t2s_model.EOS: stop = True if stop: break y[0, -1] = 0 return y[:, -idx:].unsqueeze(0) def export(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content, project_name, dynamo=False): # self.onnx_encoder = torch.jit.script(self.onnx_encoder) if dynamo: export_options = torch.onnx.ExportOptions(dynamic_shapes=True) onnx_encoder_export_output = torch.onnx.dynamo_export( self.onnx_encoder, (ref_seq, text_seq, ref_bert, text_bert, ssl_content), export_options=export_options ) onnx_encoder_export_output.save(f"onnx/{project_name}/{project_name}_t2s_encoder.onnx") return torch.onnx.export( self.onnx_encoder, (ref_seq, text_seq, ref_bert, text_bert, ssl_content), f"onnx/{project_name}/{project_name}_t2s_encoder.onnx", input_names=["ref_seq", "text_seq", "ref_bert", "text_bert", "ssl_content"], output_names=["x", "prompts"], dynamic_axes={ "ref_seq": {1: "ref_length"}, "text_seq": {1: "text_length"}, "ref_bert": {0: "ref_length"}, "text_bert": {0: "text_length"}, "ssl_content": {2: "ssl_length"}, }, opset_version=16, ) x, prompts = self.onnx_encoder(ref_seq, text_seq, ref_bert, text_bert, ssl_content) torch.onnx.export( self.first_stage_decoder, (x, prompts), f"onnx/{project_name}/{project_name}_t2s_fsdec.onnx", input_names=["x", "prompts"], output_names=["y", "k", "v", "y_emb", "x_example"], dynamic_axes={ "x": {1: "x_length"}, "prompts": {1: "prompts_length"}, }, verbose=False, opset_version=16, ) y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) torch.onnx.export( self.stage_decoder, (y, k, v, y_emb, x_example), f"onnx/{project_name}/{project_name}_t2s_sdec.onnx", input_names=["iy", "ik", "iv", "iy_emb", "ix_example"], output_names=["y", "k", "v", "y_emb", "logits", "samples"], dynamic_axes={ "iy": {1: "iy_length"}, "ik": {1: "ik_length"}, "iv": {1: "iv_length"}, "iy_emb": {1: "iy_emb_length"}, "ix_example": {1: "ix_example_length"}, }, verbose=False, opset_version=16, ) class VitsModel(nn.Module): def __init__(self, vits_path): super().__init__() dict_s2 = torch.load(vits_path, map_location="cpu") self.hps = dict_s2["config"] if dict_s2["weight"]["enc_p.text_embedding.weight"].shape[0] == 322: self.hps["model"]["version"] = "v1" else: self.hps["model"]["version"] = "v2" self.hps = DictToAttrRecursive(self.hps) self.hps.model.semantic_frame_rate = "25hz" self.vq_model = SynthesizerTrn( self.hps.data.filter_length // 2 + 1, self.hps.train.segment_size // self.hps.data.hop_length, n_speakers=self.hps.data.n_speakers, **self.hps.model, ) self.vq_model.eval() self.vq_model.load_state_dict(dict_s2["weight"], strict=False) def forward(self, text_seq, pred_semantic, ref_audio): refer = spectrogram_torch( ref_audio, self.hps.data.filter_length, self.hps.data.sampling_rate, self.hps.data.hop_length, self.hps.data.win_length, center=False, ) return self.vq_model(pred_semantic, text_seq, refer)[0, 0] class GptSoVits(nn.Module): def __init__(self, vits, t2s): super().__init__() self.vits = vits self.t2s = t2s def forward(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content, debug=False): pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content) audio = self.vits(text_seq, pred_semantic, ref_audio) if debug: import onnxruntime sess = 
onnxruntime.InferenceSession("onnx/koharu/koharu_vits.onnx", providers=["CPU"]) audio1 = sess.run( None, { "text_seq": text_seq.detach().cpu().numpy(), "pred_semantic": pred_semantic.detach().cpu().numpy(), "ref_audio": ref_audio.detach().cpu().numpy(), }, ) return audio, audio1 return audio def export(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content, project_name): self.t2s.export(ref_seq, text_seq, ref_bert, text_bert, ssl_content, project_name) pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content) torch.onnx.export( self.vits, (text_seq, pred_semantic, ref_audio), f"onnx/{project_name}/{project_name}_vits.onnx", input_names=["text_seq", "pred_semantic", "ref_audio"], output_names=["audio"], dynamic_axes={ "text_seq": {1: "text_length"}, "pred_semantic": {2: "pred_length"}, "ref_audio": {1: "audio_length"}, }, opset_version=17, verbose=False, ) class SSLModel(nn.Module): def __init__(self): super().__init__() self.ssl = ssl_model def forward(self, ref_audio_16k): return self.ssl.model(ref_audio_16k)["last_hidden_state"].transpose(1, 2) def export(vits_path, gpt_path, project_name, vits_model="v2"): vits = VitsModel(vits_path) gpt = T2SModel(gpt_path, vits) gpt_sovits = GptSoVits(vits, gpt) ssl = SSLModel() ref_seq = torch.LongTensor( [ cleaned_text_to_sequence( [ "n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", ], version=vits_model, ) ] ) text_seq = torch.LongTensor( [ cleaned_text_to_sequence( [ "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", ], version=vits_model, ) ] ) ref_bert = torch.randn((ref_seq.shape[1], 1024)).float() text_bert = torch.randn((text_seq.shape[1], 1024)).float() ref_audio = torch.randn((1, 48000 * 5)).float() # ref_audio = torch.tensor([load_audio("rec.wav", 48000)]).float() ref_audio_16k = torchaudio.functional.resample(ref_audio, 48000, 16000).float() ref_audio_sr = torchaudio.functional.resample(ref_audio, 48000, vits.hps.data.sampling_rate).float() try: os.mkdir(f"onnx/{project_name}") except: pass ssl_content = ssl(ref_audio_16k).float() # debug = False debug = True # gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name) if debug: a, b = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, debug=debug) soundfile.write("out1.wav", a.cpu().detach().numpy(), vits.hps.data.sampling_rate) soundfile.write("out2.wav", b[0], vits.hps.data.sampling_rate) else: a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy() soundfile.write("out.wav", a, vits.hps.data.sampling_rate) if vits_model == "v1": symbols = symbols_v1 else: symbols = symbols_v2 MoeVSConf = { "Folder": f"{project_name}", "Name": f"{project_name}", "Type": "GPT-SoVits", "Rate": vits.hps.data.sampling_rate, "NumLayers": gpt.t2s_model.num_layers, "EmbeddingDim": gpt.t2s_model.embedding_dim, "Dict": "BasicDict", "BertPath": "chinese-roberta-wwm-ext-large", # "Symbol": symbols, "AddBlank": False, } MoeVSConfJson = json.dumps(MoeVSConf) with open(f"onnx/{project_name}.json", "w") as MoeVsConfFile: json.dump(MoeVSConf, MoeVsConfFile, indent=4) if __name__ == "__main__": try: os.mkdir("onnx") except: pass gpt_path = "GPT_weights/nahida-e25.ckpt" vits_path = "SoVITS_weights/nahida_e30_s3930.pth" exp_path = "nahida" export(vits_path, gpt_path, exp_path) # soundfile.write("out.wav", a, vits.hps.data.sampling_rate)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
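The `onnx_export.py` record above splits the text-to-semantic model into three graphs (`*_t2s_encoder.onnx`, `*_t2s_fsdec.onnx`, `*_t2s_sdec.onnx`) whose input/output names are fixed by the `torch.onnx.export` calls. Below is a minimal sketch of driving those exported graphs with onnxruntime, mirroring `T2SModel.forward`; the EOS id of 1024 is an assumption to verify against your checkpoint, and note that onnxruntime expects full provider names such as `"CPUExecutionProvider"` (the debug branch above passes `"CPU"`, which newer onnxruntime versions reject).

```python
# Hypothetical driver for the exported T2S graphs; mirrors T2SModel.forward.
# Assumptions: eos_id=1024 matches the model's EOS token, and the inputs are
# numpy arrays with the dtypes used at export time (int64 ids, float32 features).
import numpy as np
import onnxruntime as ort

def run_t2s(project, ref_seq, text_seq, ref_bert, text_bert, ssl_content,
            eos_id=1024, early_stop_num=-1, max_steps=1500):
    base = f"onnx/{project}/{project}_t2s"
    providers = ["CPUExecutionProvider"]
    enc = ort.InferenceSession(f"{base}_encoder.onnx", providers=providers)
    fsdec = ort.InferenceSession(f"{base}_fsdec.onnx", providers=providers)
    sdec = ort.InferenceSession(f"{base}_sdec.onnx", providers=providers)

    # Encoder: phoneme ids + BERT features + SSL content -> (x, prompts)
    x, prompts = enc.run(None, {"ref_seq": ref_seq, "text_seq": text_seq,
                                "ref_bert": ref_bert, "text_bert": text_bert,
                                "ssl_content": ssl_content})
    prefix_len = prompts.shape[1]
    # First-stage decoder primes the KV cache
    y, k, v, y_emb, x_example = fsdec.run(None, {"x": x, "prompts": prompts})
    # Step decoder loops until EOS, as in T2SModel.forward
    for idx in range(1, max_steps):
        y, k, v, y_emb, logits, samples = sdec.run(
            None, {"iy": y, "ik": k, "iv": v, "iy_emb": y_emb, "ix_example": x_example})
        if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
            break
        if np.argmax(logits, axis=-1)[0] == eos_id or samples[0, 0] == eos_id:
            break
    y[0, -1] = 0  # zero out the EOS token, as in T2SModel.forward
    return y[:, -idx:][None]  # [1, 1, N] semantic tokens for the VITS graph
```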
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/meldataset.py
GPT_SoVITS/BigVGAN/meldataset.py
# Copyright (c) 2024 NVIDIA CORPORATION. # Licensed under the MIT license. # Adapted from https://github.com/jik876/hifi-gan under the MIT license. # LICENSE is in incl_licenses directory. import math import os import random import torch import torch.utils.data import numpy as np import librosa from librosa.filters import mel as librosa_mel_fn import pathlib from tqdm import tqdm from typing import List, Tuple, Optional from .env import AttrDict MAX_WAV_VALUE = 32767.0 # NOTE: 32768.0 -1 to prevent int16 overflow (results in popping sound in corner cases) def dynamic_range_compression(x, C=1, clip_val=1e-5): return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) def dynamic_range_decompression(x, C=1): return np.exp(x) / C def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): return torch.log(torch.clamp(x, min=clip_val) * C) def dynamic_range_decompression_torch(x, C=1): return torch.exp(x) / C def spectral_normalize_torch(magnitudes): return dynamic_range_compression_torch(magnitudes) def spectral_de_normalize_torch(magnitudes): return dynamic_range_decompression_torch(magnitudes) mel_basis_cache = {} hann_window_cache = {} def mel_spectrogram( y: torch.Tensor, n_fft: int, num_mels: int, sampling_rate: int, hop_size: int, win_size: int, fmin: int, fmax: int = None, center: bool = False, ) -> torch.Tensor: """ Calculate the mel spectrogram of an input signal. This function uses slaney norm for the librosa mel filterbank (using librosa.filters.mel) and uses Hann window for STFT (using torch.stft). Args: y (torch.Tensor): Input signal. n_fft (int): FFT size. num_mels (int): Number of mel bins. sampling_rate (int): Sampling rate of the input signal. hop_size (int): Hop size for STFT. win_size (int): Window size for STFT. fmin (int): Minimum frequency for mel filterbank. fmax (int): Maximum frequency for mel filterbank. If None, defaults to half the sampling rate (fmax = sr / 2.0) inside librosa_mel_fn center (bool): Whether to pad the input to center the frames. Default is False. Returns: torch.Tensor: Mel spectrogram. """ if torch.min(y) < -1.0: print(f"[WARNING] Min value of input waveform signal is {torch.min(y)}") if torch.max(y) > 1.0: print(f"[WARNING] Max value of input waveform signal is {torch.max(y)}") device = y.device key = f"{n_fft}_{num_mels}_{sampling_rate}_{hop_size}_{win_size}_{fmin}_{fmax}_{device}" if key not in mel_basis_cache: mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) mel_basis_cache[key] = torch.from_numpy(mel).float().to(device) hann_window_cache[key] = torch.hann_window(win_size).to(device) mel_basis = mel_basis_cache[key] hann_window = hann_window_cache[key] padding = (n_fft - hop_size) // 2 y = torch.nn.functional.pad(y.unsqueeze(1), (padding, padding), mode="reflect").squeeze(1) spec = torch.stft( y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window, center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ) spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9) mel_spec = torch.matmul(mel_basis, spec) mel_spec = spectral_normalize_torch(mel_spec) return mel_spec def get_mel_spectrogram(wav, h): """ Generate mel spectrogram from a waveform using given hyperparameters. Args: wav (torch.Tensor): Input waveform. h: Hyperparameters object with attributes n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax. Returns: torch.Tensor: Mel spectrogram. 
""" return mel_spectrogram( wav, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax, ) def get_dataset_filelist(a): training_files = [] validation_files = [] list_unseen_validation_files = [] with open(a.input_training_file, "r", encoding="utf-8") as fi: training_files = [ os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 ] print(f"first training file: {training_files[0]}") with open(a.input_validation_file, "r", encoding="utf-8") as fi: validation_files = [ os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 ] print(f"first validation file: {validation_files[0]}") for i in range(len(a.list_input_unseen_validation_file)): with open(a.list_input_unseen_validation_file[i], "r", encoding="utf-8") as fi: unseen_validation_files = [ os.path.join(a.list_input_unseen_wavs_dir[i], x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 ] print(f"first unseen {i}th validation fileset: {unseen_validation_files[0]}") list_unseen_validation_files.append(unseen_validation_files) return training_files, validation_files, list_unseen_validation_files class MelDataset(torch.utils.data.Dataset): def __init__( self, training_files: List[str], hparams: AttrDict, segment_size: int, n_fft: int, num_mels: int, hop_size: int, win_size: int, sampling_rate: int, fmin: int, fmax: Optional[int], split: bool = True, shuffle: bool = True, device: str = None, fmax_loss: Optional[int] = None, fine_tuning: bool = False, base_mels_path: str = None, is_seen: bool = True, ): self.audio_files = training_files random.seed(1234) if shuffle: random.shuffle(self.audio_files) self.hparams = hparams self.is_seen = is_seen if self.is_seen: self.name = pathlib.Path(self.audio_files[0]).parts[0] else: self.name = "-".join(pathlib.Path(self.audio_files[0]).parts[:2]).strip("/") self.segment_size = segment_size self.sampling_rate = sampling_rate self.split = split self.n_fft = n_fft self.num_mels = num_mels self.hop_size = hop_size self.win_size = win_size self.fmin = fmin self.fmax = fmax self.fmax_loss = fmax_loss self.device = device self.fine_tuning = fine_tuning self.base_mels_path = base_mels_path print("[INFO] checking dataset integrity...") for i in tqdm(range(len(self.audio_files))): assert os.path.exists(self.audio_files[i]), f"{self.audio_files[i]} not found" def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor, str, torch.Tensor]: try: filename = self.audio_files[index] # Use librosa.load that ensures loading waveform into mono with [-1, 1] float values # Audio is ndarray with shape [T_time]. 
Disable auto-resampling here to minimize overhead # The on-the-fly resampling during training will be done only for the obtained random chunk audio, source_sampling_rate = librosa.load(filename, sr=None, mono=True) # Main logic that uses <mel, audio> pair for training BigVGAN if not self.fine_tuning: if self.split: # Training step # Obtain randomized audio chunk if source_sampling_rate != self.sampling_rate: # Adjust segment size to crop if the source sr is different target_segment_size = math.ceil(self.segment_size * (source_sampling_rate / self.sampling_rate)) else: target_segment_size = self.segment_size # Compute upper bound index for the random chunk random_chunk_upper_bound = max(0, audio.shape[0] - target_segment_size) # Crop or pad audio to obtain random chunk with target_segment_size if audio.shape[0] >= target_segment_size: audio_start = random.randint(0, random_chunk_upper_bound) audio = audio[audio_start : audio_start + target_segment_size] else: audio = np.pad( audio, (0, target_segment_size - audio.shape[0]), mode="constant", ) # Resample audio chunk to self.sampling rate if source_sampling_rate != self.sampling_rate: audio = librosa.resample( audio, orig_sr=source_sampling_rate, target_sr=self.sampling_rate, ) if audio.shape[0] > self.segment_size: # trim last elements to match self.segment_size (e.g., 16385 for 44khz downsampled to 24khz -> 16384) audio = audio[: self.segment_size] else: # Validation step # Resample full audio clip to target sampling rate if source_sampling_rate != self.sampling_rate: audio = librosa.resample( audio, orig_sr=source_sampling_rate, target_sr=self.sampling_rate, ) # Trim last elements to match audio length to self.hop_size * n for evaluation if (audio.shape[0] % self.hop_size) != 0: audio = audio[: -(audio.shape[0] % self.hop_size)] # BigVGAN is trained using volume-normalized waveform audio = librosa.util.normalize(audio) * 0.95 # Cast ndarray to torch tensor audio = torch.FloatTensor(audio) audio = audio.unsqueeze(0) # [B(1), self.segment_size] # Compute mel spectrogram corresponding to audio mel = mel_spectrogram( audio, self.n_fft, self.num_mels, self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax, center=False, ) # [B(1), self.num_mels, self.segment_size // self.hop_size] # Fine-tuning logic that uses pre-computed mel. Example: Using TTS model-generated mel as input else: # For fine-tuning, assert that the waveform is in the defined sampling_rate # Fine-tuning won't support on-the-fly resampling to be fool-proof (the dataset should have been prepared properly) assert source_sampling_rate == self.sampling_rate, ( f"For fine_tuning, waveform must be in the spcified sampling rate {self.sampling_rate}, got {source_sampling_rate}" ) # Cast ndarray to torch tensor audio = torch.FloatTensor(audio) audio = audio.unsqueeze(0) # [B(1), T_time] # Load pre-computed mel from disk mel = np.load( os.path.join( self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + ".npy", ) ) mel = torch.from_numpy(mel) if len(mel.shape) < 3: mel = mel.unsqueeze(0) # ensure [B, C, T] if self.split: frames_per_seg = math.ceil(self.segment_size / self.hop_size) if audio.size(1) >= self.segment_size: mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) mel = mel[:, :, mel_start : mel_start + frames_per_seg] audio = audio[ :, mel_start * self.hop_size : (mel_start + frames_per_seg) * self.hop_size, ] # Pad pre-computed mel and audio to match length to ensuring fine-tuning without error. 
# NOTE: this may introduce a single-frame misalignment of the <pre-computed mel, audio> # To remove possible misalignment, it is recommended to prepare the <pre-computed mel, audio> pair where the audio length is the integer multiple of self.hop_size mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), "constant") audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant") # Compute mel_loss used by spectral regression objective. Uses self.fmax_loss instead (usually None) mel_loss = mel_spectrogram( audio, self.n_fft, self.num_mels, self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss, center=False, ) # [B(1), self.num_mels, self.segment_size // self.hop_size] # Shape sanity checks assert ( audio.shape[1] == mel.shape[2] * self.hop_size and audio.shape[1] == mel_loss.shape[2] * self.hop_size ), ( f"Audio length must be mel frame length * hop_size. Got audio shape {audio.shape} mel shape {mel.shape} mel_loss shape {mel_loss.shape}" ) return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) # If it encounters error during loading the data, skip this sample and load random other sample to the batch except Exception as e: if self.fine_tuning: raise e # Terminate training if it is fine-tuning. The dataset should have been prepared properly. else: print(f"[WARNING] Failed to load waveform, skipping! filename: {filename} Error: {e}") return self[random.randrange(len(self))] def __len__(self): return len(self.audio_files)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
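`mel_spectrogram` in the `meldataset.py` record above caches the mel filterbank and Hann window per (parameters, device) key and, with `center=False`, reflect-pads by `(n_fft - hop_size) // 2` on each side, so a waveform of `T` samples maps to exactly `T // hop_size` frames. A quick sanity check of that framing math; the parameter values are illustrative and the package-style import path is an assumption about how the repo is laid out on `sys.path`:

```python
# Sanity-check the framing: 1 s at 24 kHz with hop 256 -> 24000 // 256 = 93 frames.
import torch
from GPT_SoVITS.BigVGAN.meldataset import mel_spectrogram  # import path assumed

wav = torch.rand(1, 24000) * 2 - 1  # white noise kept inside (-1, 1)
mel = mel_spectrogram(wav, n_fft=1024, num_mels=100, sampling_rate=24000,
                      hop_size=256, win_size=1024, fmin=0, fmax=None)
print(mel.shape)  # torch.Size([1, 100, 93])
```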
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/train.py
GPT_SoVITS/BigVGAN/train.py
# Copyright (c) 2024 NVIDIA CORPORATION. # Licensed under the MIT license. # Adapted from https://github.com/jik876/hifi-gan under the MIT license. # LICENSE is in incl_licenses directory. import warnings warnings.simplefilter(action="ignore", category=FutureWarning) import itertools import os import time import argparse import json import torch import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from torch.utils.data import DistributedSampler, DataLoader import torch.multiprocessing as mp from torch.distributed import init_process_group from torch.nn.parallel import DistributedDataParallel from env import AttrDict, build_env from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist, MAX_WAV_VALUE from bigvgan import BigVGAN from discriminators import ( MultiPeriodDiscriminator, MultiResolutionDiscriminator, MultiBandDiscriminator, MultiScaleSubbandCQTDiscriminator, ) from loss import ( feature_loss, generator_loss, discriminator_loss, MultiScaleMelSpectrogramLoss, ) from utils import ( plot_spectrogram, plot_spectrogram_clipped, scan_checkpoint, load_checkpoint, save_checkpoint, save_audio, ) import torchaudio as ta from pesq import pesq from tqdm import tqdm import auraloss torch.backends.cudnn.benchmark = False def train(rank, a, h): if h.num_gpus > 1: # initialize distributed init_process_group( backend=h.dist_config["dist_backend"], init_method=h.dist_config["dist_url"], world_size=h.dist_config["world_size"] * h.num_gpus, rank=rank, ) # Set seed and device torch.cuda.manual_seed(h.seed) torch.cuda.set_device(rank) device = torch.device(f"cuda:{rank:d}") # Define BigVGAN generator generator = BigVGAN(h).to(device) # Define discriminators. MPD is used by default mpd = MultiPeriodDiscriminator(h).to(device) # Define additional discriminators. 
BigVGAN-v1 uses UnivNet's MRD as default # New in BigVGAN-v2: option to switch to new discriminators: MultiBandDiscriminator / MultiScaleSubbandCQTDiscriminator if h.get("use_mbd_instead_of_mrd", False): # Switch to MBD print("[INFO] using MultiBandDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator") # Variable name is kept as "mrd" for backward compatibility & minimal code change mrd = MultiBandDiscriminator(h).to(device) elif h.get("use_cqtd_instead_of_mrd", False): # Switch to CQTD print("[INFO] using MultiScaleSubbandCQTDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator") mrd = MultiScaleSubbandCQTDiscriminator(h).to(device) else: # Fallback to original MRD in BigVGAN-v1 mrd = MultiResolutionDiscriminator(h).to(device) # New in BigVGAN-v2: option to switch to multi-scale L1 mel loss if h.get("use_multiscale_melloss", False): print("[INFO] using multi-scale Mel l1 loss of BigVGAN-v2 instead of the original single-scale loss") fn_mel_loss_multiscale = MultiScaleMelSpectrogramLoss( sampling_rate=h.sampling_rate ) # NOTE: accepts waveform as input else: fn_mel_loss_singlescale = F.l1_loss # Print the model & number of parameters, and create or scan the latest checkpoint from checkpoints directory if rank == 0: print(generator) print(mpd) print(mrd) print(f"Generator params: {sum(p.numel() for p in generator.parameters())}") print(f"Discriminator mpd params: {sum(p.numel() for p in mpd.parameters())}") print(f"Discriminator mrd params: {sum(p.numel() for p in mrd.parameters())}") os.makedirs(a.checkpoint_path, exist_ok=True) print(f"Checkpoints directory: {a.checkpoint_path}") if os.path.isdir(a.checkpoint_path): # New in v2.1: If the step prefix pattern-based checkpoints are not found, also check for renamed files in Hugging Face Hub to resume training cp_g = scan_checkpoint(a.checkpoint_path, prefix="g_", renamed_file="bigvgan_generator.pt") cp_do = scan_checkpoint( a.checkpoint_path, prefix="do_", renamed_file="bigvgan_discriminator_optimizer.pt", ) # Load the latest checkpoint if exists steps = 0 if cp_g is None or cp_do is None: state_dict_do = None last_epoch = -1 else: state_dict_g = load_checkpoint(cp_g, device) state_dict_do = load_checkpoint(cp_do, device) generator.load_state_dict(state_dict_g["generator"]) mpd.load_state_dict(state_dict_do["mpd"]) mrd.load_state_dict(state_dict_do["mrd"]) steps = state_dict_do["steps"] + 1 last_epoch = state_dict_do["epoch"] # Initialize DDP, optimizers, and schedulers if h.num_gpus > 1: generator = DistributedDataParallel(generator, device_ids=[rank]).to(device) mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device) mrd = DistributedDataParallel(mrd, device_ids=[rank]).to(device) optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2]) optim_d = torch.optim.AdamW( itertools.chain(mrd.parameters(), mpd.parameters()), h.learning_rate, betas=[h.adam_b1, h.adam_b2], ) if state_dict_do is not None: optim_g.load_state_dict(state_dict_do["optim_g"]) optim_d.load_state_dict(state_dict_do["optim_d"]) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch) # Define training and validation datasets """ unseen_validation_filelist will contain sample filepaths outside the seen training & validation dataset Example: trained on LibriTTS, validate on VCTK """ training_filelist, validation_filelist, 
list_unseen_validation_filelist = get_dataset_filelist(a) trainset = MelDataset( training_filelist, h, h.segment_size, h.n_fft, h.num_mels, h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, shuffle=False if h.num_gpus > 1 else True, fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir, is_seen=True, ) train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None train_loader = DataLoader( trainset, num_workers=h.num_workers, shuffle=False, sampler=train_sampler, batch_size=h.batch_size, pin_memory=True, drop_last=True, ) if rank == 0: validset = MelDataset( validation_filelist, h, h.segment_size, h.n_fft, h.num_mels, h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir, is_seen=True, ) validation_loader = DataLoader( validset, num_workers=1, shuffle=False, sampler=None, batch_size=1, pin_memory=True, drop_last=True, ) list_unseen_validset = [] list_unseen_validation_loader = [] for i in range(len(list_unseen_validation_filelist)): unseen_validset = MelDataset( list_unseen_validation_filelist[i], h, h.segment_size, h.n_fft, h.num_mels, h.hop_size, h.win_size, h.sampling_rate, h.fmin, h.fmax, False, False, fmax_loss=h.fmax_for_loss, device=device, fine_tuning=a.fine_tuning, base_mels_path=a.input_mels_dir, is_seen=False, ) unseen_validation_loader = DataLoader( unseen_validset, num_workers=1, shuffle=False, sampler=None, batch_size=1, pin_memory=True, drop_last=True, ) list_unseen_validset.append(unseen_validset) list_unseen_validation_loader.append(unseen_validation_loader) # Tensorboard logger sw = SummaryWriter(os.path.join(a.checkpoint_path, "logs")) if a.save_audio: # Also save audio to disk if --save_audio is set to True os.makedirs(os.path.join(a.checkpoint_path, "samples"), exist_ok=True) """ Validation loop, "mode" parameter is automatically defined as (seen or unseen)_(name of the dataset). If the name of the dataset contains "nonspeech", it skips PESQ calculation to prevent errors """ def validate(rank, a, h, loader, mode="seen"): assert rank == 0, "validate should only run on rank=0" generator.eval() torch.cuda.empty_cache() val_err_tot = 0 val_pesq_tot = 0 val_mrstft_tot = 0 # Modules for evaluation metrics pesq_resampler = ta.transforms.Resample(h.sampling_rate, 16000).cuda() loss_mrstft = auraloss.freq.MultiResolutionSTFTLoss(device="cuda") if a.save_audio: # Also save audio to disk if --save_audio is set to True os.makedirs( os.path.join(a.checkpoint_path, "samples", f"gt_{mode}"), exist_ok=True, ) os.makedirs( os.path.join(a.checkpoint_path, "samples", f"{mode}_{steps:08d}"), exist_ok=True, ) with torch.no_grad(): print(f"step {steps} {mode} speaker validation...") # Loop over validation set and compute metrics for j, batch in enumerate(tqdm(loader)): x, y, _, y_mel = batch y = y.to(device) if hasattr(generator, "module"): y_g_hat = generator.module(x.to(device)) else: y_g_hat = generator(x.to(device)) y_mel = y_mel.to(device, non_blocking=True) y_g_hat_mel = mel_spectrogram( y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax_for_loss, ) min_t = min(y_mel.size(-1), y_g_hat_mel.size(-1)) val_err_tot += F.l1_loss(y_mel[..., :min_t], y_g_hat_mel[..., :min_t]).item() # PESQ calculation. 
only evaluate PESQ if it's speech signal (nonspeech PESQ will error out) if "nonspeech" not in mode: # Skips if the name of dataset (in mode string) contains "nonspeech" # Resample to 16000 for pesq y_16k = pesq_resampler(y) y_g_hat_16k = pesq_resampler(y_g_hat.squeeze(1)) y_int_16k = (y_16k[0] * MAX_WAV_VALUE).short().cpu().numpy() y_g_hat_int_16k = (y_g_hat_16k[0] * MAX_WAV_VALUE).short().cpu().numpy() val_pesq_tot += pesq(16000, y_int_16k, y_g_hat_int_16k, "wb") # MRSTFT calculation min_t = min(y.size(-1), y_g_hat.size(-1)) val_mrstft_tot += loss_mrstft(y_g_hat[..., :min_t], y[..., :min_t]).item() # Log audio and figures to Tensorboard if j % a.eval_subsample == 0: # Subsample every nth from validation set if steps >= 0: sw.add_audio(f"gt_{mode}/y_{j}", y[0], steps, h.sampling_rate) if a.save_audio: # Also save audio to disk if --save_audio is set to True save_audio( y[0], os.path.join( a.checkpoint_path, "samples", f"gt_{mode}", f"{j:04d}.wav", ), h.sampling_rate, ) sw.add_figure( f"gt_{mode}/y_spec_{j}", plot_spectrogram(x[0]), steps, ) sw.add_audio( f"generated_{mode}/y_hat_{j}", y_g_hat[0], steps, h.sampling_rate, ) if a.save_audio: # Also save audio to disk if --save_audio is set to True save_audio( y_g_hat[0, 0], os.path.join( a.checkpoint_path, "samples", f"{mode}_{steps:08d}", f"{j:04d}.wav", ), h.sampling_rate, ) # Spectrogram of synthesized audio y_hat_spec = mel_spectrogram( y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax, ) sw.add_figure( f"generated_{mode}/y_hat_spec_{j}", plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), steps, ) """ Visualization of spectrogram difference between GT and synthesized audio, difference higher than 1 is clipped for better visualization. """ spec_delta = torch.clamp( torch.abs(x[0] - y_hat_spec.squeeze(0).cpu()), min=1e-6, max=1.0, ) sw.add_figure( f"delta_dclip1_{mode}/spec_{j}", plot_spectrogram_clipped(spec_delta.numpy(), clip_max=1.0), steps, ) val_err = val_err_tot / (j + 1) val_pesq = val_pesq_tot / (j + 1) val_mrstft = val_mrstft_tot / (j + 1) # Log evaluation metrics to Tensorboard sw.add_scalar(f"validation_{mode}/mel_spec_error", val_err, steps) sw.add_scalar(f"validation_{mode}/pesq", val_pesq, steps) sw.add_scalar(f"validation_{mode}/mrstft", val_mrstft, steps) generator.train() # If the checkpoint is loaded, start with validation loop if steps != 0 and rank == 0 and not a.debug: if not a.skip_seen: validate( rank, a, h, validation_loader, mode=f"seen_{train_loader.dataset.name}", ) for i in range(len(list_unseen_validation_loader)): validate( rank, a, h, list_unseen_validation_loader[i], mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}", ) # Exit the script if --evaluate is set to True if a.evaluate: exit() # Main training loop generator.train() mpd.train() mrd.train() for epoch in range(max(0, last_epoch), a.training_epochs): if rank == 0: start = time.time() print(f"Epoch: {epoch + 1}") if h.num_gpus > 1: train_sampler.set_epoch(epoch) for i, batch in enumerate(train_loader): if rank == 0: start_b = time.time() x, y, _, y_mel = batch x = x.to(device, non_blocking=True) y = y.to(device, non_blocking=True) y_mel = y_mel.to(device, non_blocking=True) y = y.unsqueeze(1) y_g_hat = generator(x) y_g_hat_mel = mel_spectrogram( y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax_for_loss, ) optim_d.zero_grad() # MPD y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach()) loss_disc_f, losses_disc_f_r, losses_disc_f_g = 
discriminator_loss(y_df_hat_r, y_df_hat_g) # MRD y_ds_hat_r, y_ds_hat_g, _, _ = mrd(y, y_g_hat.detach()) loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g) loss_disc_all = loss_disc_s + loss_disc_f # Set clip_grad_norm value clip_grad_norm = h.get("clip_grad_norm", 1000.0) # Default to 1000 # Whether to freeze D for initial training steps if steps >= a.freeze_step: loss_disc_all.backward() grad_norm_mpd = torch.nn.utils.clip_grad_norm_(mpd.parameters(), clip_grad_norm) grad_norm_mrd = torch.nn.utils.clip_grad_norm_(mrd.parameters(), clip_grad_norm) optim_d.step() else: print(f"[WARNING] skipping D training for the first {a.freeze_step} steps") grad_norm_mpd = 0.0 grad_norm_mrd = 0.0 # Generator optim_g.zero_grad() # L1 Mel-Spectrogram Loss lambda_melloss = h.get("lambda_melloss", 45.0) # Defaults to 45 in BigVGAN-v1 if not set if h.get("use_multiscale_melloss", False): # uses wav <y, y_g_hat> for loss loss_mel = fn_mel_loss_multiscale(y, y_g_hat) * lambda_melloss else: # Uses mel <y_mel, y_g_hat_mel> for loss loss_mel = fn_mel_loss_singlescale(y_mel, y_g_hat_mel) * lambda_melloss # MPD loss y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat) loss_fm_f = feature_loss(fmap_f_r, fmap_f_g) loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g) # MRD loss y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = mrd(y, y_g_hat) loss_fm_s = feature_loss(fmap_s_r, fmap_s_g) loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g) if steps >= a.freeze_step: loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel else: print(f"[WARNING] using regression loss only for G for the first {a.freeze_step} steps") loss_gen_all = loss_mel loss_gen_all.backward() grad_norm_g = torch.nn.utils.clip_grad_norm_(generator.parameters(), clip_grad_norm) optim_g.step() if rank == 0: # STDOUT logging if steps % a.stdout_interval == 0: mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to stdout print( f"Steps: {steps:d}, " f"Gen Loss Total: {loss_gen_all:4.3f}, " f"Mel Error: {mel_error:4.3f}, " f"s/b: {time.time() - start_b:4.3f} " f"lr: {optim_g.param_groups[0]['lr']:4.7f} " f"grad_norm_g: {grad_norm_g:4.3f}" ) # Checkpointing if steps % a.checkpoint_interval == 0 and steps != 0: checkpoint_path = f"{a.checkpoint_path}/g_{steps:08d}" save_checkpoint( checkpoint_path, {"generator": (generator.module if h.num_gpus > 1 else generator).state_dict()}, ) checkpoint_path = f"{a.checkpoint_path}/do_{steps:08d}" save_checkpoint( checkpoint_path, { "mpd": (mpd.module if h.num_gpus > 1 else mpd).state_dict(), "mrd": (mrd.module if h.num_gpus > 1 else mrd).state_dict(), "optim_g": optim_g.state_dict(), "optim_d": optim_d.state_dict(), "steps": steps, "epoch": epoch, }, ) # Tensorboard summary logging if steps % a.summary_interval == 0: mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to tensorboard sw.add_scalar("training/gen_loss_total", loss_gen_all.item(), steps) sw.add_scalar("training/mel_spec_error", mel_error, steps) sw.add_scalar("training/fm_loss_mpd", loss_fm_f.item(), steps) sw.add_scalar("training/gen_loss_mpd", loss_gen_f.item(), steps) sw.add_scalar("training/disc_loss_mpd", loss_disc_f.item(), steps) sw.add_scalar("training/grad_norm_mpd", grad_norm_mpd, steps) sw.add_scalar("training/fm_loss_mrd", loss_fm_s.item(), steps) sw.add_scalar("training/gen_loss_mrd", loss_gen_s.item(), steps) sw.add_scalar("training/disc_loss_mrd", loss_disc_s.item(), steps) sw.add_scalar("training/grad_norm_mrd", 
grad_norm_mrd, steps) sw.add_scalar("training/grad_norm_g", grad_norm_g, steps) sw.add_scalar("training/learning_rate_d", scheduler_d.get_last_lr()[0], steps) sw.add_scalar("training/learning_rate_g", scheduler_g.get_last_lr()[0], steps) sw.add_scalar("training/epoch", epoch + 1, steps) # Validation if steps % a.validation_interval == 0: # Plot training input x so far used for i_x in range(x.shape[0]): sw.add_figure( f"training_input/x_{i_x}", plot_spectrogram(x[i_x].cpu()), steps, ) sw.add_audio( f"training_input/y_{i_x}", y[i_x][0], steps, h.sampling_rate, ) # Seen and unseen speakers validation loops if not a.debug and steps != 0: validate( rank, a, h, validation_loader, mode=f"seen_{train_loader.dataset.name}", ) for i in range(len(list_unseen_validation_loader)): validate( rank, a, h, list_unseen_validation_loader[i], mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}", ) steps += 1 # BigVGAN-v2 learning rate scheduler is changed from epoch-level to step-level scheduler_g.step() scheduler_d.step() if rank == 0: print(f"Time taken for epoch {epoch + 1} is {int(time.time() - start)} sec\n") def main(): print("Initializing Training Process..") parser = argparse.ArgumentParser() parser.add_argument("--group_name", default=None) parser.add_argument("--input_wavs_dir", default="LibriTTS") parser.add_argument("--input_mels_dir", default="ft_dataset") parser.add_argument("--input_training_file", default="tests/LibriTTS/train-full.txt") parser.add_argument("--input_validation_file", default="tests/LibriTTS/val-full.txt") parser.add_argument( "--list_input_unseen_wavs_dir", nargs="+", default=["tests/LibriTTS", "tests/LibriTTS"], ) parser.add_argument( "--list_input_unseen_validation_file", nargs="+", default=["tests/LibriTTS/dev-clean.txt", "tests/LibriTTS/dev-other.txt"], ) parser.add_argument("--checkpoint_path", default="exp/bigvgan") parser.add_argument("--config", default="") parser.add_argument("--training_epochs", default=100000, type=int) parser.add_argument("--stdout_interval", default=5, type=int) parser.add_argument("--checkpoint_interval", default=50000, type=int) parser.add_argument("--summary_interval", default=100, type=int) parser.add_argument("--validation_interval", default=50000, type=int) parser.add_argument( "--freeze_step", default=0, type=int, help="freeze D for the first specified steps. G only uses regression loss for these steps.", ) parser.add_argument("--fine_tuning", default=False, type=bool) parser.add_argument( "--debug", default=False, type=bool, help="debug mode. skips validation loop throughout training", ) parser.add_argument( "--evaluate", default=False, type=bool, help="only run evaluation from checkpoint and exit", ) parser.add_argument( "--eval_subsample", default=5, type=int, help="subsampling during evaluation loop", ) parser.add_argument( "--skip_seen", default=False, type=bool, help="skip seen dataset. 
useful for test set inference", ) parser.add_argument( "--save_audio", default=False, type=bool, help="save audio of test set inference to disk", ) a = parser.parse_args() with open(a.config) as f: data = f.read() json_config = json.loads(data) h = AttrDict(json_config) build_env(a.config, "config.json", a.checkpoint_path) torch.manual_seed(h.seed) if torch.cuda.is_available(): torch.cuda.manual_seed(h.seed) h.num_gpus = torch.cuda.device_count() h.batch_size = int(h.batch_size / h.num_gpus) print(f"Batch size per GPU: {h.batch_size}") else: pass if h.num_gpus > 1: mp.spawn( train, nprocs=h.num_gpus, args=( a, h, ), ) else: train(0, a, h) if __name__ == "__main__": main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
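The `train.py` record above notes that the BigVGAN-v2 learning-rate schedule moved from epoch-level to step-level: `scheduler_g.step()` / `scheduler_d.step()` run once per batch, so `h.lr_decay` is a per-step multiplier and must sit very close to 1.0. A tiny standalone illustration of that behavior; the gamma value here is made up, not taken from a BigVGAN config:

```python
# ExponentialLR multiplies lr by gamma on every .step() call, so calling it
# per batch (as train() does) compounds far faster than a per-epoch schedule.
import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.AdamW([param], lr=1e-4)
sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9999996)

for _ in range(100_000):  # 100k training steps
    opt.step()            # no grads set; AdamW skips the params, order kept for correctness
    sched.step()          # lr *= gamma each training step, not each epoch
print(opt.param_groups[0]["lr"])  # ~1e-4 * 0.9999996**100000 ≈ 9.6e-5
```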
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/utils0.py
GPT_SoVITS/BigVGAN/utils0.py
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

import glob
import os
import matplotlib
import torch
from torch.nn.utils import weight_norm

matplotlib.use("Agg")
import matplotlib.pylab as plt
from .meldataset import MAX_WAV_VALUE
from scipy.io.wavfile import write


def plot_spectrogram(spectrogram):
    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
    plt.colorbar(im, ax=ax)

    fig.canvas.draw()
    plt.close()

    return fig


def plot_spectrogram_clipped(spectrogram, clip_max=2.0):
    fig, ax = plt.subplots(figsize=(10, 2))
    im = ax.imshow(
        spectrogram,
        aspect="auto",
        origin="lower",
        interpolation="none",
        vmin=1e-6,
        vmax=clip_max,
    )
    plt.colorbar(im, ax=ax)

    fig.canvas.draw()
    plt.close()

    return fig


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def apply_weight_norm(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        weight_norm(m)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def load_checkpoint(filepath, device):
    assert os.path.isfile(filepath)
    print(f"Loading '{filepath}'")
    checkpoint_dict = torch.load(filepath, map_location=device)
    print("Complete.")
    return checkpoint_dict


def save_checkpoint(filepath, obj):
    print(f"Saving checkpoint to {filepath}")
    torch.save(obj, filepath)
    print("Complete.")


def scan_checkpoint(cp_dir, prefix, renamed_file=None):
    # Fallback to original scanning logic first
    pattern = os.path.join(cp_dir, prefix + "????????")
    cp_list = glob.glob(pattern)

    if len(cp_list) > 0:
        last_checkpoint_path = sorted(cp_list)[-1]
        print(f"[INFO] Resuming from checkpoint: '{last_checkpoint_path}'")
        return last_checkpoint_path

    # If no pattern-based checkpoints are found, check for renamed file
    if renamed_file:
        renamed_path = os.path.join(cp_dir, renamed_file)
        if os.path.isfile(renamed_path):
            print(f"[INFO] Resuming from renamed checkpoint: '{renamed_file}'")
            return renamed_path

    return None


def save_audio(audio, path, sr):
    # wav: torch with 1d shape
    audio = audio * MAX_WAV_VALUE
    audio = audio.cpu().numpy().astype("int16")
    write(path, sr, audio)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
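`scan_checkpoint` in the `utils0.py` record above picks the lexicographically last `g_????????` file, which works because step numbers are zero-padded to exactly eight digits, and only falls back to the renamed Hugging Face Hub filename when no pattern-based checkpoint exists. A quick demonstration with a throwaway directory; the import path is an assumption about the package layout:

```python
# Demonstrates both branches of scan_checkpoint: padded-step scanning and the
# renamed-file fallback. Filenames below are illustrative.
import os
import tempfile
from GPT_SoVITS.BigVGAN.utils0 import scan_checkpoint  # import path assumed

with tempfile.TemporaryDirectory() as d:
    for step in (50000, 100000):
        open(os.path.join(d, f"g_{step:08d}"), "w").close()
    print(scan_checkpoint(d, prefix="g_"))  # .../g_00100000 (zero-padding makes sort correct)
    # No do_???????? files and no renamed file present -> None
    print(scan_checkpoint(d, prefix="do_", renamed_file="bigvgan_discriminator_optimizer.pt"))
```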
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/inference.py
GPT_SoVITS/BigVGAN/inference.py
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

from __future__ import absolute_import, division, print_function, unicode_literals

import os
import argparse
import json
import torch
import librosa
from utils import load_checkpoint
from meldataset import get_mel_spectrogram
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import MAX_WAV_VALUE
from bigvgan import BigVGAN as Generator

h = None
device = None
torch.backends.cudnn.benchmark = False


def inference(a, h):
    generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device)

    state_dict_g = load_checkpoint(a.checkpoint_file, device)
    generator.load_state_dict(state_dict_g["generator"])

    filelist = os.listdir(a.input_wavs_dir)

    os.makedirs(a.output_dir, exist_ok=True)

    generator.eval()
    generator.remove_weight_norm()
    with torch.no_grad():
        for i, filename in enumerate(filelist):
            # Load the ground truth audio and resample if necessary
            wav, sr = librosa.load(os.path.join(a.input_wavs_dir, filename), sr=h.sampling_rate, mono=True)
            wav = torch.FloatTensor(wav).to(device)
            # Compute mel spectrogram from the ground truth audio
            x = get_mel_spectrogram(wav.unsqueeze(0), generator.h)

            y_g_hat = generator(x)

            audio = y_g_hat.squeeze()
            audio = audio * MAX_WAV_VALUE
            audio = audio.cpu().numpy().astype("int16")

            output_file = os.path.join(a.output_dir, os.path.splitext(filename)[0] + "_generated.wav")
            write(output_file, h.sampling_rate, audio)
            print(output_file)


def main():
    print("Initializing Inference Process..")

    parser = argparse.ArgumentParser()
    parser.add_argument("--input_wavs_dir", default="test_files")
    parser.add_argument("--output_dir", default="generated_files")
    parser.add_argument("--checkpoint_file", required=True)
    parser.add_argument("--use_cuda_kernel", action="store_true", default=False)

    a = parser.parse_args()

    config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json")
    with open(config_file) as f:
        data = f.read()

    global h
    json_config = json.loads(data)
    h = AttrDict(json_config)

    torch.manual_seed(h.seed)

    global device
    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    inference(a, h)


if __name__ == "__main__":
    main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
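`inference()` above performs copy-synthesis over a whole directory. A condensed single-file variant is sketched below; the flat imports follow `inference.py`'s own style (they assume the script runs from the BigVGAN directory), and the checkpoint and wav paths are placeholders:

```python
# Hypothetical single-file copy-synthesis, condensed from inference() above.
# "exp/bigvgan/g_00100000" and "sample.wav" are placeholder paths.
import json
import torch
import librosa
from scipy.io.wavfile import write
from env import AttrDict
from bigvgan import BigVGAN
from meldataset import get_mel_spectrogram, MAX_WAV_VALUE
from utils import load_checkpoint

with open("exp/bigvgan/config.json") as f:
    h = AttrDict(json.loads(f.read()))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

generator = BigVGAN(h).to(device)
generator.load_state_dict(load_checkpoint("exp/bigvgan/g_00100000", device)["generator"])
generator.eval()
generator.remove_weight_norm()

wav, _ = librosa.load("sample.wav", sr=h.sampling_rate, mono=True)
with torch.no_grad():
    mel = get_mel_spectrogram(torch.FloatTensor(wav).unsqueeze(0).to(device), generator.h)
    audio = (generator(mel).squeeze() * MAX_WAV_VALUE).cpu().numpy().astype("int16")
write("sample_generated.wav", h.sampling_rate, audio)
```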
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/bigvgan.py
GPT_SoVITS/BigVGAN/bigvgan.py
# Copyright (c) 2024 NVIDIA CORPORATION. # Licensed under the MIT license. # Adapted from https://github.com/jik876/hifi-gan under the MIT license. # LICENSE is in incl_licenses directory. import os import json from pathlib import Path from typing import Optional, Union, Dict import torch import torch.nn as nn from torch.nn import Conv1d, ConvTranspose1d from torch.nn.utils import weight_norm, remove_weight_norm from . import activations from .utils0 import init_weights, get_padding from .alias_free_activation.torch.act import Activation1d as TorchActivation1d from .env import AttrDict from huggingface_hub import PyTorchModelHubMixin, hf_hub_download def load_hparams_from_json(path) -> AttrDict: with open(path) as f: data = f.read() return AttrDict(json.loads(data)) class AMPBlock1(torch.nn.Module): """ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1 Args: h (AttrDict): Hyperparameters. channels (int): Number of convolution channels. kernel_size (int): Size of the convolution kernel. Default is 3. dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. """ def __init__( self, h: AttrDict, channels: int, kernel_size: int = 3, dilation: tuple = (1, 3, 5), activation: str = None, ): super().__init__() self.h = h self.convs1 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, stride=1, dilation=d, padding=get_padding(kernel_size, d), ) ) for d in dilation ] ) self.convs1.apply(init_weights) self.convs2 = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, stride=1, dilation=1, padding=get_padding(kernel_size, 1), ) ) for _ in range(len(dilation)) ] ) self.convs2.apply(init_weights) self.num_layers = len(self.convs1) + len(self.convs2) # Total number of conv layers # Select which Activation1d, lazy-load cuda version to ensure backward compatibility if self.h.get("use_cuda_kernel", False): from .alias_free_activation.cuda.activation1d import ( Activation1d as CudaActivation1d, ) Activation1d = CudaActivation1d else: Activation1d = TorchActivation1d # Activation functions if activation == "snake": self.activations = nn.ModuleList( [ Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) for _ in range(self.num_layers) ] ) elif activation == "snakebeta": self.activations = nn.ModuleList( [ Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) for _ in range(self.num_layers) ] ) else: raise NotImplementedError( "activation incorrectly specified. check the config file and look for 'activation'." ) def forward(self, x): acts1, acts2 = self.activations[::2], self.activations[1::2] for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): xt = a1(x) xt = c1(xt) xt = a2(xt) xt = c2(xt) x = xt + x return x def remove_weight_norm(self): for l in self.convs1: remove_weight_norm(l) for l in self.convs2: remove_weight_norm(l) class AMPBlock2(torch.nn.Module): """ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1 Args: h (AttrDict): Hyperparameters. 
channels (int): Number of convolution channels. kernel_size (int): Size of the convolution kernel. Default is 3. dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. """ def __init__( self, h: AttrDict, channels: int, kernel_size: int = 3, dilation: tuple = (1, 3, 5), activation: str = None, ): super().__init__() self.h = h self.convs = nn.ModuleList( [ weight_norm( Conv1d( channels, channels, kernel_size, stride=1, dilation=d, padding=get_padding(kernel_size, d), ) ) for d in dilation ] ) self.convs.apply(init_weights) self.num_layers = len(self.convs) # Total number of conv layers # Select which Activation1d, lazy-load cuda version to ensure backward compatibility if self.h.get("use_cuda_kernel", False): from .alias_free_activation.cuda.activation1d import ( Activation1d as CudaActivation1d, ) Activation1d = CudaActivation1d else: Activation1d = TorchActivation1d # Activation functions if activation == "snake": self.activations = nn.ModuleList( [ Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) for _ in range(self.num_layers) ] ) elif activation == "snakebeta": self.activations = nn.ModuleList( [ Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) for _ in range(self.num_layers) ] ) else: raise NotImplementedError( "activation incorrectly specified. check the config file and look for 'activation'." ) def forward(self, x): for c, a in zip(self.convs, self.activations): xt = a(x) xt = c(xt) x = xt + x return x def remove_weight_norm(self): for l in self.convs: remove_weight_norm(l) class BigVGAN( torch.nn.Module, PyTorchModelHubMixin, # library_name="bigvgan", # repo_url="https://github.com/NVIDIA/BigVGAN", # docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md", # pipeline_tag="audio-to-audio", # license="mit", # tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"], ): """ BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks). New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks. Args: h (AttrDict): Hyperparameters. use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels. Note: - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported. - Ensure that the activation function is correctly specified in the hyperparameters (h.activation). """ def __init__(self, h: AttrDict, use_cuda_kernel: bool = False): super().__init__() self.h = h self.h["use_cuda_kernel"] = use_cuda_kernel # Select which Activation1d, lazy-load cuda version to ensure backward compatibility if self.h.get("use_cuda_kernel", False): from .alias_free_activation.cuda.activation1d import ( Activation1d as CudaActivation1d, ) Activation1d = CudaActivation1d else: Activation1d = TorchActivation1d self.num_kernels = len(h.resblock_kernel_sizes) self.num_upsamples = len(h.upsample_rates) # Pre-conv self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) # Define which AMPBlock to use. 
BigVGAN uses AMPBlock1 as default if h.resblock == "1": resblock_class = AMPBlock1 elif h.resblock == "2": resblock_class = AMPBlock2 else: raise ValueError(f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}") # Transposed conv-based upsamplers. does not apply anti-aliasing self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): self.ups.append( nn.ModuleList( [ weight_norm( ConvTranspose1d( h.upsample_initial_channel // (2**i), h.upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2, ) ) ] ) ) # Residual blocks using anti-aliased multi-periodicity composition modules (AMP) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): ch = h.upsample_initial_channel // (2 ** (i + 1)) for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation)) # Post-conv activation_post = ( activations.Snake(ch, alpha_logscale=h.snake_logscale) if h.activation == "snake" else (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) if h.activation == "snakebeta" else None) ) if activation_post is None: raise NotImplementedError( "activation incorrectly specified. check the config file and look for 'activation'." ) self.activation_post = Activation1d(activation=activation_post) # Whether to use bias for the final conv_post. Default to True for backward compatibility self.use_bias_at_final = h.get("use_bias_at_final", True) self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)) # Weight initialization for i in range(len(self.ups)): self.ups[i].apply(init_weights) self.conv_post.apply(init_weights) # Final tanh activation. Defaults to True for backward compatibility self.use_tanh_at_final = h.get("use_tanh_at_final", True) def forward(self, x): # Pre-conv x = self.conv_pre(x) for i in range(self.num_upsamples): # Upsampling for i_up in range(len(self.ups[i])): x = self.ups[i][i_up](x) # AMP blocks xs = None for j in range(self.num_kernels): if xs is None: xs = self.resblocks[i * self.num_kernels + j](x) else: xs += self.resblocks[i * self.num_kernels + j](x) x = xs / self.num_kernels # Post-conv x = self.activation_post(x) x = self.conv_post(x) # Final tanh activation if self.use_tanh_at_final: x = torch.tanh(x) else: x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1] return x def remove_weight_norm(self): try: # print("Removing weight norm...") for l in self.ups: for l_i in l: remove_weight_norm(l_i) for l in self.resblocks: l.remove_weight_norm() remove_weight_norm(self.conv_pre) remove_weight_norm(self.conv_post) except ValueError: print("[INFO] Model already removed weight norm. 
Skipping!") pass # Additional methods for huggingface_hub support def _save_pretrained(self, save_directory: Path) -> None: """Save weights and config.json from a Pytorch model to a local directory.""" model_path = save_directory / "bigvgan_generator.pt" torch.save({"generator": self.state_dict()}, model_path) config_path = save_directory / "config.json" with open(config_path, "w") as config_file: json.dump(self.h, config_file, indent=4) @classmethod def _from_pretrained( cls, *, model_id: str, revision: str, cache_dir: str, force_download: bool, proxies: Optional[Dict], resume_download: bool, local_files_only: bool, token: Union[str, bool, None], map_location: str = "cpu", # Additional argument strict: bool = False, # Additional argument use_cuda_kernel: bool = False, **model_kwargs, ): """Load Pytorch pretrained weights and return the loaded model.""" # Download and load hyperparameters (h) used by BigVGAN if os.path.isdir(model_id): # print("Loading config.json from local directory") config_file = os.path.join(model_id, "config.json") else: config_file = hf_hub_download( repo_id=model_id, filename="config.json", revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) h = load_hparams_from_json(config_file) # instantiate BigVGAN using h if use_cuda_kernel: print( "[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!" ) print( "[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!" ) print( "[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis" ) model = cls(h, use_cuda_kernel=use_cuda_kernel) # Download and load pretrained generator weight if os.path.isdir(model_id): # print("Loading weights from local directory") model_file = os.path.join(model_id, "bigvgan_generator.pt") else: # print(f"Loading weights from {model_id}") model_file = hf_hub_download( repo_id=model_id, filename="bigvgan_generator.pt", revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) checkpoint_dict = torch.load(model_file, map_location=map_location) try: model.load_state_dict(checkpoint_dict["generator"]) except RuntimeError: print( "[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!" ) model.remove_weight_norm() model.load_state_dict(checkpoint_dict["generator"]) return model
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
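The `bigvgan.py` record above wires `BigVGAN` into `PyTorchModelHubMixin`, so pretrained checkpoints (`config.json` + `bigvgan_generator.pt`) can be pulled straight from the Hugging Face Hub via the overridden `_from_pretrained`. A minimal sketch; the repo id names one of NVIDIA's published BigVGAN-v2 checkpoints (verify availability) and the package import path is an assumption:

```python
# Load a pretrained vocoder through the hub mixin and vocode a dummy mel.
import torch
from GPT_SoVITS.BigVGAN.bigvgan import BigVGAN  # import path assumed

model = BigVGAN.from_pretrained("nvidia/bigvgan_v2_24khz_100band_256x",
                                use_cuda_kernel=False)
model.remove_weight_norm()  # inference-time weight-norm removal, as in bigvgan.py
model.eval()

mel = torch.randn(1, model.h.num_mels, 100)  # [B, num_mels, T_frames]
with torch.no_grad():
    wav = model(mel)  # [B, 1, T_frames * prod(upsample_rates)]
print(wav.shape)
```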
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/discriminators.py
GPT_SoVITS/BigVGAN/discriminators.py
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv2d
from torch.nn.utils import weight_norm, spectral_norm
from torchaudio.transforms import Spectrogram, Resample

from env import AttrDict
from utils import get_padding
import typing
from typing import List, Tuple


class DiscriminatorP(torch.nn.Module):
    def __init__(
        self,
        h: AttrDict,
        period: int,
        kernel_size: int = 5,
        stride: int = 3,
        use_spectral_norm: bool = False,
    ):
        super().__init__()
        self.period = period
        self.d_mult = h.discriminator_channel_mult
        norm_f = weight_norm if not use_spectral_norm else spectral_norm

        self.convs = nn.ModuleList(
            [
                norm_f(Conv2d(1, int(32 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(int(32 * self.d_mult), int(128 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(int(128 * self.d_mult), int(512 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(int(512 * self.d_mult), int(1024 * self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
                norm_f(Conv2d(int(1024 * self.d_mult), int(1024 * self.d_mult), (kernel_size, 1), 1, padding=(2, 0))),
            ]
        )
        self.conv_post = norm_f(Conv2d(int(1024 * self.d_mult), 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, 0.1)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, h: AttrDict):
        super().__init__()
        self.mpd_reshapes = h.mpd_reshapes
        print(f"mpd_reshapes: {self.mpd_reshapes}")
        self.discriminators = nn.ModuleList(
            [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
        )

    def forward(
        self, y: torch.Tensor, y_hat: torch.Tensor
    ) -> Tuple[
        List[torch.Tensor],
        List[torch.Tensor],
        List[List[torch.Tensor]],
        List[List[torch.Tensor]],
    ]:
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorR(nn.Module):
    def __init__(self, cfg: AttrDict, resolution: List[int]):
        super().__init__()

        self.resolution = resolution
        assert len(self.resolution) == 3, f"MRD layer requires list with len=3, got {self.resolution}"
        self.lrelu_slope = 0.1

        norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm
        if hasattr(cfg, "mrd_use_spectral_norm"):
            print(f"[INFO] overriding MRD use_spectral_norm as {cfg.mrd_use_spectral_norm}")
            norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm
        self.d_mult = cfg.discriminator_channel_mult
        if hasattr(cfg, "mrd_channel_mult"):
            print(f"[INFO] overriding mrd channel multiplier as {cfg.mrd_channel_mult}")
            self.d_mult = cfg.mrd_channel_mult

        self.convs = nn.ModuleList(
            [
                norm_f(nn.Conv2d(1, int(32 * self.d_mult), (3, 9), padding=(1, 4))),
                norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
                norm_f(nn.Conv2d(int(32 * self.d_mult), int(32 * self.d_mult), (3, 3), padding=(1, 1))),
            ]
        )
        self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        fmap = []

        x = self.spectrogram(x)
        x = x.unsqueeze(1)
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, self.lrelu_slope)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap

    def spectrogram(self, x: torch.Tensor) -> torch.Tensor:
        n_fft, hop_length, win_length = self.resolution
        x = F.pad(
            x,
            (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)),
            mode="reflect",
        )
        x = x.squeeze(1)
        x = torch.stft(
            x,
            n_fft=n_fft,
            hop_length=hop_length,
            win_length=win_length,
            center=False,
            return_complex=True,
        )
        x = torch.view_as_real(x)  # [B, F, TT, 2]
        mag = torch.norm(x, p=2, dim=-1)  # [B, F, TT]

        return mag


class MultiResolutionDiscriminator(nn.Module):
    def __init__(self, cfg, debug=False):
        super().__init__()
        self.resolutions = cfg.resolutions
        assert len(self.resolutions) == 3, (
            f"MRD requires list of list with len=3, each element having a list with len=3. Got {self.resolutions}"
        )
        self.discriminators = nn.ModuleList([DiscriminatorR(cfg, resolution) for resolution in self.resolutions])

    def forward(
        self, y: torch.Tensor, y_hat: torch.Tensor
    ) -> Tuple[
        List[torch.Tensor],
        List[torch.Tensor],
        List[List[torch.Tensor]],
        List[List[torch.Tensor]],
    ]:
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []

        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(x=y)
            y_d_g, fmap_g = d(x=y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec
# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license.
# LICENSE is in incl_licenses directory.
class DiscriminatorB(nn.Module):
    def __init__(
        self,
        window_length: int,
        channels: int = 32,
        hop_factor: float = 0.25,
        bands: Tuple[Tuple[float, float], ...] = (
            (0.0, 0.1),
            (0.1, 0.25),
            (0.25, 0.5),
            (0.5, 0.75),
            (0.75, 1.0),
        ),
    ):
        super().__init__()
        self.window_length = window_length
        self.hop_factor = hop_factor
        self.spec_fn = Spectrogram(
            n_fft=window_length,
            hop_length=int(window_length * hop_factor),
            win_length=window_length,
            power=None,
        )
        n_fft = window_length // 2 + 1
        bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands]
        self.bands = bands
        convs = lambda: nn.ModuleList(
            [
                weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))),
                weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
                weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
                weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
                weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))),
            ]
        )
        self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))])

        self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1)))

    def spectrogram(self, x: torch.Tensor) -> List[torch.Tensor]:
        # Remove DC offset
        x = x - x.mean(dim=-1, keepdims=True)
        # Peak normalize the volume of input audio
        x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)
        x = self.spec_fn(x)
        x = torch.view_as_real(x)
        x = x.permute(0, 3, 2, 1)  # [B, F, T, C] -> [B, C, T, F]
        # Split into bands
        x_bands = [x[..., b[0] : b[1]] for b in self.bands]
        return x_bands

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        x_bands = self.spectrogram(x.squeeze(1))
        fmap = []
        x = []

        for band, stack in zip(x_bands, self.band_convs):
            for i, layer in enumerate(stack):
                band = layer(band)
                band = torch.nn.functional.leaky_relu(band, 0.1)
                if i > 0:
                    fmap.append(band)
            x.append(band)

        x = torch.cat(x, dim=-1)
        x = self.conv_post(x)
        fmap.append(x)

        return x, fmap


# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec
# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license.
# LICENSE is in incl_licenses directory.
class MultiBandDiscriminator(nn.Module):
    def __init__(
        self,
        h,
    ):
        """
        Multi-band multi-scale STFT discriminator, with the architecture based on https://github.com/descriptinc/descript-audio-codec.
        and the modified code adapted from https://github.com/gemelo-ai/vocos.
        """
        super().__init__()
        # fft_sizes (list[int]): Tuple of window lengths for FFT. Defaults to [2048, 1024, 512] if not set in h.
        self.fft_sizes = h.get("mbd_fft_sizes", [2048, 1024, 512])
        self.discriminators = nn.ModuleList([DiscriminatorB(window_length=w) for w in self.fft_sizes])

    def forward(
        self, y: torch.Tensor, y_hat: torch.Tensor
    ) -> Tuple[
        List[torch.Tensor],
        List[torch.Tensor],
        List[List[torch.Tensor]],
        List[List[torch.Tensor]],
    ]:
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []

        for d in self.discriminators:
            y_d_r, fmap_r = d(x=y)
            y_d_g, fmap_g = d(x=y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


# Adapted from https://github.com/open-mmlab/Amphion/blob/main/models/vocoders/gan/discriminator/mssbcqtd.py under the MIT license.
# LICENSE is in incl_licenses directory.
class DiscriminatorCQT(nn.Module):
    def __init__(self, cfg: AttrDict, hop_length: int, n_octaves: int, bins_per_octave: int):
        super().__init__()
        self.cfg = cfg

        self.filters = cfg["cqtd_filters"]
        self.max_filters = cfg["cqtd_max_filters"]
        self.filters_scale = cfg["cqtd_filters_scale"]
        self.kernel_size = (3, 9)
        self.dilations = cfg["cqtd_dilations"]
        self.stride = (1, 2)

        self.in_channels = cfg["cqtd_in_channels"]
        self.out_channels = cfg["cqtd_out_channels"]
        self.fs = cfg["sampling_rate"]
        self.hop_length = hop_length
        self.n_octaves = n_octaves
        self.bins_per_octave = bins_per_octave

        # Lazy-load
        from nnAudio import features

        self.cqt_transform = features.cqt.CQT2010v2(
            sr=self.fs * 2,
            hop_length=self.hop_length,
            n_bins=self.bins_per_octave * self.n_octaves,
            bins_per_octave=self.bins_per_octave,
            output_format="Complex",
            pad_mode="constant",
        )

        self.conv_pres = nn.ModuleList()
        for _ in range(self.n_octaves):
            self.conv_pres.append(
                nn.Conv2d(
                    self.in_channels * 2,
                    self.in_channels * 2,
                    kernel_size=self.kernel_size,
                    padding=self.get_2d_padding(self.kernel_size),
                )
            )

        self.convs = nn.ModuleList()

        self.convs.append(
            nn.Conv2d(
                self.in_channels * 2,
                self.filters,
                kernel_size=self.kernel_size,
                padding=self.get_2d_padding(self.kernel_size),
            )
        )

        in_chs = min(self.filters_scale * self.filters, self.max_filters)
        for i, dilation in enumerate(self.dilations):
            out_chs = min((self.filters_scale ** (i + 1)) * self.filters, self.max_filters)
            self.convs.append(
                weight_norm(
                    nn.Conv2d(
                        in_chs,
                        out_chs,
                        kernel_size=self.kernel_size,
                        stride=self.stride,
                        dilation=(dilation, 1),
                        padding=self.get_2d_padding(self.kernel_size, (dilation, 1)),
                    )
                )
            )
            in_chs = out_chs
        out_chs = min(
            (self.filters_scale ** (len(self.dilations) + 1)) * self.filters,
            self.max_filters,
        )
        self.convs.append(
            weight_norm(
                nn.Conv2d(
                    in_chs,
                    out_chs,
                    kernel_size=(self.kernel_size[0], self.kernel_size[0]),
                    padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])),
                )
            )
        )

        self.conv_post = weight_norm(
            nn.Conv2d(
                out_chs,
                self.out_channels,
                kernel_size=(self.kernel_size[0], self.kernel_size[0]),
                padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])),
            )
        )

        self.activation = torch.nn.LeakyReLU(negative_slope=0.1)
        self.resample = Resample(orig_freq=self.fs, new_freq=self.fs * 2)

        self.cqtd_normalize_volume = self.cfg.get("cqtd_normalize_volume", False)
        if self.cqtd_normalize_volume:
            print(
                "[INFO] cqtd_normalize_volume set to True. Will apply DC offset removal & peak volume normalization in CQTD!"
            )

    def get_2d_padding(
        self,
        kernel_size: typing.Tuple[int, int],
        dilation: typing.Tuple[int, int] = (1, 1),
    ):
        return (
            ((kernel_size[0] - 1) * dilation[0]) // 2,
            ((kernel_size[1] - 1) * dilation[1]) // 2,
        )

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        fmap = []

        if self.cqtd_normalize_volume:
            # Remove DC offset
            x = x - x.mean(dim=-1, keepdims=True)
            # Peak normalize the volume of input audio
            x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)

        x = self.resample(x)

        z = self.cqt_transform(x)

        z_amplitude = z[:, :, :, 0].unsqueeze(1)
        z_phase = z[:, :, :, 1].unsqueeze(1)

        z = torch.cat([z_amplitude, z_phase], dim=1)
        z = torch.permute(z, (0, 1, 3, 2))  # [B, C, W, T] -> [B, C, T, W]

        latent_z = []
        for i in range(self.n_octaves):
            latent_z.append(
                self.conv_pres[i](
                    z[
                        :,
                        :,
                        :,
                        i * self.bins_per_octave : (i + 1) * self.bins_per_octave,
                    ]
                )
            )
        latent_z = torch.cat(latent_z, dim=-1)

        for i, l in enumerate(self.convs):
            latent_z = l(latent_z)
            latent_z = self.activation(latent_z)
            fmap.append(latent_z)

        latent_z = self.conv_post(latent_z)

        return latent_z, fmap


class MultiScaleSubbandCQTDiscriminator(nn.Module):
    def __init__(self, cfg: AttrDict):
        super().__init__()

        self.cfg = cfg
        # Using get with defaults
        self.cfg["cqtd_filters"] = self.cfg.get("cqtd_filters", 32)
        self.cfg["cqtd_max_filters"] = self.cfg.get("cqtd_max_filters", 1024)
        self.cfg["cqtd_filters_scale"] = self.cfg.get("cqtd_filters_scale", 1)
        self.cfg["cqtd_dilations"] = self.cfg.get("cqtd_dilations", [1, 2, 4])
        self.cfg["cqtd_in_channels"] = self.cfg.get("cqtd_in_channels", 1)
        self.cfg["cqtd_out_channels"] = self.cfg.get("cqtd_out_channels", 1)
        # Multi-scale params to loop over
        self.cfg["cqtd_hop_lengths"] = self.cfg.get("cqtd_hop_lengths", [512, 256, 256])
        self.cfg["cqtd_n_octaves"] = self.cfg.get("cqtd_n_octaves", [9, 9, 9])
        self.cfg["cqtd_bins_per_octaves"] = self.cfg.get("cqtd_bins_per_octaves", [24, 36, 48])

        self.discriminators = nn.ModuleList(
            [
                DiscriminatorCQT(
                    self.cfg,
                    hop_length=self.cfg["cqtd_hop_lengths"][i],
                    n_octaves=self.cfg["cqtd_n_octaves"][i],
                    bins_per_octave=self.cfg["cqtd_bins_per_octaves"][i],
                )
                for i in range(len(self.cfg["cqtd_hop_lengths"]))
            ]
        )

    def forward(
        self, y: torch.Tensor, y_hat: torch.Tensor
    ) -> Tuple[
        List[torch.Tensor],
        List[torch.Tensor],
        List[List[torch.Tensor]],
        List[List[torch.Tensor]],
    ]:
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []

        for disc in self.discriminators:
            y_d_r, fmap_r = disc(y)
            y_d_g, fmap_g = disc(y_hat)
            y_d_rs.append(y_d_r)
            fmap_rs.append(fmap_r)
            y_d_gs.append(y_d_g)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class CombinedDiscriminator(nn.Module):
    """
    Wrapper for chaining multiple discriminator architectures.
    Example: combine mbd and cqtd as a single class
    """

    def __init__(self, list_discriminator: List[nn.Module]):
        super().__init__()
        self.discrimiantor = nn.ModuleList(list_discriminator)

    def forward(
        self, y: torch.Tensor, y_hat: torch.Tensor
    ) -> Tuple[
        List[torch.Tensor],
        List[torch.Tensor],
        List[List[torch.Tensor]],
        List[List[torch.Tensor]],
    ]:
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []

        for disc in self.discrimiantor:
            y_d_r, y_d_g, fmap_r, fmap_g = disc(y, y_hat)
            y_d_rs.extend(y_d_r)
            fmap_rs.extend(fmap_r)
            y_d_gs.extend(y_d_g)
            fmap_gs.extend(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
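A brief usage sketch (not part of the file above; the config values and shapes are illustrative assumptions) showing how one multi-resolution sub-discriminator scores a raw waveform batch, assuming it is run from the `GPT_SoVITS/BigVGAN` directory:

```python
import torch
from env import AttrDict
from discriminators import DiscriminatorR

# Illustrative config; real values come from the BigVGAN config.json.
cfg = AttrDict({"use_spectral_norm": False, "discriminator_channel_mult": 1})
disc = DiscriminatorR(cfg, resolution=[1024, 120, 600])  # [n_fft, hop, win]

wav = torch.randn(2, 1, 16000)  # [B, 1, T] waveform batch (assumed)
score, fmap = disc(wav)
print(score.shape, len(fmap))   # flattened per-bin logits, 6 feature maps
```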
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/loss.py
GPT_SoVITS/BigVGAN/loss.py
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

import torch
import torch.nn as nn
from librosa.filters import mel as librosa_mel_fn
from scipy import signal

import typing
from typing import List, Tuple
from collections import namedtuple
import math
import functools


# Adapted from https://github.com/descriptinc/descript-audio-codec/blob/main/dac/nn/loss.py under the MIT license.
# LICENSE is in incl_licenses directory.
class MultiScaleMelSpectrogramLoss(nn.Module):
    """Compute distance between mel spectrograms. Can be used in a multi-scale way.

    Parameters
    ----------
    n_mels : List[int]
        Number of mels per STFT, by default [5, 10, 20, 40, 80, 160, 320],
    window_lengths : List[int], optional
        Length of each window of each STFT, by default [32, 64, 128, 256, 512, 1024, 2048]
    loss_fn : typing.Callable, optional
        How to compare each loss, by default nn.L1Loss()
    clamp_eps : float, optional
        Clamp on the log magnitude, below, by default 1e-5
    mag_weight : float, optional
        Weight of raw magnitude portion of loss, by default 0.0 (no amplification on mag part)
    log_weight : float, optional
        Weight of log magnitude portion of loss, by default 1.0
    pow : float, optional
        Power to raise magnitude to before taking log, by default 1.0
    weight : float, optional
        Weight of this loss, by default 1.0
    match_stride : bool, optional
        Whether to match the stride of convolutional layers, by default False

    Implementation copied from: https://github.com/descriptinc/lyrebird-audiotools/blob/961786aa1a9d628cca0c0486e5885a457fe70c1a/audiotools/metrics/spectral.py
    Additional code copied and modified from https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py
    """

    def __init__(
        self,
        sampling_rate: int,
        n_mels: List[int] = [5, 10, 20, 40, 80, 160, 320],
        window_lengths: List[int] = [32, 64, 128, 256, 512, 1024, 2048],
        loss_fn: typing.Callable = nn.L1Loss(),
        clamp_eps: float = 1e-5,
        mag_weight: float = 0.0,
        log_weight: float = 1.0,
        pow: float = 1.0,
        weight: float = 1.0,
        match_stride: bool = False,
        mel_fmin: List[float] = [0, 0, 0, 0, 0, 0, 0],
        mel_fmax: List[float] = [None, None, None, None, None, None, None],
        window_type: str = "hann",
    ):
        super().__init__()
        self.sampling_rate = sampling_rate

        STFTParams = namedtuple(
            "STFTParams",
            ["window_length", "hop_length", "window_type", "match_stride"],
        )

        self.stft_params = [
            STFTParams(
                window_length=w,
                hop_length=w // 4,
                match_stride=match_stride,
                window_type=window_type,
            )
            for w in window_lengths
        ]
        self.n_mels = n_mels
        self.loss_fn = loss_fn
        self.clamp_eps = clamp_eps
        self.log_weight = log_weight
        self.mag_weight = mag_weight
        self.weight = weight
        self.mel_fmin = mel_fmin
        self.mel_fmax = mel_fmax
        self.pow = pow

    @staticmethod
    @functools.lru_cache(None)
    def get_window(
        window_type,
        window_length,
    ):
        return signal.get_window(window_type, window_length)

    @staticmethod
    @functools.lru_cache(None)
    def get_mel_filters(sr, n_fft, n_mels, fmin, fmax):
        return librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)

    def mel_spectrogram(
        self,
        wav,
        n_mels,
        fmin,
        fmax,
        window_length,
        hop_length,
        match_stride,
        window_type,
    ):
        """
        Mirrors AudioSignal.mel_spectrogram used by BigVGAN-v2 training from:
        https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py
        """
        B, C, T = wav.shape

        if match_stride:
            assert hop_length == window_length // 4, "For match_stride, hop must equal n_fft // 4"
            right_pad = math.ceil(T / hop_length) * hop_length - T
            pad = (window_length - hop_length) // 2
        else:
            right_pad = 0
            pad = 0

        wav = torch.nn.functional.pad(wav, (pad, pad + right_pad), mode="reflect")

        window = self.get_window(window_type, window_length)
        window = torch.from_numpy(window).to(wav.device).float()

        stft = torch.stft(
            wav.reshape(-1, T),
            n_fft=window_length,
            hop_length=hop_length,
            window=window,
            return_complex=True,
            center=True,
        )
        _, nf, nt = stft.shape
        stft = stft.reshape(B, C, nf, nt)
        if match_stride:
            """
            Drop first two and last two frames, which are added, because of padding. Now num_frames * hop_length = num_samples.
            """
            stft = stft[..., 2:-2]
        magnitude = torch.abs(stft)

        nf = magnitude.shape[2]
        mel_basis = self.get_mel_filters(self.sampling_rate, 2 * (nf - 1), n_mels, fmin, fmax)
        mel_basis = torch.from_numpy(mel_basis).to(wav.device)
        mel_spectrogram = magnitude.transpose(2, -1) @ mel_basis.T
        mel_spectrogram = mel_spectrogram.transpose(-1, 2)

        return mel_spectrogram

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Computes mel loss between an estimate and a reference signal.

        Parameters
        ----------
        x : torch.Tensor
            Estimate signal
        y : torch.Tensor
            Reference signal

        Returns
        -------
        torch.Tensor
            Mel loss.
        """
        loss = 0.0
        for n_mels, fmin, fmax, s in zip(self.n_mels, self.mel_fmin, self.mel_fmax, self.stft_params):
            kwargs = {
                "n_mels": n_mels,
                "fmin": fmin,
                "fmax": fmax,
                "window_length": s.window_length,
                "hop_length": s.hop_length,
                "match_stride": s.match_stride,
                "window_type": s.window_type,
            }

            x_mels = self.mel_spectrogram(x, **kwargs)
            y_mels = self.mel_spectrogram(y, **kwargs)
            x_logmels = torch.log(x_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0))
            y_logmels = torch.log(y_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0))

            loss += self.log_weight * self.loss_fn(x_logmels, y_logmels)
            loss += self.mag_weight * self.loss_fn(x_logmels, y_logmels)

        return loss


# Loss functions
def feature_loss(fmap_r: List[List[torch.Tensor]], fmap_g: List[List[torch.Tensor]]) -> torch.Tensor:
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2  # This equates to lambda=2.0 for the feature matching loss


def discriminator_loss(
    disc_real_outputs: List[torch.Tensor], disc_generated_outputs: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor], List[torch.Tensor]]:
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg**2)
        loss += r_loss + g_loss
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())

    return loss, r_losses, g_losses


def generator_loss(
    disc_outputs: List[torch.Tensor],
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        l = torch.mean((1 - dg) ** 2)
        gen_losses.append(l)
        loss += l

    return loss, gen_losses
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
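For orientation, a minimal sketch (not part of the file above; shapes and the 24 kHz sample rate are assumptions) of how the multi-scale mel loss and the GAN losses are typically called, run from `GPT_SoVITS/BigVGAN`:

```python
import torch
from loss import MultiScaleMelSpectrogramLoss, discriminator_loss, generator_loss

mel_loss = MultiScaleMelSpectrogramLoss(sampling_rate=24000)
y = torch.randn(2, 1, 24000)      # reference waveform [B, C, T], 1 s at 24 kHz (assumed)
y_hat = torch.randn(2, 1, 24000)  # generated waveform
print(mel_loss(y_hat, y))         # scalar: L1 over seven log-mel scales

# The GAN losses consume lists of per-discriminator output tensors.
d_loss, _, _ = discriminator_loss([torch.rand(2, 10)], [torch.rand(2, 10)])
g_loss, _ = generator_loss([torch.rand(2, 10)])
```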
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/inference_e2e.py
GPT_SoVITS/BigVGAN/inference_e2e.py
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

from __future__ import absolute_import, division, print_function, unicode_literals

import glob
import os
import numpy as np
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import MAX_WAV_VALUE
from bigvgan import BigVGAN as Generator

h = None
device = None
torch.backends.cudnn.benchmark = False


def load_checkpoint(filepath, device):
    assert os.path.isfile(filepath)
    print(f"Loading '{filepath}'")
    checkpoint_dict = torch.load(filepath, map_location=device)
    print("Complete.")
    return checkpoint_dict


def scan_checkpoint(cp_dir, prefix):
    pattern = os.path.join(cp_dir, prefix + "*")
    cp_list = glob.glob(pattern)
    if len(cp_list) == 0:
        return ""
    return sorted(cp_list)[-1]


def inference(a, h):
    generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device)

    state_dict_g = load_checkpoint(a.checkpoint_file, device)
    generator.load_state_dict(state_dict_g["generator"])

    filelist = os.listdir(a.input_mels_dir)

    os.makedirs(a.output_dir, exist_ok=True)

    generator.eval()
    generator.remove_weight_norm()

    with torch.no_grad():
        for i, filname in enumerate(filelist):
            # Load the mel spectrogram in .npy format
            x = np.load(os.path.join(a.input_mels_dir, filname))
            x = torch.FloatTensor(x).to(device)
            if len(x.shape) == 2:
                x = x.unsqueeze(0)

            y_g_hat = generator(x)

            audio = y_g_hat.squeeze()
            audio = audio * MAX_WAV_VALUE
            audio = audio.cpu().numpy().astype("int16")

            output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + "_generated_e2e.wav")
            write(output_file, h.sampling_rate, audio)
            print(output_file)


def main():
    print("Initializing Inference Process..")

    parser = argparse.ArgumentParser()
    parser.add_argument("--input_mels_dir", default="test_mel_files")
    parser.add_argument("--output_dir", default="generated_files_from_mel")
    parser.add_argument("--checkpoint_file", required=True)
    parser.add_argument("--use_cuda_kernel", action="store_true", default=False)

    a = parser.parse_args()

    config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json")
    with open(config_file) as f:
        data = f.read()

    global h
    json_config = json.loads(data)
    h = AttrDict(json_config)

    torch.manual_seed(h.seed)
    global device
    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    inference(a, h)


if __name__ == "__main__":
    main()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/activations.py
GPT_SoVITS/BigVGAN/activations.py
# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.

import torch
from torch import nn, sin, pow
from torch.nn import Parameter


class Snake(nn.Module):
    """
    Implementation of a sine-based periodic activation function
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter
    References:
        - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = Snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    """

    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        """
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha: trainable parameter
            alpha is initialized to 1 by default, higher values = higher-frequency.
            alpha will be trained along with the rest of your model.
        """
        super(Snake, self).__init__()
        self.in_features = in_features

        # Initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # Log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
        else:  # Linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """
        Forward pass of the function.
        Applies the function to the input elementwise.
        Snake := x + 1/a * sin^2 (xa)
        """
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # Line up with x to [B, C, T]
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x


class SnakeBeta(nn.Module):
    """
    A modified Snake function which uses separate parameters for the magnitude of the periodic components
    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter that controls frequency
        - beta - trainable parameter that controls magnitude
    References:
        - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = SnakeBeta(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    """

    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        """
        Initialization.
        INPUT:
            - in_features: shape of the input
            - alpha - trainable parameter that controls frequency
            - beta - trainable parameter that controls magnitude
            alpha is initialized to 1 by default, higher values = higher-frequency.
            beta is initialized to 1 by default, higher values = higher-magnitude.
            alpha will be trained along with the rest of your model.
        """
        super(SnakeBeta, self).__init__()
        self.in_features = in_features

        # Initialize alpha
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:  # Log scale alphas initialized to zeros
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
            self.beta = Parameter(torch.zeros(in_features) * alpha)
        else:  # Linear scale alphas initialized to ones
            self.alpha = Parameter(torch.ones(in_features) * alpha)
            self.beta = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """
        Forward pass of the function.
        Applies the function to the input elementwise.
        SnakeBeta := x + 1/b * sin^2 (xa)
        """
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # Line up with x to [B, C, T]
        beta = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
            beta = torch.exp(beta)
        x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
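A short, self-contained sketch of the activations in isolation (not part of the file above; shapes are assumptions), runnable from `GPT_SoVITS/BigVGAN`:

```python
import torch
from activations import Snake, SnakeBeta

x = torch.randn(8, 64, 200)  # (B, C, T)
print(Snake(64)(x).shape)                           # torch.Size([8, 64, 200])
print(SnakeBeta(64, alpha_logscale=True)(x).shape)  # same shape; separate frequency/magnitude params
```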
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/env.py
GPT_SoVITS/BigVGAN/env.py
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

import os
import shutil


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


def build_env(config, config_name, path):
    t_path = os.path.join(path, config_name)
    if config != t_path:
        os.makedirs(path, exist_ok=True)
        shutil.copyfile(config, os.path.join(path, config_name))
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
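`AttrDict` simply aliases a dict's keys as attributes, which is how the `h` config object is used throughout BigVGAN. A tiny illustration (values assumed, not from any real config):

```python
from env import AttrDict

h = AttrDict({"sampling_rate": 24000, "num_mels": 100})
print(h.sampling_rate, h["num_mels"])  # attribute and key access hit the same storage
h.hop_size = 256                       # new attributes become new dict keys as well
print(h["hop_size"])
```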
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py
GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.

import os
import sys

# to import modules from parent_dir
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(parent_dir)

import torch
from alias_free_activation.cuda import activation1d
from activations import SnakeBeta


def test_load_fused_kernels():
    try:
        print("[Success] load_fused_kernels")
    except ImportError as e:
        print("[Fail] load_fused_kernels")
        raise e


def test_anti_alias_activation():
    data = torch.rand((10, 10, 200), device="cuda")

    # Check activations, Snake CUDA vs. Torch
    fused_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=True).cuda()
    fused_activation_output = fused_anti_alias_activation(data)

    torch_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=False).cuda()
    torch_activation_output = torch_anti_alias_activation(data)

    test_result = (fused_activation_output - torch_activation_output).abs()

    while test_result.dim() != 1:
        test_result = test_result.mean(dim=-1)

    diff = test_result.mean(dim=-1)

    if diff <= 1e-3:
        print(
            f"\n[Success] test_fused_anti_alias_activation"
            f"\n > mean_difference={diff}"
            f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}"
            f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
        )
    else:
        print(
            f"\n[Fail] test_fused_anti_alias_activation"
            f"\n > mean_difference={diff}, "
            f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, "
            f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
        )


if __name__ == "__main__":
    from alias_free_activation.cuda import load

    load.load()
    test_load_fused_kernels()
    test_anti_alias_activation()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py
GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.

import os
import sys

# to import modules from parent_dir
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(parent_dir)

import torch
import json
from env import AttrDict
from bigvgan import BigVGAN
from time import time
from tqdm import tqdm
from meldataset import mel_spectrogram, MAX_WAV_VALUE
from scipy.io.wavfile import write
import numpy as np

import argparse

torch.backends.cudnn.benchmark = True

# For easier debugging
torch.set_printoptions(linewidth=200, threshold=10_000)


def generate_soundwave(duration=5.0, sr=24000):
    t = np.linspace(0, duration, int(sr * duration), False, dtype=np.float32)

    modulation = np.sin(2 * np.pi * t / duration)

    min_freq = 220
    max_freq = 1760
    frequencies = min_freq + (max_freq - min_freq) * (modulation + 1) / 2
    soundwave = np.sin(2 * np.pi * frequencies * t)

    soundwave = soundwave / np.max(np.abs(soundwave)) * 0.95

    return soundwave, sr


def get_mel(x, h):
    return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)


def load_checkpoint(filepath, device):
    assert os.path.isfile(filepath)
    print(f"Loading '{filepath}'")
    checkpoint_dict = torch.load(filepath, map_location=device)
    print("Complete.")
    return checkpoint_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Test script to check CUDA kernel correctness.")
    parser.add_argument(
        "--checkpoint_file",
        type=str,
        required=True,
        help="Path to the checkpoint file. Assumes config.json exists in the directory.",
    )

    args = parser.parse_args()

    config_file = os.path.join(os.path.split(args.checkpoint_file)[0], "config.json")
    with open(config_file) as f:
        config = f.read()
    json_config = json.loads(config)
    h = AttrDict({**json_config})

    print("loading plain Pytorch BigVGAN")
    generator_original = BigVGAN(h).to("cuda")
    print("loading CUDA kernel BigVGAN with auto-build")
    generator_cuda_kernel = BigVGAN(h, use_cuda_kernel=True).to("cuda")

    state_dict_g = load_checkpoint(args.checkpoint_file, "cuda")
    generator_original.load_state_dict(state_dict_g["generator"])
    generator_cuda_kernel.load_state_dict(state_dict_g["generator"])

    generator_original.remove_weight_norm()
    generator_original.eval()
    generator_cuda_kernel.remove_weight_norm()
    generator_cuda_kernel.eval()

    # define number of samples and length of mel frame to benchmark
    num_sample = 10
    num_mel_frame = 16384

    # CUDA kernel correctness check
    diff = 0.0
    for i in tqdm(range(num_sample)):
        # Random mel
        data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda")

        with torch.inference_mode():
            audio_original = generator_original(data)

        with torch.inference_mode():
            audio_cuda_kernel = generator_cuda_kernel(data)

        # Both outputs should be (almost) the same
        test_result = (audio_original - audio_cuda_kernel).abs()
        diff += test_result.mean(dim=-1).item()

    diff /= num_sample
    if diff <= 2e-3:  # We can expect a small difference (~1e-3) which does not affect perceptual quality
        print(
            f"\n[Success] test CUDA fused vs. plain torch BigVGAN inference"
            f"\n > mean_difference={diff}"
            f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}"
            f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}"
        )
    else:
        print(
            f"\n[Fail] test CUDA fused vs. plain torch BigVGAN inference"
            f"\n > mean_difference={diff}"
            f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}, "
            f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}"
        )

    del data, audio_original, audio_cuda_kernel

    # Variables for tracking total time and VRAM usage
    toc_total_original = 0
    toc_total_cuda_kernel = 0
    vram_used_original_total = 0
    vram_used_cuda_kernel_total = 0
    audio_length_total = 0

    # Measure Original inference in isolation
    for i in tqdm(range(num_sample)):
        torch.cuda.reset_peak_memory_stats(device="cuda")
        data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda")
        torch.cuda.synchronize()
        tic = time()
        with torch.inference_mode():
            audio_original = generator_original(data)
        torch.cuda.synchronize()
        toc = time() - tic
        toc_total_original += toc

        vram_used_original_total += torch.cuda.max_memory_allocated(device="cuda")

        del data, audio_original
        torch.cuda.empty_cache()

    # Measure CUDA kernel inference in isolation
    for i in tqdm(range(num_sample)):
        torch.cuda.reset_peak_memory_stats(device="cuda")
        data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda")
        torch.cuda.synchronize()
        tic = time()
        with torch.inference_mode():
            audio_cuda_kernel = generator_cuda_kernel(data)
        torch.cuda.synchronize()
        toc = time() - tic
        toc_total_cuda_kernel += toc

        audio_length_total += audio_cuda_kernel.shape[-1]

        vram_used_cuda_kernel_total += torch.cuda.max_memory_allocated(device="cuda")

        del data, audio_cuda_kernel
        torch.cuda.empty_cache()

    # Calculate metrics
    audio_second = audio_length_total / h.sampling_rate
    khz_original = audio_length_total / toc_total_original / 1000
    khz_cuda_kernel = audio_length_total / toc_total_cuda_kernel / 1000
    vram_used_original_gb = vram_used_original_total / num_sample / (1024**3)
    vram_used_cuda_kernel_gb = vram_used_cuda_kernel_total / num_sample / (1024**3)

    # Print results
    print(
        f"Original BigVGAN: took {toc_total_original:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_original:.1f}kHz, {audio_second / toc_total_original:.1f} faster than realtime, VRAM used {vram_used_original_gb:.1f} GB"
    )
    print(
        f"CUDA kernel BigVGAN: took {toc_total_cuda_kernel:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_cuda_kernel:.1f}kHz, {audio_second / toc_total_cuda_kernel:.1f} faster than realtime, VRAM used {vram_used_cuda_kernel_gb:.1f} GB"
    )
    print(f"speedup of CUDA kernel: {khz_cuda_kernel / khz_original}")
    print(f"VRAM saving of CUDA kernel: {vram_used_original_gb / vram_used_cuda_kernel_gb}")

    # Use artificial sine waves for inference test
    audio_real, sr = generate_soundwave(duration=5.0, sr=h.sampling_rate)
    audio_real = torch.tensor(audio_real).to("cuda")
    # Compute mel spectrogram from the ground truth audio
    x = get_mel(audio_real.unsqueeze(0), h)

    with torch.inference_mode():
        y_g_hat_original = generator_original(x)
        y_g_hat_cuda_kernel = generator_cuda_kernel(x)

    audio_real = audio_real.squeeze()
    audio_real = audio_real * MAX_WAV_VALUE
    audio_real = audio_real.cpu().numpy().astype("int16")

    audio_original = y_g_hat_original.squeeze()
    audio_original = audio_original * MAX_WAV_VALUE
    audio_original = audio_original.cpu().numpy().astype("int16")

    audio_cuda_kernel = y_g_hat_cuda_kernel.squeeze()
    audio_cuda_kernel = audio_cuda_kernel * MAX_WAV_VALUE
    audio_cuda_kernel = audio_cuda_kernel.cpu().numpy().astype("int16")

    os.makedirs("tmp", exist_ok=True)
    output_file_real = os.path.join("tmp", "audio_real.wav")
    output_file_original = os.path.join("tmp", "audio_generated_original.wav")
    output_file_cuda_kernel = os.path.join("tmp", "audio_generated_cuda_kernel.wav")
    write(output_file_real, h.sampling_rate, audio_real)
    write(output_file_original, h.sampling_rate, audio_original)
    write(output_file_cuda_kernel, h.sampling_rate, audio_cuda_kernel)
    print("Example generated audios of original vs. fused CUDA kernel written to tmp!")

    print("Done")
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/tests/test_activation.py
GPT_SoVITS/BigVGAN/tests/test_activation.py
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.

import os
import sys

# to import modules from parent_dir
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(parent_dir)

import torch
from alias_free_activation.cuda import activation1d
from activations import Snake


def test_load_fused_kernels():
    try:
        print("[Success] load_fused_kernels")
    except ImportError as e:
        print("[Fail] load_fused_kernels")
        raise e


def test_anti_alias_activation():
    data = torch.rand((10, 10, 200), device="cuda")

    # Check activations.Snake cuda vs. torch
    fused_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=True).cuda()
    fused_activation_output = fused_anti_alias_activation(data)

    torch_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=False).cuda()
    torch_activation_output = torch_anti_alias_activation(data)

    test_result = (fused_activation_output - torch_activation_output).abs()

    while test_result.dim() != 1:
        test_result = test_result.mean(dim=-1)

    diff = test_result.mean(dim=-1)

    if diff <= 1e-3:
        print(
            f"\n[Success] test_fused_anti_alias_activation"
            f"\n > mean_difference={diff}"
            f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}"
            f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
        )
    else:
        print(
            f"\n[Fail] test_fused_anti_alias_activation"
            f"\n > mean_difference={diff}, "
            f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, "
            f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
        )


if __name__ == "__main__":
    from alias_free_activation.cuda import load

    load.load()
    test_load_fused_kernels()
    test_anti_alias_activation()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/load.py
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/load.py
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.

import os
import pathlib
import subprocess

from torch.utils import cpp_extension

"""
Setting this param to a list has a problem of generating different compilation commands
(with a different order of architectures) and leading to recompilation of fused kernels.
Set it to an empty string to avoid recompilation and assign arch flags explicitly in
extra_cuda_cflags below.
"""
os.environ["TORCH_CUDA_ARCH_LIST"] = ""


def load():
    # Check if cuda 11 is installed for compute capability 8.0
    cc_flag = []
    _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
    if int(bare_metal_major) >= 11:
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_80,code=sm_80")

    # Build path
    srcpath = pathlib.Path(__file__).parent.absolute()
    buildpath = srcpath / "build"
    _create_build_dir(buildpath)

    # Helper function to build the kernels.
    def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
        return cpp_extension.load(
            name=name,
            sources=sources,
            build_directory=buildpath,
            extra_cflags=[
                "-O3",
            ],
            extra_cuda_cflags=[
                "-O3",
                "-gencode",
                "arch=compute_70,code=sm_70",
                "--use_fast_math",
            ]
            + extra_cuda_flags
            + cc_flag,
            verbose=True,
        )

    extra_cuda_flags = [
        "-U__CUDA_NO_HALF_OPERATORS__",
        "-U__CUDA_NO_HALF_CONVERSIONS__",
        "--expt-relaxed-constexpr",
        "--expt-extended-lambda",
    ]

    sources = [
        srcpath / "anti_alias_activation.cpp",
        srcpath / "anti_alias_activation_cuda.cu",
    ]
    anti_alias_activation_cuda = _cpp_extention_load_helper("anti_alias_activation_cuda", sources, extra_cuda_flags)

    return anti_alias_activation_cuda


def _get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]

    return raw_output, bare_metal_major, bare_metal_minor


def _create_build_dir(buildpath):
    try:
        os.mkdir(buildpath)
    except OSError:
        if not os.path.isdir(buildpath):
            print(f"Creation of the build directory {buildpath} failed")
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
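A hedged usage note: the module JIT-compiles on first call and needs a CUDA toolchain (`nvcc` reachable via `CUDA_HOME`); there is no prebuilt fallback here.

```python
from alias_free_activation.cuda import load

# Compiles anti_alias_activation.cpp/.cu into ./build on first call (slow once, cached after).
anti_alias_activation_cuda = load.load()
```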
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.

import torch
import torch.nn as nn
from alias_free_activation.torch.resample import UpSample1d, DownSample1d

# load fused CUDA kernel: this enables importing anti_alias_activation_cuda
from alias_free_activation.cuda import load

anti_alias_activation_cuda = load.load()


class FusedAntiAliasActivation(torch.autograd.Function):
    """
    Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs.
    The hyperparameters are hard-coded in the kernel to maximize speed.
    NOTE: The fused kernel is incorrect for Activation1d with different hyperparameters.
    """

    @staticmethod
    def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta):
        activation_results = anti_alias_activation_cuda.forward(inputs, up_ftr, down_ftr, alpha, beta)

        return activation_results

    @staticmethod
    def backward(ctx, output_grads):
        raise NotImplementedError
        return output_grads, None, None


class Activation1d(nn.Module):
    def __init__(
        self,
        activation,
        up_ratio: int = 2,
        down_ratio: int = 2,
        up_kernel_size: int = 12,
        down_kernel_size: int = 12,
        fused: bool = True,
    ):
        super().__init__()
        self.up_ratio = up_ratio
        self.down_ratio = down_ratio
        self.act = activation
        self.upsample = UpSample1d(up_ratio, up_kernel_size)
        self.downsample = DownSample1d(down_ratio, down_kernel_size)

        self.fused = fused  # Whether to use fused CUDA kernel or not

    def forward(self, x):
        if not self.fused:
            x = self.upsample(x)
            x = self.act(x)
            x = self.downsample(x)
            return x
        else:
            if self.act.__class__.__name__ == "Snake":
                beta = self.act.alpha.data  # Snake uses same params for alpha and beta
            else:
                beta = self.act.beta.data  # Snakebeta uses different params for alpha and beta
            alpha = self.act.alpha.data
            if not self.act.alpha_logscale:  # Exp baked into cuda kernel, cancel it out with a log
                alpha = torch.log(alpha)
                beta = torch.log(beta)

            x = FusedAntiAliasActivation.apply(x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta)
            return x
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
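A usage sketch mirroring the test scripts earlier in this dump (not part of the file above; requires a CUDA GPU and the JIT-built kernel, and the shapes are assumptions):

```python
import torch
from alias_free_activation.cuda.activation1d import Activation1d
from activations import SnakeBeta

act = Activation1d(activation=SnakeBeta(10), fused=True).cuda()
x = torch.rand(10, 10, 200, device="cuda")
print(act(x).shape)  # fused path; agrees with fused=False to ~1e-3 per the tests
```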
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py
GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

if "sinc" in dir(torch):
    sinc = torch.sinc
else:
    # This code is adopted from adefossez's julius.core.sinc under the MIT License
    # https://adefossez.github.io/julius/julius/core.html
    # LICENSE is in incl_licenses directory.
    def sinc(x: torch.Tensor):
        """
        Implementation of sinc, i.e. sin(pi * x) / (pi * x)
        __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
        """
        return torch.where(
            x == 0,
            torch.tensor(1.0, device=x.device, dtype=x.dtype),
            torch.sin(math.pi * x) / math.pi / x,
        )


# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License
# https://adefossez.github.io/julius/julius/lowpass.html
# LICENSE is in incl_licenses directory.
def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):
    # return filter [1,1,kernel_size]
    even = kernel_size % 2 == 0
    half_size = kernel_size // 2

    # For kaiser window
    delta_f = 4 * half_width
    A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
    if A > 50.0:
        beta = 0.1102 * (A - 8.7)
    elif A >= 21.0:
        beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0)
    else:
        beta = 0.0
    window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)

    # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
    if even:
        time = torch.arange(-half_size, half_size) + 0.5
    else:
        time = torch.arange(kernel_size) - half_size
    if cutoff == 0:
        filter_ = torch.zeros_like(time)
    else:
        filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
        """
        Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal.
        """
        filter_ /= filter_.sum()
        filter = filter_.view(1, 1, kernel_size)

    return filter


class LowPassFilter1d(nn.Module):
    def __init__(
        self,
        cutoff=0.5,
        half_width=0.6,
        stride: int = 1,
        padding: bool = True,
        padding_mode: str = "replicate",
        kernel_size: int = 12,
    ):
        """
        kernel_size should be even number for stylegan3 setup, in this implementation, odd number is also possible.
        """
        super().__init__()
        if cutoff < -0.0:
            raise ValueError("Minimum cutoff must be larger than zero.")
        if cutoff > 0.5:
            raise ValueError("A cutoff above 0.5 does not make sense.")
        self.kernel_size = kernel_size
        self.even = kernel_size % 2 == 0
        self.pad_left = kernel_size // 2 - int(self.even)
        self.pad_right = kernel_size // 2
        self.stride = stride
        self.padding = padding
        self.padding_mode = padding_mode
        filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
        self.register_buffer("filter", filter)

    # Input [B, C, T]
    def forward(self, x):
        _, C, _ = x.shape

        if self.padding:
            x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode)
        out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)

        return out
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
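A quick numerical check of the Kaiser-windowed sinc design (not part of the file above; parameter values are assumptions), run from `GPT_SoVITS/BigVGAN`:

```python
import torch
from alias_free_activation.torch.filter import kaiser_sinc_filter1d, LowPassFilter1d

filt = kaiser_sinc_filter1d(cutoff=0.25, half_width=0.1, kernel_size=12)
print(filt.shape, float(filt.sum()))  # torch.Size([1, 1, 12]), sum normalized to ~1.0

lp = LowPassFilter1d(cutoff=0.25, half_width=0.1, stride=2, kernel_size=12)
print(lp(torch.randn(1, 4, 100)).shape)  # torch.Size([1, 4, 50]): filtered, then decimated by 2
```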
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/alias_free_activation/torch/act.py
GPT_SoVITS/BigVGAN/alias_free_activation/torch/act.py
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.

import torch.nn as nn
from .resample import UpSample1d, DownSample1d


class Activation1d(nn.Module):
    def __init__(
        self,
        activation,
        up_ratio: int = 2,
        down_ratio: int = 2,
        up_kernel_size: int = 12,
        down_kernel_size: int = 12,
    ):
        super().__init__()
        self.up_ratio = up_ratio
        self.down_ratio = down_ratio
        self.act = activation
        self.upsample = UpSample1d(up_ratio, up_kernel_size)
        self.downsample = DownSample1d(down_ratio, down_kernel_size)

    # x: [B,C,T]
    def forward(self, x):
        x = self.upsample(x)
        x = self.act(x)
        x = self.downsample(x)

        return x
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
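The unfused reference pipeline in one picture (not part of the file above; the `Snake` activation and shapes are assumptions), run from `GPT_SoVITS/BigVGAN` so both imports resolve:

```python
import torch
from alias_free_activation.torch.act import Activation1d
from activations import Snake

act = Activation1d(activation=Snake(32))
x = torch.randn(4, 32, 100)
print(act(x).shape)  # torch.Size([4, 32, 100]): upsample x2 -> activate -> downsample x2
```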
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/alias_free_activation/torch/__init__.py
GPT_SoVITS/BigVGAN/alias_free_activation/torch/__init__.py
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.

from .filter import *
from .resample import *
from .act import *
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/BigVGAN/alias_free_activation/torch/resample.py
GPT_SoVITS/BigVGAN/alias_free_activation/torch/resample.py
# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.

import torch.nn as nn
from torch.nn import functional as F

from .filter import LowPassFilter1d
from .filter import kaiser_sinc_filter1d


class UpSample1d(nn.Module):
    def __init__(self, ratio=2, kernel_size=None):
        super().__init__()
        self.ratio = ratio
        self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
        self.stride = ratio
        self.pad = self.kernel_size // ratio - 1
        self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
        self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
        filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size)
        self.register_buffer("filter", filter)

    # x: [B, C, T]
    def forward(self, x):
        _, C, _ = x.shape

        x = F.pad(x, (self.pad, self.pad), mode="replicate")
        x = self.ratio * F.conv_transpose1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
        x = x[..., self.pad_left : -self.pad_right]

        return x


class DownSample1d(nn.Module):
    def __init__(self, ratio=2, kernel_size=None):
        super().__init__()
        self.ratio = ratio
        self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
        self.lowpass = LowPassFilter1d(
            cutoff=0.5 / ratio,
            half_width=0.6 / ratio,
            stride=ratio,
            kernel_size=self.kernel_size,
        )

    def forward(self, x):
        xx = self.lowpass(x)

        return xx
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
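A round-trip sketch showing the anti-aliased 2x up/down pair is length-preserving (not part of the file above; shapes assumed):

```python
import torch
from alias_free_activation.torch.resample import UpSample1d, DownSample1d

up, down = UpSample1d(ratio=2), DownSample1d(ratio=2)
x = torch.randn(1, 8, 100)
print(up(x).shape, down(up(x)).shape)  # torch.Size([1, 8, 200]) torch.Size([1, 8, 100])
```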
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/feature_extractor/whisper_enc.py
GPT_SoVITS/feature_extractor/whisper_enc.py
import torch


def get_model():
    import whisper

    model = whisper.load_model("small", device="cpu")

    return model.encoder


def get_content(model=None, wav_16k_tensor=None):
    from whisper import log_mel_spectrogram, pad_or_trim

    dev = next(model.parameters()).device
    mel = log_mel_spectrogram(wav_16k_tensor).to(dev)[:, :3000]
    # if torch.cuda.is_available():
    #     mel = mel.to(torch.float16)
    feature_len = mel.shape[-1] // 2
    assert mel.shape[-1] < 3000, "Input audio is too long; only audio within 30 seconds is allowed"
    with torch.no_grad():
        feature = model(pad_or_trim(mel, 3000).unsqueeze(0))[:1, :feature_len, :].transpose(1, 2)
    return feature
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
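A hedged usage sketch (not part of the file above; requires the `openai-whisper` package, the 5-second input is an assumption, and the encoder stays on CPU per `get_model`):

```python
import torch
from feature_extractor import whisper_enc  # run from the GPT_SoVITS directory

encoder = whisper_enc.get_model()                  # loads Whisper "small", keeps only the encoder
wav_16k = torch.randn(16000 * 5)                   # 5 s of 16 kHz mono audio (must be < 30 s)
feats = whisper_enc.get_content(encoder, wav_16k)
print(feats.shape)                                 # [1, 768, n_mel_frames // 2] for "small"
```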
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/feature_extractor/cnhubert.py
GPT_SoVITS/feature_extractor/cnhubert.py
import torch
import os
from transformers import logging as tf_logging

tf_logging.set_verbosity_error()

import logging

logging.getLogger("numba").setLevel(logging.WARNING)

from transformers import (
    Wav2Vec2FeatureExtractor,
    HubertModel,
)

import utils
import torch.nn as nn

cnhubert_base_path = None


class CNHubert(nn.Module):
    def __init__(self, base_path: str = None):
        super().__init__()
        if base_path is None:
            base_path = cnhubert_base_path
        if not os.path.exists(base_path):
            raise FileNotFoundError(base_path)
        self.model = HubertModel.from_pretrained(base_path, local_files_only=True)
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(base_path, local_files_only=True)

    def forward(self, x):
        input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
        feats = self.model(input_values)["last_hidden_state"]
        return feats


# class CNHubertLarge(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.model = HubertModel.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large")
#         self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large")
#     def forward(self, x):
#         input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
#         feats = self.model(input_values)["last_hidden_state"]
#         return feats
#
# class CVec(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.model = HubertModel.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base")
#         self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base")
#     def forward(self, x):
#         input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
#         feats = self.model(input_values)["last_hidden_state"]
#         return feats
#
# class cnw2v2base(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.model = Wav2Vec2Model.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base")
#         self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base")
#     def forward(self, x):
#         input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
#         feats = self.model(input_values)["last_hidden_state"]
#         return feats


def get_model():
    model = CNHubert()
    model.eval()
    return model


# def get_large_model():
#     model = CNHubertLarge()
#     model.eval()
#     return model
#
# def get_model_cvec():
#     model = CVec()
#     model.eval()
#     return model
#
# def get_model_cnw2v2base():
#     model = cnw2v2base()
#     model.eval()
#     return model


def get_content(hmodel, wav_16k_tensor):
    with torch.no_grad():
        feats = hmodel(wav_16k_tensor)
    return feats.transpose(1, 2)


if __name__ == "__main__":
    model = get_model()
    src_path = "/Users/Shared/原音频2.wav"
    wav_16k_tensor = utils.load_wav_to_torch_and_resample(src_path, 16000)
    feats = get_content(model, wav_16k_tensor)
    print(feats.shape)
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
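A usage sketch with a hypothetical local model directory (the path below is a placeholder, not a real location; run from the GPT_SoVITS directory):

```python
import torch
from feature_extractor import cnhubert

cnhubert.cnhubert_base_path = "/path/to/chinese-hubert-base"  # placeholder path
model = cnhubert.get_model()
wav_16k = torch.randn(16000)                 # 1 s of 16 kHz audio (assumed)
feats = cnhubert.get_content(model, wav_16k)
print(feats.shape)                           # [1, 768, T']: hidden states, channel-first
```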
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/feature_extractor/__init__.py
GPT_SoVITS/feature_extractor/__init__.py
from . import cnhubert, whisper_enc

content_module_map = {"cnhubert": cnhubert, "whisper": whisper_enc}
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/__init__.py
GPT_SoVITS/AR/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/text_processing/symbols.py
GPT_SoVITS/AR/text_processing/symbols.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py
# reference: https://github.com/lifeiteng/vall-e
PAD = "_"
PUNCTUATION = ';:,.!?¡¿—…"«»“” '
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
IPA_LETTERS = (
    "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
)
SYMBOLS = [PAD] + list(PUNCTUATION) + list(LETTERS) + list(IPA_LETTERS)
SPACE_ID = SYMBOLS.index(" ")
SYMBOL_TO_ID = {s: i for i, s in enumerate(SYMBOLS)}
ID_TO_SYMBOL = {i: s for i, s in enumerate(SYMBOLS)}
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
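The tables are plain dict lookups; a two-line illustration (not part of the file above; run from the GPT_SoVITS directory so the `AR` package resolves):

```python
from AR.text_processing.symbols import SYMBOL_TO_ID, ID_TO_SYMBOL, SPACE_ID

ids = [SYMBOL_TO_ID[c] for c in "Hi!"]
print(ids, "".join(ID_TO_SYMBOL[i] for i in ids), SPACE_ID)  # round-trips back to "Hi!"
```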
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/text_processing/phonemizer.py
GPT_SoVITS/AR/text_processing/phonemizer.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import re
from typing import Dict
from typing import List

import regex
from gruut import sentences
from gruut.const import Sentence
from gruut.const import Word

from AR.text_processing.symbols import SYMBOL_TO_ID


class GruutPhonemizer:
    def __init__(self, language: str):
        self._phonemizer = sentences
        self.lang = language
        self.symbol_to_id = SYMBOL_TO_ID
        self._special_cases_dict: Dict[str, str] = {
            r"\.\.\.": "... ",
            ";": "; ",
            ":": ": ",
            ",": ", ",
            r"\.": ". ",
            "!": "! ",
            r"\?": "? ",
            "—": "—",
            "…": "… ",
            "«": "«",
            "»": "»",
        }
        self._punctuation_regexp: str = rf"([{''.join(self._special_cases_dict.keys())}])"

    def _normalize_punctuation(self, text: str) -> str:
        text = regex.sub(rf"\pZ+{self._punctuation_regexp}", r"\1", text)
        text = regex.sub(rf"{self._punctuation_regexp}(\pL)", r"\1 \2", text)
        text = regex.sub(r"\pZ+", r" ", text)
        return text.strip()

    def _convert_punctuation(self, word: Word) -> str:
        if not word.phonemes:
            return ""
        if word.phonemes[0] in ["‖", "|"]:
            return word.text.strip()

        phonemes = "".join(word.phonemes)
        # remove modifier characters ˈˌː with regex
        phonemes = re.sub(r"[ˈˌː͡]", "", phonemes)
        return phonemes.strip()

    def phonemize(self, text: str, espeak: bool = False) -> str:
        text_to_phonemize: str = self._normalize_punctuation(text)
        sents: List[Sentence] = [sent for sent in self._phonemizer(text_to_phonemize, lang="en-us", espeak=espeak)]
        words: List[str] = [self._convert_punctuation(word) for word in itertools.chain(*sents)]
        return " ".join(words)

    def transform(self, phonemes):
        # convert phonemes to ids
        # dictionary is in symbols.py
        return [self.symbol_to_id[p] for p in phonemes if p in self.symbol_to_id.keys()]


if __name__ == "__main__":
    phonemizer = GruutPhonemizer("en-us")
    # text -> IPA
    phonemes = phonemizer.phonemize("Hello, wor-ld ?")
    print("phonemes:", phonemes)
    print("len(phonemes):", len(phonemes))
    phoneme_ids = phonemizer.transform(phonemes)
    print("phoneme_ids:", phoneme_ids)
    print("len(phoneme_ids):", len(phoneme_ids))
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/text_processing/__init__.py
GPT_SoVITS/AR/text_processing/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/models/t2s_model.py
GPT_SoVITS/AR/models/t2s_model.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import math
from typing import List, Optional

import torch
from torch import nn
from torch.nn import functional as F
from torchmetrics.classification import MulticlassAccuracy
from tqdm import tqdm

from AR.models.utils import (
    dpo_loss,
    get_batch_logps,
    make_pad_mask,
    make_pad_mask_left,
    make_reject_y,
    sample,
    topk_sampling,
)
from AR.modules.embedding import SinePositionalEmbedding, TokenEmbedding
from AR.modules.transformer import LayerNorm, TransformerEncoder, TransformerEncoderLayer

default_config = {
    "embedding_dim": 512,
    "hidden_dim": 512,
    "num_head": 8,
    "num_layers": 12,
    "num_codebook": 8,
    "p_dropout": 0.0,
    "vocab_size": 1024 + 1,
    "phoneme_vocab_size": 512,
    "EOS": 1024,
}


# @torch.jit.script  ## if enabled, the first inference is extremely slow and decoding speed becomes unstable
# Efficient implementation equivalent to the following:
def scaled_dot_product_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
    scale: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    B, H, L, S = query.size(0), query.size(1), query.size(-2), key.size(-2)
    if scale is None:
        scale_factor = torch.tensor(1 / math.sqrt(query.size(-1)))
    else:
        scale_factor = scale
    attn_bias = torch.zeros(B, H, L, S, dtype=query.dtype, device=query.device)

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_bias.masked_fill_(attn_mask, float("-inf"))
        else:
            attn_bias += attn_mask
    attn_weight = query @ key.transpose(-2, -1) * scale_factor
    attn_weight += attn_bias
    attn_weight = torch.softmax(attn_weight, dim=-1)

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_weight.masked_fill_(attn_mask, 0)
        else:
            attn_mask[attn_mask != float("-inf")] = 0
            attn_mask[attn_mask == float("-inf")] = 1
            attn_weight.masked_fill_(attn_mask, 0)

    return attn_weight @ value


@torch.jit.script
class T2SMLP:
    def __init__(self, w1, b1, w2, b2):
        self.w1 = w1
        self.b1 = b1
        self.w2 = w2
        self.b2 = b2

    def forward(self, x):
        x = F.relu(F.linear(x, self.w1, self.b1))
        x = F.linear(x, self.w2, self.b2)
        return x


@torch.jit.script
class T2SBlock:
    def __init__(
        self,
        num_heads,
        hidden_dim: int,
        mlp: T2SMLP,
        qkv_w,
        qkv_b,
        out_w,
        out_b,
        norm_w1,
        norm_b1,
        norm_eps1,
        norm_w2,
        norm_b2,
        norm_eps2,
    ):
        self.num_heads = num_heads
        self.mlp = mlp
        self.hidden_dim: int = hidden_dim
        self.qkv_w = qkv_w
        self.qkv_b = qkv_b
        self.out_w = out_w
        self.out_b = out_b
        self.norm_w1 = norm_w1
        self.norm_b1 = norm_b1
        self.norm_eps1 = norm_eps1
        self.norm_w2 = norm_w2
        self.norm_b2 = norm_b2
        self.norm_eps2 = norm_eps2

        self.false = torch.tensor(False, dtype=torch.bool)

    @torch.jit.ignore
    def to_mask(
        self,
        x: torch.Tensor,
        padding_mask: Optional[torch.Tensor],
    ):
        if padding_mask is None:
            return x

        if padding_mask.dtype == torch.bool:
            return x.masked_fill(padding_mask, 0)
        else:
            return x * padding_mask

    def process_prompt(
        self,
        x: torch.Tensor,
        attn_mask: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        torch_sdpa: bool = True,
    ):
        q, k, v = F.linear(self.to_mask(x, padding_mask), self.qkv_w, self.qkv_b).chunk(3, dim=-1)

        batch_size = q.shape[0]
        q_len = q.shape[1]
        kv_len = k.shape[1]

        q = self.to_mask(q, padding_mask)
        k_cache = self.to_mask(k, padding_mask)
        v_cache = self.to_mask(v, padding_mask)

        q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
        k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
        v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)

        if torch_sdpa:
            attn = F.scaled_dot_product_attention(q, k, v, ~attn_mask)
        else:
            attn = scaled_dot_product_attention(q, k, v, attn_mask)

        attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1)
        attn = F.linear(self.to_mask(attn, padding_mask), self.out_w, self.out_b)

        x = x + attn
        x = F.layer_norm(x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1)
        x = x + self.mlp.forward(x)
        x = F.layer_norm(
            x,
            [self.hidden_dim],
            self.norm_w2,
            self.norm_b2,
            self.norm_eps2,
        )
        return x, k_cache, v_cache

    def decode_next_token(
        self,
        x: torch.Tensor,
        k_cache: torch.Tensor,
        v_cache: torch.Tensor,
        attn_mask: torch.Tensor = None,
        torch_sdpa: bool = True,
    ):
        q, k, v = F.linear(x, self.qkv_w, self.qkv_b).chunk(3, dim=-1)

        k_cache = torch.cat([k_cache, k], dim=1)
        v_cache = torch.cat([v_cache, v], dim=1)

        batch_size = q.shape[0]
        q_len = q.shape[1]
        kv_len = k_cache.shape[1]

        q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
        k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
        v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)

        if torch_sdpa:
            attn = F.scaled_dot_product_attention(q, k, v, (~attn_mask) if attn_mask is not None else None)
        else:
            attn = scaled_dot_product_attention(q, k, v, attn_mask)

        attn = attn.transpose(1, 2).reshape(batch_size, q_len, -1)
        attn = F.linear(attn, self.out_w, self.out_b)

        x = x + attn
        x = F.layer_norm(
            x,
            [self.hidden_dim],
            self.norm_w1,
            self.norm_b1,
            self.norm_eps1,
        )
        x = x + self.mlp.forward(x)
        x = F.layer_norm(
            x,
            [self.hidden_dim],
            self.norm_w2,
            self.norm_b2,
            self.norm_eps2,
        )
        return x, k_cache, v_cache


@torch.jit.script
class T2STransformer:
    def __init__(self, num_blocks: int, blocks: List[T2SBlock]):
        self.num_blocks: int = num_blocks
        self.blocks = blocks

    def process_prompt(
        self,
        x: torch.Tensor,
        attn_mask: torch.Tensor,
        padding_mask: Optional[torch.Tensor] = None,
        torch_sdpa: bool = True,
    ):
        k_cache: List[torch.Tensor] = []
        v_cache: List[torch.Tensor] = []
        for i in range(self.num_blocks):
            x, k_cache_, v_cache_ = self.blocks[i].process_prompt(x, attn_mask, padding_mask, torch_sdpa)
            k_cache.append(k_cache_)
            v_cache.append(v_cache_)
        return x, k_cache, v_cache

    def decode_next_token(
        self,
        x: torch.Tensor,
        k_cache: List[torch.Tensor],
        v_cache: List[torch.Tensor],
        attn_mask: torch.Tensor = None,
        torch_sdpa: bool = True,
    ):
        for i in range(self.num_blocks):
            x, k_cache[i], v_cache[i] = self.blocks[i].decode_next_token(
                x, k_cache[i], v_cache[i], attn_mask, torch_sdpa
            )
        return x, k_cache, v_cache


class Text2SemanticDecoder(nn.Module):
    def __init__(self, config, norm_first=False, top_k=3):
        super(Text2SemanticDecoder, self).__init__()
        self.model_dim = config["model"]["hidden_dim"]
        self.embedding_dim = config["model"]["embedding_dim"]
        self.num_head = config["model"]["head"]
        self.num_layers = config["model"]["n_layer"]
        self.norm_first = norm_first
        self.vocab_size = config["model"]["vocab_size"]
        self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
        self.p_dropout = config["model"]["dropout"]
        self.EOS = config["model"]["EOS"]
        self.norm_first = norm_first
        assert self.EOS == self.vocab_size - 1
        # should be same as num of kmeans bin
        # assert self.EOS == 1024
        self.bert_proj = nn.Linear(1024, self.embedding_dim)
        self.ar_text_embedding = TokenEmbedding(
            self.embedding_dim,
            self.phoneme_vocab_size,
            self.p_dropout,
        )
        self.ar_text_position = SinePositionalEmbedding(
            self.embedding_dim,
            dropout=0.1,
            scale=False,
            alpha=True,
        )
        self.ar_audio_embedding = TokenEmbedding(
            self.embedding_dim,
            self.vocab_size,
            self.p_dropout,
        )
        self.ar_audio_position = SinePositionalEmbedding(
            self.embedding_dim,
            dropout=0.1,
            scale=False,
            alpha=True,
        )

        self.h = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=self.model_dim,
                nhead=self.num_head,
                dim_feedforward=self.model_dim * 4,
                dropout=0.1,
                batch_first=True,
                norm_first=norm_first,
            ),
            num_layers=self.num_layers,
            norm=LayerNorm(self.model_dim) if norm_first else None,
        )
        self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
        self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
        self.ar_accuracy_metric = MulticlassAccuracy(
            self.vocab_size,
            top_k=top_k,
            average="micro",
            multidim_average="global",
            ignore_index=self.EOS,
        )

        blocks = []
        for i in range(self.num_layers):
            layer = self.h.layers[i]
            t2smlp = T2SMLP(
                layer.linear1.weight,
                layer.linear1.bias,
                layer.linear2.weight,
                layer.linear2.bias,
            )
            block = T2SBlock(
                self.num_head,
                self.model_dim,
                t2smlp,
                layer.self_attn.in_proj_weight,
                layer.self_attn.in_proj_bias,
                layer.self_attn.out_proj.weight,
                layer.self_attn.out_proj.bias,
                layer.norm1.weight,
                layer.norm1.bias,
                layer.norm1.eps,
                layer.norm2.weight,
                layer.norm2.bias,
                layer.norm2.eps,
            )
            blocks.append(block)
        self.t2s_transformer = T2STransformer(self.num_layers, blocks)

    def make_input_data(self, x, x_lens, y, y_lens, bert_feature):
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)
        x_mask = make_pad_mask_left(x_lens)

        y_mask = make_pad_mask(y_lens)
        y_mask_int = y_mask.type(torch.int64)
        codes = y.type(torch.int64) * (1 - y_mask_int)

        # Training
        # AR Decoder
        y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
        x_len = x_lens.max()
        y_len = y_lens.max()
        y_emb = self.ar_audio_embedding(y)
        y_pos = self.ar_audio_position(y_emb)

        xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)

        ar_xy_padding_mask = xy_padding_mask

        x_attn_mask = F.pad(
            torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
            (0, y_len),
            value=True,
        )
        # x_attn_mask[:, x_len]=False
        y_attn_mask = F.pad(
            torch.triu(
                torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
                diagonal=1,
            ),
            (x_len, 0),
            value=False,
        )

        xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
        bsz, src_len = x.shape[0], x_len + y_len
        _xy_padding_mask = (
            ar_xy_padding_mask.view(bsz, 1, 1, src_len)
            .expand(-1, self.num_head, -1, -1)
            .reshape(bsz * self.num_head, 1, src_len)
        )

        xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
        new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
        new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
        xy_attn_mask = new_attn_mask
        # feed x and the full y into the model in a single pass
        xy_pos = torch.concat([x, y_pos], dim=1)

        return xy_pos, xy_attn_mask, targets

    def forward(self, x, x_lens, y, y_lens, bert_feature):
        """
        x: phoneme_ids
        y: semantic_ids
        """

        reject_y, reject_y_lens = make_reject_y(y, y_lens)

        xy_pos, xy_attn_mask, targets = self.make_input_data(x, x_lens, y, y_lens, bert_feature)

        xy_dec, _ = self.h(
            (xy_pos, None),
            mask=xy_attn_mask,
        )
        x_len = x_lens.max()
        logits = self.ar_predict_layer(xy_dec[:, x_len - 1 :])

        ###### DPO #############
        reject_xy_pos, reject_xy_attn_mask, reject_targets = self.make_input_data(
            x, x_lens, reject_y, reject_y_lens, bert_feature
        )

        reject_xy_dec, _ = self.h(
            (reject_xy_pos, None),
            mask=reject_xy_attn_mask,
        )
        x_len = x_lens.max()
        reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len - 1 :])

        # loss
        # from feiteng: the more duration per step, the more gradient it should contribute, hence sum
        loss_1 = F.cross_entropy(logits.permute(0, 2, 1), targets, reduction="sum")
        acc = self.ar_accuracy_metric(logits.permute(0, 2, 1).detach(), targets).item()

        A_logits, R_logits = get_batch_logps(logits, reject_logits, targets, reject_targets)
        loss_2, _, _ = dpo_loss(A_logits, R_logits, 0, 0, 0.2, reference_free=True)

        loss = loss_1 + loss_2

        return loss, acc

    def forward_old(self, x, x_lens, y, y_lens, bert_feature):
        """
        x: phoneme_ids
        y: semantic_ids
        """
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)
        x_mask = make_pad_mask_left(x_lens)

        y_mask = make_pad_mask(y_lens)
        y_mask_int = y_mask.type(torch.int64)
        codes = y.type(torch.int64) * (1 - y_mask_int)

        # Training
        # AR Decoder
        y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
        x_len = x_lens.max()
        y_len = y_lens.max()
        y_emb = self.ar_audio_embedding(y)
        y_pos = self.ar_audio_position(y_emb)

        xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
        ar_xy_padding_mask = xy_padding_mask

        x_attn_mask = F.pad(
            torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
            (0, y_len),
            value=True,
        )
        y_attn_mask = F.pad(
            torch.triu(
                torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
                diagonal=1,
            ),
            (x_len, 0),
            value=False,
        )
        xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
        bsz, src_len = x.shape[0], x_len + y_len
        _xy_padding_mask = (
            ar_xy_padding_mask.view(bsz, 1, 1, src_len)
            .expand(-1, self.num_head, -1, -1)
            .reshape(bsz * self.num_head, 1, src_len)
        )
        xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
        new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
        new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
        xy_attn_mask = new_attn_mask
        # feed x and the full y into the model in a single pass
        xy_pos = torch.concat([x, y_pos], dim=1)
        xy_dec, _ = self.h(
            (xy_pos, None),
            mask=xy_attn_mask,
        )
        logits = self.ar_predict_layer(xy_dec[:, x_len - 1 :]).permute(0, 2, 1)
        # loss
        # from feiteng: the more duration per step, the more gradient it should contribute, hence sum
        loss = F.cross_entropy(logits, targets, reduction="sum")
        acc = self.ar_accuracy_metric(logits.detach(), targets).item()
        return loss, acc

    # Need to check how this function differs from forward, and what to feed as prompts when there is no semantic input
    def infer(
        self,
        x,
        x_lens,
        prompts,
        bert_feature,
        top_k: int = -100,
        early_stop_num: int = -1,
        temperature: float = 1.0,
    ):
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)

        # AR Decoder
        y = prompts
        prefix_len = y.shape[1]
        x_len = x.shape[1]
        x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
        stop = False
        for _ in tqdm(range(1500)):
            y_emb = self.ar_audio_embedding(y)
            y_pos = self.ar_audio_position(y_emb)
            # feed x together with the progressively growing y
            xy_pos = torch.concat([x, y_pos], dim=1)
            y_len = y.shape[1]
            x_attn_mask_pad = F.pad(
                x_attn_mask,
                (0, y_len),
                value=True,
            )
            y_attn_mask = F.pad(
                torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
                (x_len, 0),
                value=False,
            )
            xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(y.device)

            xy_dec, _ = self.h(
                (xy_pos, None),
                mask=xy_attn_mask,
            )
            logits = self.ar_predict_layer(xy_dec[:, -1])
            samples = topk_sampling(logits, top_k=top_k, top_p=1.0, temperature=temperature)

            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                print("use early stop num:", early_stop_num)
                stop = True

            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                # print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
                stop = True
            if stop:
                if prompts.shape[1] == y.shape[1]:
                    y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                    print("bad zero prediction")
                print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
                break
            # the semantic_ids generated this step are appended to the previous y to form the new y
            # print(samples.shape)  # [1, 1]; the first 1 is the batch size
            # import os
            # os._exit(2333)
            y = torch.concat([y, samples], dim=1)
        return y

    def pad_y_eos(self, y, y_mask_int, eos_id):
        targets = F.pad(y, (0, 1), value=0) + eos_id * F.pad(y_mask_int, (0, 1), value=1)
        # shifted by one position
        return targets[:, :-1], targets

    def infer_panel_batch_infer(
        self,
        x: List[torch.LongTensor],  ##### all text tokens
        x_lens: torch.LongTensor,
        prompts: torch.LongTensor,  #### reference-audio tokens
        bert_feature: List[torch.LongTensor],
        top_k: int = -100,
        top_p: int = 100,
        early_stop_num: int = -1,
        temperature: float = 1.0,
        repetition_penalty: float = 1.35,
        **kwargs,
    ):
        if prompts is None:
            print("Warning: Prompt free is not supported batch_infer! switch to naive_infer")
            return self.infer_panel_naive_batched(
                x,
                x_lens,
                prompts,
                bert_feature,
                top_k=top_k,
                top_p=top_p,
                early_stop_num=early_stop_num,
                temperature=temperature,
                **kwargs,
            )

        max_len = kwargs.get("max_len", x_lens.max())
        x_list = []
        for x_item, bert_item in zip(x, bert_feature):
            # max_len = max(max_len, x_item.shape[0], bert_item.shape[1])
            x_item = self.ar_text_embedding(x_item.unsqueeze(0))
            x_item = x_item + self.bert_proj(bert_item.transpose(0, 1).unsqueeze(0))
            x_item = self.ar_text_position(x_item).squeeze(0)
            # x_item = F.pad(x_item,(0,0,0,max_len-x_item.shape[0]),value=0) if x_item.shape[0]<max_len else x_item  ### padding right
            x_item = (
                F.pad(x_item, (0, 0, max_len - x_item.shape[0], 0), value=0)
                if x_item.shape[0] < max_len
                else x_item
            )  ### padding left
            x_list.append(x_item)
        x: torch.Tensor = torch.stack(x_list, dim=0)

        # AR Decoder
        y = prompts

        x_len = x.shape[1]
        stop = False

        k_cache = None
        v_cache = None
        ################### first step ##########################
        assert y is not None, "Error: Prompt free is not supported batch_infer!"
        ref_free = False

        y_emb = self.ar_audio_embedding(y)
        y_len = y_emb.shape[1]
        prefix_len = y.shape[1]
        y_lens = torch.LongTensor([y_emb.shape[1]] * y_emb.shape[0]).to(x.device)
        y_pos = self.ar_audio_position(y_emb)
        xy_pos = torch.concat([x, y_pos], dim=1)

        ##### create mask #####
        bsz = x.shape[0]
        src_len = x_len + y_len
        y_paddind_mask = make_pad_mask_left(y_lens, y_len)
        x_paddind_mask = make_pad_mask_left(x_lens, max_len)

        # (bsz, x_len + y_len)
        padding_mask = torch.concat([x_paddind_mask, y_paddind_mask], dim=1)

        x_mask = F.pad(
            torch.zeros(x_len, x_len, dtype=torch.bool, device=x.device),
            (0, y_len),
            value=True,
        )
        y_mask = F.pad(  ### the upper-right ones of the yy block extend into the zeros of xy on the left, (y, x+y)
            torch.triu(torch.ones(y_len, y_len, dtype=torch.bool, device=x.device), diagonal=1),
            (x_len, 0),
            value=False,
        )

        causal_mask = torch.concat([x_mask, y_mask], dim=0).view(1, src_len, src_len).repeat(bsz, 1, 1).to(x.device)
        # padding_mask = padding_mask.unsqueeze(1) * padding_mask.unsqueeze(2)  ### [b, x+y, x+y]
        ### the line above is wrong: it would let padded tokens be "seen"
        # the correct padding_mask should be:
        # |   pad_len   |  x_len  |  y_len  |
        # [[PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],  # the first 3 rows should arguably be masked too, but they are kept so attention does not produce NaNs; the result is unaffected
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6]]

        padding_mask = padding_mask.view(bsz, 1, src_len).repeat(1, src_len, 1)

        attn_mask: torch.Tensor = causal_mask.logical_or(padding_mask)
        attn_mask = attn_mask.unsqueeze(1).expand(-1, self.num_head, -1, -1).bool()

        # the correct attn_mask should look like this:
        # |   pad_len   |  x_len  |  y_len  |
        # [[PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
        #  [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
        #  [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],  # the first 3 rows should arguably be masked too, but they are kept so attention does not produce NaNs; the result is unaffected
        #  [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
        #  [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
        #  [PAD, PAD, PAD, 1, 2, 3, EOS, EOS, EOS],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, EOS, EOS],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, EOS],
        #  [PAD, PAD, PAD, 1, 2, 3, 4, 5, 6]]

        ###### decode #####
        y_list = [None] * y.shape[0]
        batch_idx_map = list(range(y.shape[0]))
        idx_list = [None] * y.shape[0]
        for idx in tqdm(range(1500)):
            if idx == 0:
                xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, attn_mask, None)
            else:
                xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache, attn_mask)
            logits = self.ar_predict_layer(xy_dec[:, -1])

            if idx == 0:
                attn_mask = F.pad(attn_mask[:, :, -1].unsqueeze(-2), (0, 1), value=False)
            else:
                attn_mask = F.pad(attn_mask, (0, 1), value=False)

            if idx < 11:  ### require at least 10 predicted tokens before allowing a stop (0.4 s)
                logits = logits[:, :-1]

            samples = sample(
                logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
            )[0]

            y = torch.concat([y, samples], dim=1)

            ####### remove finished sequences from the batch to further cut computation
            tokens = torch.argmax(logits, dim=-1)
            reserved_idx_of_batch_for_y = None
            if (self.EOS in samples[:, 0]) or (self.EOS in tokens):  ### stop once EOS is generated
                l1 = samples[:, 0] == self.EOS
                l2 = tokens == self.EOS
                l = l1.logical_or(l2)
                removed_idx_of_batch_for_y = torch.where(l == True)[0].tolist()
                reserved_idx_of_batch_for_y = torch.where(l == False)[0]
                # batch_indexs = torch.tensor(batch_idx_map, device=y.device)[removed_idx_of_batch_for_y]
                for i in removed_idx_of_batch_for_y:
                    batch_index = batch_idx_map[i]
                    idx_list[batch_index] = idx
                    y_list[batch_index] = y[i, :-1]

                batch_idx_map = [batch_idx_map[i] for i in reserved_idx_of_batch_for_y.tolist()]

            # keep only the unfinished sequences in the batch
            if reserved_idx_of_batch_for_y is not None:
                # index = torch.LongTensor(batch_idx_map).to(y.device)
                y = torch.index_select(y, dim=0, index=reserved_idx_of_batch_for_y)
                attn_mask = torch.index_select(attn_mask, dim=0, index=reserved_idx_of_batch_for_y)
                if k_cache is not None:
                    for i in range(len(k_cache)):
                        k_cache[i] = torch.index_select(k_cache[i], dim=0, index=reserved_idx_of_batch_for_y)
                        v_cache[i] = torch.index_select(v_cache[i], dim=0, index=reserved_idx_of_batch_for_y)

            if (early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num) or idx == 1499:
                print("use early stop num:", early_stop_num)
                stop = True
                for i, batch_index in enumerate(batch_idx_map):
                    batch_index = batch_idx_map[i]
                    idx_list[batch_index] = idx
                    y_list[batch_index] = y[i, :-1]

            if None not in idx_list:
                stop = True

            if stop:
                if y.shape[1] == 0:
                    y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                    print("bad zero prediction")
                print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
                break

            ####################### update next step ###################################
            y_emb = self.ar_audio_embedding(y[:, -1:])
            xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[
                :, y_len + idx
            ].to(dtype=y_emb.dtype, device=y_emb.device)

        if None in idx_list:
            for i in range(x.shape[0]):
                if idx_list[i] is None:
                    idx_list[i] = 1500 - 1  ### if EOS was never generated, substitute the maximum length

        if ref_free:
            return y_list, [0] * x.shape[0]
        # print(idx_list)
        return y_list, idx_list

    def infer_panel_naive_batched(
        self,
        x: List[torch.LongTensor],  ##### all text tokens
        x_lens: torch.LongTensor,
        prompts: torch.LongTensor,  #### reference-audio tokens
        bert_feature: List[torch.LongTensor],
        top_k: int = -100,
        top_p: int = 100,
        early_stop_num: int = -1,
        temperature: float = 1.0,
        repetition_penalty: float = 1.35,
        **kwargs,
    ):
        y_list = []
        idx_list = []
        for i in range(len(x)):
            y, idx = next(
                self.infer_panel_naive(
                    x[i].unsqueeze(0),
                    x_lens[i],
                    prompts[i].unsqueeze(0) if prompts is not None else None,
                    bert_feature[i].unsqueeze(0),
                    top_k,
                    top_p,
                    early_stop_num,
                    temperature,
                    repetition_penalty,
                    **kwargs,
                )
            )
            y_list.append(y[0])
            idx_list.append(idx)

        return y_list, idx_list

    def infer_panel_naive(
        self,
        x: torch.LongTensor,  ##### all text tokens
        x_lens: torch.LongTensor,
        prompts: torch.LongTensor,  #### reference-audio tokens
        bert_feature: torch.LongTensor,
        top_k: int = -100,
        top_p: int = 100,
        early_stop_num: int = -1,
        temperature: float = 1.0,
        repetition_penalty: float = 1.35,
        streaming_mode: bool = False,
        chunk_length: int = 24,
        **kwargs,
    ):
        mute_emb_sim_matrix = kwargs.get("mute_emb_sim_matrix", None)
        chunk_split_thershold = kwargs.get("chunk_split_thershold", 0.3)
        check_token_num = 2

        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        x = self.ar_text_position(x)

        # AR Decoder
        y = prompts

        x_len = x.shape[1]
        x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
        stop = False
        # print(1111111, self.num_layers)

        k_cache = None
        v_cache = None
        ################### first step ##########################
        if y is not None:
            y_emb = self.ar_audio_embedding(y)
            y_len = y_emb.shape[1]
            prefix_len = y.shape[1]
            y_pos = self.ar_audio_position(y_emb)
            xy_pos = torch.concat([x, y_pos], dim=1)
            ref_free = False
        else:
            y_emb = None
            y_len = 0
            prefix_len = 0
            y_pos = None
            xy_pos = x
            y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
            ref_free = True

        bsz = x.shape[0]
        src_len = x_len + y_len
        x_attn_mask_pad = F.pad(
            x_attn_mask,
            (0, y_len),  ### extend the all-zero xx block to all-zero xx plus all-one xy, (x, x+y)
            value=True,
        )
        y_attn_mask = F.pad(  ### the upper-right ones of the yy block extend into the zeros of xy on the left, (y, x+y)
            torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
            (x_len, 0),
            value=False,
        )
        xy_attn_mask = (
            torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
            .unsqueeze(0)
            .expand(bsz * self.num_head, -1, -1)
            .view(bsz, self.num_head, src_len, src_len)
            .to(device=x.device, dtype=torch.bool)
        )

        token_counter = 0
        curr_ptr = prefix_len
        for idx in tqdm(range(1500)):
            token_counter += 1
            if xy_attn_mask is not None:
                xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, None)
            else:
                xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache)

            logits = self.ar_predict_layer(xy_dec[:, -1])

            if idx == 0:
                xy_attn_mask = None
            if idx < 11:  ### require at least 10 predicted tokens before allowing a stop (0.4 s)
                logits = logits[:, :-1]

            samples = sample(
                logits, y, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, temperature=temperature
            )[0]

            y = torch.concat([y, samples], dim=1)

            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                print("use early stop num:", early_stop_num)
                stop = True

            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                stop = True
                y = y[:, :-1]
                token_counter -= 1
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
true
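A point worth noting in the record above: `T2SBlock` routes attention through `F.scaled_dot_product_attention` by default and keeps the hand-written `scaled_dot_product_attention` as a fallback, with an inverted mask convention (here `True` means "blocked", while torch's boolean `attn_mask` means "may attend", hence the `~attn_mask`). A quick equivalence sketch, assuming only that the module imports as packaged:

```python
# Sanity-check sketch (not from the repo): the hand-rolled attention above
# should numerically match torch's fused kernel under a causal boolean mask.
import torch
import torch.nn.functional as F
from AR.models.t2s_model import scaled_dot_product_attention

B, H, L, D = 2, 4, 8, 16
q, k, v = (torch.randn(B, H, L, D) for _ in range(3))
mask = torch.triu(torch.ones(L, L, dtype=torch.bool), diagonal=1)  # True = blocked
mask = mask.expand(B, H, L, L)

ref = F.scaled_dot_product_attention(q, k, v, ~mask)  # torch: True = may attend
ours = scaled_dot_product_attention(q, k, v, mask)    # repo fallback: True = blocked
assert torch.allclose(ref, ours, atol=1e-5)
```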
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/models/t2s_lightning_module.py
GPT_SoVITS/AR/models/t2s_lightning_module.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os
import sys

now_dir = os.getcwd()
sys.path.append(now_dir)
from typing import Dict

import torch
from pytorch_lightning import LightningModule

from AR.models.t2s_model import Text2SemanticDecoder
from AR.modules.lr_schedulers import WarmupCosineLRSchedule
from AR.modules.optim import ScaledAdam


class Text2SemanticLightningModule(LightningModule):
    def __init__(self, config, output_dir, is_train=True):
        super().__init__()
        self.config = config
        self.top_k = 3
        self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
        pretrained_s1 = config.get("pretrained_s1")
        if pretrained_s1 and is_train:
            # print(self.load_state_dict(torch.load(pretrained_s1, map_location="cpu")["state_dict"]))
            print(
                self.load_state_dict(
                    torch.load(
                        pretrained_s1,
                        map_location="cpu",
                        weights_only=False,
                    )["weight"],
                )
            )
        if is_train:
            self.automatic_optimization = False
            self.save_hyperparameters()
            self.eval_dir = output_dir / "eval"
            self.eval_dir.mkdir(parents=True, exist_ok=True)

    def training_step(self, batch: Dict, batch_idx: int):
        opt = self.optimizers()
        scheduler = self.lr_schedulers()
        forward = self.model.forward if self.config["train"].get("if_dpo", False) == True else self.model.forward_old
        loss, acc = forward(
            batch["phoneme_ids"],
            batch["phoneme_ids_len"],
            batch["semantic_ids"],
            batch["semantic_ids_len"],
            batch["bert_feature"],
        )
        self.manual_backward(loss)
        if batch_idx > 0 and batch_idx % 4 == 0:
            opt.step()
            opt.zero_grad()
            scheduler.step()

        self.log(
            "total_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            "lr",
            scheduler.get_last_lr()[0],
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            f"top_{self.top_k}_acc",
            acc,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )

    def validation_step(self, batch: Dict, batch_idx: int):
        return
        # # get loss
        # loss, acc = self.model.forward(
        #     batch['phoneme_ids'], batch['phoneme_ids_len'],
        #     batch['semantic_ids'], batch['semantic_ids_len'],
        #     batch['bert_feature']
        # )
        #
        # self.log(
        #     "val_total_loss",
        #     loss,
        #     on_step=True,
        #     on_epoch=True,
        #     prog_bar=True,
        #     sync_dist=True)
        # self.log(
        #     f"val_top_{self.top_k}_acc",
        #     acc,
        #     on_step=True,
        #     on_epoch=True,
        #     prog_bar=True,
        #     sync_dist=True)
        #
        # # get infer output
        # semantic_len = batch['semantic_ids'].size(1)
        # prompt_len = min(int(semantic_len * 0.5), 150)
        # prompt = batch['semantic_ids'][:, :prompt_len]
        # pred_semantic = self.model.infer(batch['phoneme_ids'],
        #                                  batch['phoneme_ids_len'], prompt,
        #                                  batch['bert_feature']
        #                                  )
        # save_name = f'semantic_toks_{batch_idx}.pt'
        # save_path = os.path.join(self.eval_dir, save_name)
        # torch.save(pred_semantic.detach().cpu(), save_path)

    def configure_optimizers(self):
        model_parameters = self.model.parameters()
        parameters_names = []
        parameters_names.append([name_param_pair[0] for name_param_pair in self.model.named_parameters()])
        lm_opt = ScaledAdam(
            model_parameters,
            lr=0.01,
            betas=(0.9, 0.95),
            clipping_scale=2.0,
            parameters_names=parameters_names,
            show_dominant_parameters=False,
            clipping_update_period=1000,
        )
        return {
            "optimizer": lm_opt,
            "lr_scheduler": {
                "scheduler": WarmupCosineLRSchedule(
                    lm_opt,
                    init_lr=self.config["optimizer"]["lr_init"],
                    peak_lr=self.config["optimizer"]["lr"],
                    end_lr=self.config["optimizer"]["lr_end"],
                    warmup_steps=self.config["optimizer"]["warmup_steps"],
                    total_steps=self.config["optimizer"]["decay_steps"],
                )
            },
        }
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
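The `training_step` above drives Lightning's manual optimization with a 4-batch gradient-accumulation cadence (note that `batch_idx > 0` means the very first batch never triggers a step). A stripped-down sketch of the same pattern, with a hypothetical toy model:

```python
# Minimal sketch (hypothetical model/data) of the manual gradient-accumulation
# pattern used in training_step above: gradients from 4 consecutive batches
# are summed before each optimizer step.
import torch

model = torch.nn.Linear(8, 1)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)

for batch_idx in range(16):
    x, y = torch.randn(4, 8), torch.randn(4, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()  # grads accumulate across iterations until zero_grad()
    if batch_idx > 0 and batch_idx % 4 == 0:
        opt.step()
        opt.zero_grad()
```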
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/models/utils.py
GPT_SoVITS/AR/models/utils.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
# reference: https://github.com/lifeiteng/vall-e
from typing import Tuple

import torch
import torch.nn.functional as F


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """
    Args:
      lengths: A 1-D tensor containing sentence lengths.
      max_len: The length of masks.
    Returns:
      Return a 2-D bool tensor, where masked positions are filled with `True`
      and non-masked positions are filled with `False`.

    #>>> lengths = torch.tensor([1, 3, 2, 5])
    #>>> make_pad_mask(lengths)
    tensor([[False, True, True, True, True],
            [False, False, False, True, True],
            [False, False, True, True, True],
            [False, False, False, False, False]])
    """
    assert lengths.ndim == 1, lengths.ndim
    max_len = max(max_len, lengths.max())
    n = lengths.size(0)
    seq_range = torch.arange(0, max_len, device=lengths.device)
    expaned_lengths = seq_range.unsqueeze(0).expand(n, max_len)

    return expaned_lengths >= lengths.unsqueeze(-1)


def make_pad_mask_left(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """
    Args:
      lengths: A 1-D tensor containing sentence lengths.
      max_len: The length of masks.
    Returns:
      Return a 2-D bool tensor, where masked positions are filled with `True`
      and non-masked positions are filled with `False`.

    #>>> lengths = torch.tensor([1, 3, 2, 5])
    #>>> make_pad_mask(lengths)
    tensor(
        [
            [True, True, False],
            [True, False, False],
            [True, True, False],
            ...
        ]
    )
    """
    assert lengths.ndim == 1, lengths.ndim
    max_len = max(max_len, lengths.max())
    n = lengths.size(0)
    seq_range = torch.arange(0, max_len, device=lengths.device)
    expaned_lengths = seq_range.unsqueeze(0).repeat(n, 1)
    expaned_lengths -= (max_len - lengths).unsqueeze(-1)

    return expaned_lengths < 0


# https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py
def top_k_top_p_filtering(
    logits,
    top_k=0,
    top_p=1.0,
    filter_value=-float("Inf"),
    min_tokens_to_keep=1,
):
    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
    Args:
        logits: logits distribution shape (batch size, vocabulary size)
        if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
        if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        Make sure we keep at least min_tokens_to_keep per batch example in the output
    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    if top_k > 0:
        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
        sorted_indices_to_remove = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        # scatter sorted tensors to original indexing
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        logits[indices_to_remove] = filter_value
    return logits


def topk_sampling(logits, top_k=10, top_p=1.0, temperature=1.0):
    # temperature: (`optional`) float
    #     The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
    # top_k: (`optional`) int
    #     The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
    # top_p: (`optional`) float
    #     The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.

    # Temperature (higher temperature => more likely to sample low probability tokens)
    if temperature != 1.0:
        logits = logits / temperature
    # Top-p/top-k filtering
    logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
    # Sample
    token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
    return token


from typing import Optional


def multinomial_sample_one_no_sync(
    probs_sort,
):
    # Does multinomial sampling without a cuda synchronization
    q = torch.empty_like(probs_sort).exponential_(1)
    return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)


def logits_to_probs(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    top_p: Optional[int] = None,
    repetition_penalty: float = 1.0,
):
    # if previous_tokens is not None:
    #     previous_tokens = previous_tokens.squeeze()
    # print(logits.shape, previous_tokens.shape)
    # pdb.set_trace()
    if previous_tokens is not None and repetition_penalty != 1.0:
        previous_tokens = previous_tokens.long()
        score = torch.gather(logits, dim=1, index=previous_tokens)
        score = torch.where(
            score < 0,
            score * repetition_penalty,
            score / repetition_penalty,
        )
        logits.scatter_(dim=1, index=previous_tokens, src=score)

    if top_p is not None and top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_indices_to_remove = cum_probs > top_p
        sorted_indices_to_remove[:, 0] = False  # keep at least one option
        indices_to_remove = sorted_indices_to_remove.scatter(
            dim=1,
            index=sorted_indices,
            src=sorted_indices_to_remove,
        )
        logits = logits.masked_fill(indices_to_remove, -float("Inf"))

    logits = logits / max(temperature, 1e-5)

    if top_k is not None:
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        pivot = v[:, -1].unsqueeze(-1)
        logits = torch.where(logits < pivot, -float("Inf"), logits)

    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs


def sample(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    **sampling_kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
    probs = logits_to_probs(logits=logits, previous_tokens=previous_tokens, **sampling_kwargs)
    idx_next = multinomial_sample_one_no_sync(probs)
    return idx_next, probs


def dpo_loss(
    policy_chosen_logps: torch.FloatTensor,
    policy_rejected_logps: torch.FloatTensor,
    reference_chosen_logps: torch.FloatTensor,
    reference_rejected_logps: torch.FloatTensor,
    beta: float,
    reference_free: bool = False,
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    pi_logratios = policy_chosen_logps - policy_rejected_logps
    ref_logratios = reference_chosen_logps - reference_rejected_logps

    if reference_free:
        ref_logratios = 0

    logits = pi_logratios - ref_logratios

    losses = -F.logsigmoid(beta * logits)
    chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
    rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()

    return losses.mean(), chosen_rewards, rejected_rewards


def get_batch_logps(
    logits_target: torch.FloatTensor,
    logits_reject: torch.FloatTensor,
    labels_target: torch.LongTensor,
    labels_reject: torch.LongTensor,
    average_log_prob: bool = False,
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
    # dummy token; we'll ignore the losses on these tokens later
    per_token_logps_target = torch.gather(
        logits_target.log_softmax(-1), dim=2, index=labels_target.unsqueeze(2)
    ).squeeze(2)
    per_token_logps_reject = torch.gather(
        logits_reject.log_softmax(-1), dim=2, index=labels_reject.unsqueeze(2)
    ).squeeze(2)

    return per_token_logps_target.sum(-1), per_token_logps_reject.sum(-1)


def make_reject_y(y_o, y_lens):
    def repeat_P(y):
        range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
        pre = y[: range_idx[0]]
        shf = y[range_idx[1] :]
        range_text = y[range_idx[0] : range_idx[1]]
        new_y = torch.cat([pre, range_text, range_text, shf])
        return new_y

    def lost_P(y):
        range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
        pre = y[: range_idx[0]]
        shf = y[range_idx[1] :]
        range_text = y[range_idx[0] : range_idx[1]]
        new_y = torch.cat([pre, shf])
        return new_y

    bs = len(y_lens)
    reject_y = []
    reject_y_lens = []
    for b in range(bs):
        process_item_idx = torch.randint(0, 1, size=(1,))[0]
        if process_item_idx == 0:
            new_y = repeat_P(y_o[b])
            reject_y.append(new_y)
            reject_y_lens.append(len(new_y))
        elif process_item_idx == 1:
            new_y = lost_P(y_o[b])
            reject_y.append(new_y)
            reject_y_lens.append(len(new_y))
    max_length = max(reject_y_lens)
    for b in range(bs):
        pad_length = max_length - reject_y_lens[b]
        reject_y[b] = torch.cat([reject_y[b], torch.zeros(pad_length, dtype=y_o.dtype, device=y_o.device)], dim=0)

    reject_y = torch.stack(reject_y, dim=0)
    reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)

    return reject_y, reject_y_lens
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
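The pad-mask docstrings above carry commented-out doctests; for reference, a small script exercising the two mask builders and the top-k/top-p sampler (import path assumed to match the file path above):

```python
# Quick demonstration of right- vs. left-aligned padding masks and sampling.
import torch
from AR.models.utils import make_pad_mask, make_pad_mask_left, topk_sampling

lengths = torch.tensor([1, 3, 2, 5])
print(make_pad_mask(lengths))       # True marks padded positions on the right
print(make_pad_mask_left(lengths))  # True marks padded positions on the left

# nucleus + top-k sampling over a toy 10-way distribution
logits = torch.randn(1, 10)
token = topk_sampling(logits, top_k=3, top_p=0.9, temperature=0.8)
print(token.shape)  # torch.Size([1, 1])
```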
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/models/t2s_model_onnx.py
GPT_SoVITS/AR/models/t2s_model_onnx.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import torch
from torch import nn
from torch.nn import functional as F
from torchmetrics.classification import MulticlassAccuracy

from AR.modules.embedding_onnx import SinePositionalEmbedding, TokenEmbedding
from AR.modules.transformer_onnx import LayerNorm, TransformerEncoder, TransformerEncoderLayer

default_config = {
    "embedding_dim": 512,
    "hidden_dim": 512,
    "num_head": 8,
    "num_layers": 12,
    "num_codebook": 8,
    "p_dropout": 0.0,
    "vocab_size": 1024 + 1,
    "phoneme_vocab_size": 512,
    "EOS": 1024,
}

inf_tensor_value = torch.FloatTensor([-float("Inf")]).float()


def logits_to_probs(
    logits,
    previous_tokens=None,
    temperature: float = 1.0,
    top_k=None,
    top_p=None,
    repetition_penalty: float = 1.0,
):
    previous_tokens = previous_tokens.squeeze()
    if previous_tokens is not None and repetition_penalty != 1.0:
        previous_tokens = previous_tokens.long()
        score = torch.gather(logits, dim=0, index=previous_tokens)
        score = torch.where(
            score < 0,
            score * repetition_penalty,
            score / repetition_penalty,
        )
        logits.scatter_(dim=0, index=previous_tokens, src=score)

    if top_p is not None and top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(
            torch.nn.functional.softmax(
                sorted_logits,
                dim=-1,
            ),
            dim=-1,
        )
        sorted_indices_to_remove = cum_probs > top_p
        sorted_indices_to_remove[0] = False  # keep at least one option
        indices_to_remove = sorted_indices_to_remove.scatter(
            dim=0,
            index=sorted_indices,
            src=sorted_indices_to_remove,
        )
        logits = logits.masked_fill(indices_to_remove, -float("Inf"))

    logits = logits / max(temperature, 1e-5)

    if top_k is not None:
        v, _ = torch.topk(logits, top_k)
        pivot = v.select(-1, -1).unsqueeze(-1)
        logits = torch.where(logits < pivot, inf_tensor_value, logits)

    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs


def multinomial_sample_one_no_sync(
    probs_sort,
):
    # Does multinomial sampling without a cuda synchronization
    q = torch.randn_like(probs_sort)
    return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)


def sample(
    logits,
    previous_tokens,
    **sampling_kwargs,
):
    probs = logits_to_probs(
        logits=logits,
        previous_tokens=previous_tokens,
        **sampling_kwargs,
    )
    idx_next = multinomial_sample_one_no_sync(probs)
    return idx_next, probs


class OnnxEncoder(nn.Module):
    def __init__(self, ar_text_embedding, bert_proj, ar_text_position):
        super().__init__()
        self.ar_text_embedding = ar_text_embedding
        self.bert_proj = bert_proj
        self.ar_text_position = ar_text_position

    def forward(self, x, bert_feature):
        x = self.ar_text_embedding(x)
        x = x + self.bert_proj(bert_feature.transpose(1, 2))
        return self.ar_text_position(x)


class T2SFirstStageDecoder(nn.Module):
    def __init__(
        self,
        ar_audio_embedding,
        ar_audio_position,
        h,
        ar_predict_layer,
        loss_fct,
        ar_accuracy_metric,
        top_k,
        early_stop_num,
        num_layers,
    ):
        super().__init__()
        self.ar_audio_embedding = ar_audio_embedding
        self.ar_audio_position = ar_audio_position
        self.h = h
        self.ar_predict_layer = ar_predict_layer
        self.loss_fct = loss_fct
        self.ar_accuracy_metric = ar_accuracy_metric
        self.top_k = top_k
        self.early_stop_num = early_stop_num
        self.num_layers = num_layers

    def forward(self, x, prompt):
        y = prompt
        x_example = x[:, :, 0] * 0.0
        # N, 1, 512
        cache = {
            "all_stage": self.num_layers,
            "k": None,
            "v": None,
            "y_emb": None,
            "first_infer": 1,
            "stage": 0,
        }

        y_emb = self.ar_audio_embedding(y)

        cache["y_emb"] = y_emb
        y_pos = self.ar_audio_position(y_emb)

        xy_pos = torch.concat([x, y_pos], dim=1)

        y_example = y_pos[:, :, 0] * 0.0
        x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example).bool()
        y_attn_mask = torch.ones_like(torch.matmul(y_example.transpose(0, 1), y_example), dtype=torch.int64)
        y_attn_mask = torch.cumsum(y_attn_mask, dim=1) - torch.cumsum(
            torch.ones_like(
                y_example.transpose(0, 1),
                dtype=torch.int64,
            ),
            dim=0,
        )
        y_attn_mask = y_attn_mask > 0

        x_y_pad = torch.matmul(x_example.transpose(0, 1), y_example).bool()
        y_x_pad = torch.matmul(y_example.transpose(0, 1), x_example).bool()
        x_attn_mask_pad = torch.cat([x_attn_mask, torch.ones_like(x_y_pad)], dim=1)
        y_attn_mask = torch.cat([y_x_pad, y_attn_mask], dim=1)
        xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
        cache["k"] = (
            torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))
            .unsqueeze(1)
            .repeat(self.num_layers, 1, 1, 1)
        )
        cache["v"] = (
            torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))
            .unsqueeze(1)
            .repeat(self.num_layers, 1, 1, 1)
        )

        xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
        logits = self.ar_predict_layer(xy_dec[:, -1])
        samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)

        y = torch.concat([y, samples], dim=1)

        return y, cache["k"], cache["v"], cache["y_emb"], x_example


class T2SStageDecoder(nn.Module):
    def __init__(
        self,
        ar_audio_embedding,
        ar_audio_position,
        h,
        ar_predict_layer,
        loss_fct,
        ar_accuracy_metric,
        top_k,
        early_stop_num,
        num_layers,
    ):
        super().__init__()
        self.ar_audio_embedding = ar_audio_embedding
        self.ar_audio_position = ar_audio_position
        self.h = h
        self.ar_predict_layer = ar_predict_layer
        self.loss_fct = loss_fct
        self.ar_accuracy_metric = ar_accuracy_metric
        self.top_k = top_k
        self.early_stop_num = early_stop_num
        self.num_layers = num_layers

    def forward(self, y, k, v, y_emb, x_example):
        cache = {
            "all_stage": self.num_layers,
            "k": torch.nn.functional.pad(k, (0, 0, 0, 0, 0, 1)),
            "v": torch.nn.functional.pad(v, (0, 0, 0, 0, 0, 1)),
            "y_emb": y_emb,
            "first_infer": 0,
            "stage": 0,
        }

        y_emb = torch.cat(
            [
                cache["y_emb"],
                self.ar_audio_embedding(y[:, -1:]),
            ],
            1,
        )
        cache["y_emb"] = y_emb
        y_pos = self.ar_audio_position(y_emb)

        xy_pos = y_pos[:, -1:]

        y_example = y_pos[:, :, 0] * 0.0
        xy_attn_mask = torch.cat([x_example, y_example], dim=1)
        xy_attn_mask = torch.zeros_like(xy_attn_mask, dtype=torch.bool)

        xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
        logits = self.ar_predict_layer(xy_dec[:, -1])
        samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)

        y = torch.concat([y, samples], dim=1)

        return y, cache["k"], cache["v"], cache["y_emb"], logits, samples


class Text2SemanticDecoder(nn.Module):
    def __init__(self, config, norm_first=False, top_k=3):
        super(Text2SemanticDecoder, self).__init__()
        self.model_dim = config["model"]["hidden_dim"]
        self.embedding_dim = config["model"]["embedding_dim"]
        self.num_head = config["model"]["head"]
        self.num_layers = config["model"]["n_layer"]
        self.norm_first = norm_first
        self.vocab_size = config["model"]["vocab_size"]
        self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
        self.p_dropout = float(config["model"]["dropout"])
        self.EOS = config["model"]["EOS"]
        self.norm_first = norm_first
        assert self.EOS == self.vocab_size - 1
        self.bert_proj = nn.Linear(1024, self.embedding_dim)
        self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size, self.p_dropout)
        self.ar_text_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
        self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size, self.p_dropout)
        self.ar_audio_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
        self.h = TransformerEncoder(
            TransformerEncoderLayer(
                d_model=self.model_dim,
                nhead=self.num_head,
                dim_feedforward=self.model_dim * 4,
                dropout=0.1,
                batch_first=True,
                norm_first=norm_first,
            ),
            num_layers=self.num_layers,
            norm=LayerNorm(self.model_dim) if norm_first else None,
        )
        self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
        self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
        self.ar_accuracy_metric = MulticlassAccuracy(
            self.vocab_size,
            top_k=top_k,
            average="micro",
            multidim_average="global",
            ignore_index=self.EOS,
        )
        self.top_k = torch.LongTensor([1])
        self.early_stop_num = torch.LongTensor([-1])

    def init_onnx(self):
        self.onnx_encoder = OnnxEncoder(self.ar_text_embedding, self.bert_proj, self.ar_text_position)
        self.first_stage_decoder = T2SFirstStageDecoder(
            self.ar_audio_embedding,
            self.ar_audio_position,
            self.h,
            self.ar_predict_layer,
            self.loss_fct,
            self.ar_accuracy_metric,
            self.top_k,
            self.early_stop_num,
            self.num_layers,
        )
        self.stage_decoder = T2SStageDecoder(
            self.ar_audio_embedding,
            self.ar_audio_position,
            self.h,
            self.ar_predict_layer,
            self.loss_fct,
            self.ar_accuracy_metric,
            self.top_k,
            self.early_stop_num,
            self.num_layers,
        )

    def forward(self, x, prompts, bert_feature):
        early_stop_num = self.early_stop_num
        prefix_len = prompts.shape[1]

        x = self.onnx_encoder(x, bert_feature)
        y, k, v, y_emb, stage, x_example = self.first_stage_decoder(x, prompts)

        stop = False
        for idx in range(1, 1500):
            enco = self.stage_decoder(y, k, v, y_emb, stage, x_example)
            y, k, v, y_emb, stage, logits, samples = enco
            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                stop = True
            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                stop = True
            if stop:
                break
        y[0, -1] = 0
        return y, idx

    def infer(self, x, prompts, bert_feature):
        top_k = self.top_k
        early_stop_num = self.early_stop_num

        x = self.onnx_encoder(x, bert_feature)

        y = prompts
        prefix_len = y.shape[1]
        x_len = x.shape[1]
        x_example = x[:, :, 0] * 0.0
        x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example)
        x_attn_mask = torch.zeros_like(x_attn_mask, dtype=torch.bool)

        stop = False
        cache = {
            "all_stage": self.num_layers,
            "k": [None] * self.num_layers,
            "v": [None] * self.num_layers,
            "y_emb": None,
            "first_infer": 1,
            "stage": 0,
        }
        for idx in range(1500):
            if cache["first_infer"] == 1:
                y_emb = self.ar_audio_embedding(y)
            else:
                y_emb = torch.cat([cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1)
            cache["y_emb"] = y_emb
            y_pos = self.ar_audio_position(y_emb)
            if cache["first_infer"] == 1:
                xy_pos = torch.concat([x, y_pos], dim=1)
            else:
                xy_pos = y_pos[:, -1:]
            y_len = y_pos.shape[1]
            if cache["first_infer"] == 1:
                x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)
                y_attn_mask = F.pad(
                    torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
                    (x_len, 0),
                    value=False,
                )
                xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
            else:
                xy_attn_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool)
            xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
            logits = self.ar_predict_layer(xy_dec[:, -1])
            samples = sample(logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                stop = True
            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                stop = True
            if stop:
                if prompts.shape[1] == y.shape[1]:
                    y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                break
            y = torch.concat([y, samples], dim=1)
            cache["first_infer"] = 0
        return y, idx
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
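In the ONNX-oriented decoder above, `init_onnx` splits the model into an encoder and two stage decoders so each piece can be exported separately. A hedged export sketch for the encoder alone; the config keys mirror what `__init__` reads, while the file name, input shapes, and dynamic-axis labels are illustrative assumptions, not from the repo:

```python
# Hypothetical export sketch for the OnnxEncoder built by init_onnx().
import torch
from AR.models.t2s_model_onnx import Text2SemanticDecoder

config = {"model": {"hidden_dim": 512, "embedding_dim": 512, "head": 8, "n_layer": 12,
                    "vocab_size": 1025, "phoneme_vocab_size": 512, "dropout": 0.0, "EOS": 1024}}
model = Text2SemanticDecoder(config)
model.init_onnx()

x = torch.randint(0, 100, (1, 20))  # phoneme ids, (1, T)
bert = torch.randn(1, 1024, 20)     # BERT features, (1, 1024, T)
torch.onnx.export(
    model.onnx_encoder,
    (x, bert),
    "t2s_encoder.onnx",
    input_names=["phoneme_ids", "bert_feature"],
    output_names=["x"],
    dynamic_axes={"phoneme_ids": {1: "T"}, "bert_feature": {2: "T"}},
)
```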
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/models/__init__.py
GPT_SoVITS/AR/models/__init__.py
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py
GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os
import sys

now_dir = os.getcwd()
sys.path.append(now_dir)
from typing import Dict

import torch
from pytorch_lightning import LightningModule

from AR.models.t2s_model_onnx import Text2SemanticDecoder
from AR.modules.lr_schedulers import WarmupCosineLRSchedule
from AR.modules.optim import ScaledAdam


class Text2SemanticLightningModule(LightningModule):
    def __init__(self, config, output_dir, is_train=True):
        super().__init__()
        self.config = config
        self.top_k = 3
        self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
        pretrained_s1 = config.get("pretrained_s1")
        if pretrained_s1 and is_train:
            # print(self.load_state_dict(torch.load(pretrained_s1, map_location="cpu")["state_dict"]))
            print(
                self.load_state_dict(
                    torch.load(
                        pretrained_s1,
                        map_location="cpu",
                    )["weight"],
                ),
            )
        if is_train:
            self.automatic_optimization = False
            self.save_hyperparameters()
            self.eval_dir = output_dir / "eval"
            self.eval_dir.mkdir(parents=True, exist_ok=True)

    def training_step(self, batch: Dict, batch_idx: int):
        opt = self.optimizers()
        scheduler = self.lr_schedulers()
        loss, acc = self.model.forward(
            batch["phoneme_ids"],
            batch["phoneme_ids_len"],
            batch["semantic_ids"],
            batch["semantic_ids_len"],
            batch["bert_feature"],
        )
        self.manual_backward(loss)
        if batch_idx > 0 and batch_idx % 4 == 0:
            opt.step()
            opt.zero_grad()
            scheduler.step()

        self.log(
            "total_loss",
            loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            "lr",
            scheduler.get_last_lr()[0],
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            f"top_{self.top_k}_acc",
            acc,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )

    def validation_step(self, batch: Dict, batch_idx: int):
        return

    def configure_optimizers(self):
        model_parameters = self.model.parameters()
        parameters_names = []
        parameters_names.append([name_param_pair[0] for name_param_pair in self.model.named_parameters()])
        lm_opt = ScaledAdam(
            model_parameters,
            lr=0.01,
            betas=(0.9, 0.95),
            clipping_scale=2.0,
            parameters_names=parameters_names,
            show_dominant_parameters=False,
            clipping_update_period=1000,
        )
        return {
            "optimizer": lm_opt,
            "lr_scheduler": {
                "scheduler": WarmupCosineLRSchedule(
                    lm_opt,
                    init_lr=self.config["optimizer"]["lr_init"],
                    peak_lr=self.config["optimizer"]["lr"],
                    end_lr=self.config["optimizer"]["lr_end"],
                    warmup_steps=self.config["optimizer"]["warmup_steps"],
                    total_steps=self.config["optimizer"]["decay_steps"],
                )
            },
        }
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/utils/__init__.py
GPT_SoVITS/AR/utils/__init__.py
import re


def str2bool(str):
    return True if str.lower() == "true" else False


def get_newest_ckpt(string_list):
    # define a regex pattern to match the numbers in each filename
    pattern = r"epoch=(\d+)-step=(\d+)\.ckpt"

    # extract the numbers from each string and build a list of tuples
    extracted_info = []
    for string in string_list:
        match = re.match(pattern, string)
        if match:
            epoch = int(match.group(1))
            step = int(match.group(2))
            extracted_info.append((epoch, step, string))
    # sort by the number after "epoch" and then the number after "step"
    sorted_info = sorted(extracted_info, key=lambda x: (x[0], x[1]), reverse=True)
    # take the newest ckpt filename
    newest_ckpt = sorted_info[0][2]
    return newest_ckpt


# returns the text when the file exists and is non-empty, otherwise False
def check_txt_file(file_path):
    try:
        with open(file_path, "r") as file:
            text = file.readline().strip()
        assert text.strip() != ""
        return text
    except Exception:
        return False
    return False
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
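A usage sketch for `get_newest_ckpt` above (the checkpoint names are made up, but they follow the `epoch=N-step=M.ckpt` pattern the regex expects):

```python
# Picks the checkpoint with the highest (epoch, step) pair.
from AR.utils import get_newest_ckpt

ckpts = [
    "epoch=4-step=1200.ckpt",
    "epoch=10-step=3000.ckpt",
    "epoch=10-step=2900.ckpt",
]
assert get_newest_ckpt(ckpts) == "epoch=10-step=3000.ckpt"
```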
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/utils/initialize.py
GPT_SoVITS/AR/utils/initialize.py
#!/usr/bin/env python3
"""Initialize modules for espnet2 neural networks."""

import torch
from typeguard import check_argument_types


def initialize(model: torch.nn.Module, init: str):
    """Initialize weights of a neural network module.

    Parameters are initialized using the given method or distribution.

    Custom initialization routines can be implemented into submodules
    as function `espnet_initialization_fn` within the custom module.

    Args:
        model: Target.
        init: Method of initialization.
    """
    assert check_argument_types()
    print("init with", init)

    # weight init
    for p in model.parameters():
        if p.dim() > 1:
            if init == "xavier_uniform":
                torch.nn.init.xavier_uniform_(p.data)
            elif init == "xavier_normal":
                torch.nn.init.xavier_normal_(p.data)
            elif init == "kaiming_uniform":
                torch.nn.init.kaiming_uniform_(p.data, nonlinearity="relu")
            elif init == "kaiming_normal":
                torch.nn.init.kaiming_normal_(p.data, nonlinearity="relu")
            else:
                raise ValueError("Unknown initialization: " + init)
    # bias init
    for name, p in model.named_parameters():
        if ".bias" in name and p.dim() == 1:
            p.data.zero_()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
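A minimal call sketch for `initialize` on a toy module (the model is hypothetical; `typeguard` must be installed for the `check_argument_types` assertion to pass):

```python
# Re-draws all >1-D weights with the chosen scheme and zeroes 1-D biases.
import torch
from AR.utils.initialize import initialize

model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.Linear(32, 4))
initialize(model, "xavier_uniform")
```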
RVC-Boss/GPT-SoVITS
https://github.com/RVC-Boss/GPT-SoVITS/blob/c767f0b83b998e996a4d230d86da575a03f54a3f/GPT_SoVITS/AR/utils/io.py
GPT_SoVITS/AR/utils/io.py
import sys

import torch
import yaml


def load_yaml_config(path):
    with open(path) as f:
        config = yaml.full_load(f)
    return config


def save_config_to_yaml(config, path):
    assert path.endswith(".yaml")
    with open(path, "w") as f:
        f.write(yaml.dump(config))
        f.close()


def write_args(args, path):
    args_dict = dict((name, getattr(args, name)) for name in dir(args) if not name.startswith("_"))
    with open(path, "a") as args_file:
        args_file.write("==> torch version: {}\n".format(torch.__version__))
        args_file.write("==> cudnn version: {}\n".format(torch.backends.cudnn.version()))
        args_file.write("==> Cmd:\n")
        args_file.write(str(sys.argv))
        args_file.write("\n==> args:\n")
        for k, v in sorted(args_dict.items()):
            args_file.write("  %s: %s\n" % (str(k), str(v)))
        args_file.close()
python
MIT
c767f0b83b998e996a4d230d86da575a03f54a3f
2026-01-04T14:39:21.477961Z
false
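A round-trip sketch for the YAML helpers above (the temp path is illustrative; `save_config_to_yaml` requires a `.yaml` suffix):

```python
# Write a config dict to YAML and read it back unchanged.
from AR.utils.io import load_yaml_config, save_config_to_yaml

cfg = {"optimizer": {"lr": 0.01, "warmup_steps": 2000}}
save_config_to_yaml(cfg, "/tmp/s1_config.yaml")
assert load_yaml_config("/tmp/s1_config.yaml") == cfg
```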