# dat_test_voice/app.py
# Reproducibility: seed all RNGs and force deterministic cuDNN kernels.
import random
import numpy as np
import torch

random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# load packages
import os
import time
import yaml
from munch import Munch
from torch import nn
import torch.nn.functional as F
import torchaudio
import librosa
from nltk.tokenize import word_tokenize  # currently unused; kept from the upstream demo
from huggingface_hub import hf_hub_download

from models import *
from utils import *
from text_utils import TextCleaner

textcleaner = TextCleaner()

# NLTK tokenizer data used by word_tokenize (from the upstream demo).
import nltk
nltk.download('punkt')
nltk.download('punkt_tab')
# Fetch the model checkpoint from the Hugging Face Hub; HF_TOKEN is only
# needed if the repo is private.
HF_TOKEN = os.getenv("HF_TOKEN", None)
hf_hub_download(
    repo_id="presencesw/tts",
    filename="phoaudio_single_v1.pth",
    local_dir="Models/phoaudio_single_v1",
    local_dir_use_symlinks=False,  # deprecated in recent huggingface_hub, but still accepted
    token=HF_TOKEN,
)
# Mel front end: 80-band mel spectrogram at 24 kHz (hop 300 = 80 frames/s),
# plus the log-mel normalization constants used by StyleTTS2.
to_mel = torchaudio.transforms.MelSpectrogram(
    n_mels=80, n_fft=2048, win_length=1200, hop_length=300)
mean, std = -4, 4
def length_to_mask(lengths):
    # Build a boolean padding mask: True marks positions past each sequence's length.
    mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
    mask = torch.gt(mask + 1, lengths.unsqueeze(1))
    return mask
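# Example (illustrative, not from the original): for lengths = tensor([3, 5])
# this returns
#   tensor([[False, False, False,  True,  True],
#           [False, False, False, False, False]])
# i.e. True marks the padded positions past each sequence's length.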
def preprocess(wave):
    # Waveform (numpy array) -> normalized log-mel tensor of shape (1, 80, T).
    wave_tensor = torch.from_numpy(wave).float()
    mel_tensor = to_mel(wave_tensor)
    mel_tensor = (torch.log(1e-5 + mel_tensor.unsqueeze(0)) - mean) / std
    return mel_tensor
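# Shape sketch (illustrative): at 24 kHz with hop_length=300, one second of
# audio (24,000 samples) yields roughly 80 mel frames, so preprocess(wave)
# returns a (1, 80, T) tensor with T close to len(wave) / 300.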
def compute_style(path):
    # Encode a reference recording into a style vector: acoustic style
    # (style_encoder) concatenated with prosodic style (predictor_encoder).
    wave, sr = librosa.load(path, sr=24000)  # librosa already resamples to 24 kHz here
    audio, index = librosa.effects.trim(wave, top_db=30)
    if sr != 24000:
        # Defensive fallback; librosa >= 0.10 requires keyword arguments here.
        audio = librosa.resample(audio, orig_sr=sr, target_sr=24000)
    mel_tensor = preprocess(audio).to(device)
    with torch.no_grad():
        ref_s = model.style_encoder(mel_tensor.unsqueeze(1))
        ref_p = model.predictor_encoder(mel_tensor.unsqueeze(1))
    return torch.cat([ref_s, ref_p], dim=1)
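# Usage sketch (assuming a mono reference recording exists on disk):
#   ref_s = compute_style("voice_ref/Tram_audio.wav")  # shape (1, 256)
# Per the slicing in inference() below, ref_s[:, :128] is the acoustic half
# and ref_s[:, 128:] the prosodic half.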
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# load phonemizer (espeak backend, Vietnamese)
import phonemizer
vi_phonemizer = phonemizer.backend.EspeakBackend(language='vi', preserve_punctuation=True, with_stress=True, words_mismatch='ignore')
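# Note: phonemize() maps a list of utterances to a list of phoneme strings,
# one per utterance. espeak may emit "(en)"/"(vi)" language-switch markers for
# code-switched text; inference() strips them below.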
with open("Models/phoaudio_single_v1/config_phoaudio_single_v1.yml") as f:
    config = yaml.safe_load(f)
# load pretrained ASR model
ASR_config = config.get('ASR_config', False)
ASR_path = config.get('ASR_path', False)
text_aligner = load_ASR_models(ASR_path, ASR_config)
# load pretrained F0 model
F0_path = config.get('F0_path', False)
pitch_extractor = load_F0_models(F0_path)
# load the (extended) PL-BERT model
from Utils_extend_v1.PLBERT.util import load_plbert
BERT_path = config.get('PLBERT_dir', False)
plbert = load_plbert(BERT_path)
# build the model and put every submodule in eval mode on `device`
model_params = recursive_munch(config['model_params'])
model = build_model(model_params, text_aligner, pitch_extractor, plbert)
_ = [model[key].eval() for key in model]
_ = [model[key].to(device) for key in model]
# load the trained weights; checkpoints saved from DataParallel prefix keys
# with `module.`, so fall back to stripping that prefix if a direct load fails
params_whole = torch.load("Models/phoaudio_single_v1/phoaudio_single_v1.pth", map_location='cpu')
params = params_whole['net']

for key in model:
    if key in params:
        print('%s loaded' % key)
        try:
            model[key].load_state_dict(params[key])
        except RuntimeError:
            from collections import OrderedDict
            state_dict = params[key]
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                name = k[7:]  # remove the `module.` prefix
                new_state_dict[name] = v
            # strict=False silently skips any remaining mismatched keys
            model[key].load_state_dict(new_state_dict, strict=False)

_ = [model[key].eval() for key in model]
from Modules.diffusion.sampler import DiffusionSampler, ADPM2Sampler, KarrasSchedule

sampler = DiffusionSampler(
    model.diffusion.diffusion,
    sampler=ADPM2Sampler(),
    sigma_schedule=KarrasSchedule(sigma_min=0.0001, sigma_max=3.0, rho=9.0),  # empirical parameters
    clamp=False
)
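# Note (empirical, from the upstream StyleTTS2 demo): num_steps trades speed
# for style quality, and embedding_scale acts like a classifier-free-guidance
# weight on the text embedding, with values > 1 making the sampled style
# follow the text more strongly. Both are passed per call in inference().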
def inference(text, ref_s, alpha=0.3, beta=0.7, diffusion_steps=5, embedding_scale=1):
    text = text.strip()

    # Phonemize with espeak and strip the language-switch markers it inserts.
    ps = vi_phonemizer.phonemize([text])
    ps = ps[0].replace("(en)", "").replace("(vi)", "").strip()

    tokens = textcleaner(ps)
    tokens.insert(0, 0)
    tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)

    with torch.no_grad():
        input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)
        text_mask = length_to_mask(input_lengths).to(device)

        t_en = model.text_encoder(tokens, input_lengths, text_mask)
        bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
        d_en = model.bert_encoder(bert_dur).transpose(-1, -2)

        # Sample a 256-d style vector from the diffusion model, conditioned on
        # the BERT embedding and the reference style.
        s_pred = sampler(noise=torch.randn((1, 256)).unsqueeze(1).to(device),
                         embedding=bert_dur,
                         embedding_scale=embedding_scale,
                         features=ref_s,  # reference from the same speaker as the embedding
                         num_steps=diffusion_steps).squeeze(1)

        s = s_pred[:, 128:]
        ref = s_pred[:, :128]

        # Blend sampled and reference styles: alpha weighs the acoustic half,
        # beta the prosodic half.
        ref = alpha * ref + (1 - alpha) * ref_s[:, :128]
        s = beta * s + (1 - beta) * ref_s[:, 128:]

        d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)

        x, _ = model.predictor.lstm(d)
        duration = model.predictor.duration_proj(x)

        duration = torch.sigmoid(duration).sum(axis=-1)
        pred_dur = torch.round(duration.squeeze()).clamp(min=1)

        # Expand each phoneme over its predicted frame count to build a hard
        # monotonic alignment matrix.
        pred_aln_trg = torch.zeros(int(input_lengths.item()), int(pred_dur.sum().item()))
        c_frame = 0
        for i in range(pred_aln_trg.size(0)):
            pred_aln_trg[i, c_frame:c_frame + int(pred_dur[i].data)] = 1
            c_frame += int(pred_dur[i].data)

        # encode prosody
        en = (d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device))
        if model_params.decoder.type == "hifigan":
            # the HiFi-GAN decoder expects features shifted right by one frame
            asr_new = torch.zeros_like(en)
            asr_new[:, :, 0] = en[:, :, 0]
            asr_new[:, :, 1:] = en[:, :, 0:-1]
            en = asr_new

        F0_pred, N_pred = model.predictor.F0Ntrain(en, s)

        asr = (t_en @ pred_aln_trg.unsqueeze(0).to(device))
        if model_params.decoder.type == "hifigan":
            asr_new = torch.zeros_like(asr)
            asr_new[:, :, 0] = asr[:, :, 0]
            asr_new[:, :, 1:] = asr[:, :, 0:-1]
            asr = asr_new

        out = model.decoder(asr, F0_pred, N_pred, ref.squeeze().unsqueeze(0))

    # Trim the last 50 samples: the model produces a weird pulse at the end,
    # which needs to be fixed properly later.
    return out.squeeze().cpu().numpy()[..., :-50], ps
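# Usage sketch (illustrative; the test sentence and soundfile dependency are
# assumptions, not part of the original app):
#   ref = compute_style("voice_ref/Tram_audio.wav")
#   wav, phonemes = inference("Xin chào", ref, alpha=0.3, beta=0.7,
#                             diffusion_steps=5, embedding_scale=1)
#   import soundfile as sf
#   sf.write("out.wav", wav, 24000)  # wav is float audio at 24 kHz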
# Fixed reference voice used for all requests.
voice_ref = "voice_ref/Tram_audio.wav"

import gradio as gr

def generate_voice(input_string):
    ref = compute_style(voice_ref)
    start = time.time()
    wav, phonemes = inference(input_string, ref, alpha=0.3, beta=0.7, diffusion_steps=5, embedding_scale=1)
    wav = wav.astype(np.float32)
    # Real-time factor: synthesis wall-clock time over generated audio duration.
    rtf = (time.time() - start) / (len(wav) / 24000)
    return (24000, wav), rtf, phonemes
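# Performance note: the reference style for the fixed voice is recomputed on
# every click. A minimal caching sketch (an assumption, not in the original):
#   REF_STYLE = compute_style(voice_ref)  # once, at startup
# generate_voice() could then pass REF_STYLE to inference() directly.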
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(value="Đây là một ví dụ về tổng hợp giọng nói.", label="Input text")
            phoneme_output = gr.Textbox(label="Phonemes of the input text")
            run_button = gr.Button(value="Generate voice")
        with gr.Column():
            audio_output = gr.Audio(label="Generated audio")
            rtf_log = gr.Number(label="Real Time Factor")

    run_button.click(fn=generate_voice, inputs=[text_input], outputs=[audio_output, rtf_log, phoneme_output])

if __name__ == "__main__":
    demo.launch(server_port=7860, server_name="0.0.0.0")