# xtts_voice.py
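"""Gradio inference demo for a fine-tuned XTTS model.

Loads an XTTS checkpoint and generates speech conditioned on a selected
reference voice clip.
"""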
import argparse
import logging
import os
import sys
import tempfile

import gradio as gr
import torch
import torchaudio

from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts
def clear_gpu_cache():
# clear the GPU cache
if torch.cuda.is_available():
torch.cuda.empty_cache()
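# Global handle to the loaded model; set by load_model() and read by run_tts().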
XTTS_MODEL = None
def load_model(xtts_checkpoint, xtts_config, xtts_vocab):
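    """Load a fine-tuned XTTS model from the given checkpoint, config, and vocab paths."""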
global XTTS_MODEL
clear_gpu_cache()
if not xtts_checkpoint or not xtts_config or not xtts_vocab:
return "You need to run the previous steps or manually set the `XTTS checkpoint path`, `XTTS config path`, and `XTTS vocab path` fields !!"
config = XttsConfig()
config.load_json(xtts_config)
XTTS_MODEL = Xtts.init_from_config(config)
print("Loading XTTS model! ")
XTTS_MODEL.load_checkpoint(config, checkpoint_path=xtts_checkpoint, vocab_path=xtts_vocab, use_deepspeed=False)
if torch.cuda.is_available():
XTTS_MODEL.cuda()
print("Model Loaded!")
return "Model Loaded!"
def run_tts(lang, tts_text, speaker_audio_file):
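    """Synthesize speech for `tts_text` in `lang`, conditioned on the chosen reference voice."""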
if XTTS_MODEL is None or not speaker_audio_file:
return "You need to run the previous step to load the model !!", None, None
# Get the selected value from the Dropdown
selected_speaker = speaker_audio_file
# Construct the file path
speaker_audio_path = f"/content/Model/Voice/{selected_speaker}.mp3"
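    # Added guard (assumption: reference clips are stored as mp3 files under
    # /content/Model/Voice/, matching the path built above); fail early with a
    # readable message instead of letting get_conditioning_latents() raise.
    if not os.path.isfile(speaker_audio_path):
        return f"Reference audio not found: {speaker_audio_path}", None, None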
    gpt_cond_latent, speaker_embedding = XTTS_MODEL.get_conditioning_latents(
        audio_path=speaker_audio_path,
        gpt_cond_len=XTTS_MODEL.config.gpt_cond_len,
        max_ref_length=XTTS_MODEL.config.max_ref_len,
        sound_norm_refs=XTTS_MODEL.config.sound_norm_refs,
    )
out = XTTS_MODEL.inference(
text=tts_text,
language=lang,
gpt_cond_latent=gpt_cond_latent,
speaker_embedding=speaker_embedding,
        temperature=XTTS_MODEL.config.temperature,  # sampling settings taken from the model config
length_penalty=XTTS_MODEL.config.length_penalty,
repetition_penalty=XTTS_MODEL.config.repetition_penalty,
top_k=XTTS_MODEL.config.top_k,
top_p=XTTS_MODEL.config.top_p,
)
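    # XTTS returns a mono float waveform at 24 kHz; add a channel dimension and write it to a temp wav file.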
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
out["wav"] = torch.tensor(out["wav"]).unsqueeze(0)
out_path = fp.name
torchaudio.save(out_path, out["wav"], 24000)
return "Speech generated !", out_path, speaker_audio_path
# Tee stdout/stderr to a log file so the UI can surface progress via read_logs().
class Logger:
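    """Write every message to both the terminal and a log file."""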
def __init__(self, filename="log.out"):
self.log_file = filename
self.terminal = sys.stdout
self.log = open(self.log_file, "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
self.terminal.flush()
self.log.flush()
def isatty(self):
return False
# redirect stdout and stderr to a file
sys.stdout = Logger()
sys.stderr = sys.stdout
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
def read_logs():
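    """Return everything written to the tee'd log file so far."""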
sys.stdout.flush()
with open(sys.stdout.log_file, "r") as f:
return f.read()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""XTTS fine-tuning demo\n\n"""
"""
Example runs:
python3 TTS/demos/xtts_ft_demo/xtts_demo.py --port
""",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--port",
type=int,
help="Port to run the gradio demo. Default: 5003",
default=5003,
)
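    # The remaining flags are inherited from the XTTS fine-tuning demo and are
    # not used by this inference-only script (only args.port is read below).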
parser.add_argument(
"--out_path",
type=str,
help="Output path (where data and checkpoints will be saved) Default: /tmp/xtts_ft/",
default="/tmp/xtts_ft/",
)
parser.add_argument(
"--num_epochs",
type=int,
help="Number of epochs to train. Default: 10",
default=10,
)
parser.add_argument(
"--batch_size",
type=int,
help="Batch size. Default: 4",
default=4,
)
parser.add_argument(
"--grad_acumm",
type=int,
help="Grad accumulation steps. Default: 1",
default=1,
)
parser.add_argument(
"--max_audio_length",
type=int,
help="Max permitted audio size in seconds. Default: 11",
default=11,
)
args = parser.parse_args()
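    # Languages supported by XTTS: ISO 639-1 code mapped to the display name shown in the dropdown.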
language_names = {
"en": "English",
"es": "Spanish",
"fr": "French",
"de": "German",
"it": "Italian",
"pt": "Portuguese",
"pl": "Polish",
"tr": "Turkish",
"ru": "Russian",
"nl": "Dutch",
"cs": "Czech",
"ar": "Arabic",
"zh": "Chinese",
"hu": "Hungarian",
"ko": "Korean",
"ja": "Japanese",
}
with gr.Blocks() as demo:
with gr.Tab("Inference"):
with gr.Row():
with gr.Column() as col1:
xtts_checkpoint = gr.Textbox(
label="XTTS checkpoint path:",
value="/content/Model/model.pth",
)
xtts_config = gr.Textbox(
label="XTTS config path:",
value="/content/Model/config.json",
)
xtts_vocab = gr.Textbox(
label="XTTS vocab path:",
value="/content/Model/vocab.json",
)
progress_load = gr.Label(
label="Progress:"
)
load_btn = gr.Button(value="Load Fine-tuned XTTS model")
with gr.Column() as col2:
speaker_reference_audio = gr.Dropdown(
label="Speaker reference audio:",
value="Adam",
choices=[
"Adam", "Alice", "Antoni", "Arnold", "Bill", "Brian", "Callum", "Charlie", "Charlotte", "Chris", "Clyde", "Daniel", "Dave", "Domi", "Dorothy", "Drew", "Elli", "Emily", "Ethan", "Fin", "Freya", "George", "Gigi", "Giovanni", "Glinda", "Grace", "Harry", "James", "Jessie", "Joseph", "Josh", "Liam", "Lily", "Matilda", "Matthew", "Michael", "Mimi", "Nicole", "Paul", "Rachel", "Sam", "Sarah", "Serena", "Thomas", "----------New Voice----------", "Brian - deep narrator", "Sara Martin 2", "Soothing Sam", "CRISTINA VOICE", "Enrique M. Nieto", "Jadon - YouTube PRO Voiceover", "Alex - Australian Male - Casual - Melbourne City", "Dante - Castilian Spanish", "Jameson - Guided Meditation & Narration", "David - American Narrator", "Ryan Kurk", "Géza B.", "Aerylla", "Tom - trailer narrator", "Karl", "Frederick Surrey", "Marcelo Costa_Brasileiro", "Kingsley - Royal and Deep", "Paul - Narration ", "Fowler - scary and authoratative", "Haseeb - Canadian Narration", "David - British Storyteller", "Adriano - Narrator", "Mary", "Fernando Martinez", "Nichalia Schwartz", "Cole - Gritty-Rough-Strong", "Bill Oxley ", "Adam - low, rough, and full", "David - Deep and narrative", "Pro Narrator - Convincing story teller", "Neil - calm and deep", "Will - Young Australian Male", "Haroldo ", "Mia - Clear, Smooth, Professional", "Lyle - Western Narrator", "Michael Reed", "Shannon B - Warm Southern Woman", "Natasha - African American Woman", "Enrique Mondragón", "Booker - Story Man", "Luis Guary", "Lawrence Mayles", "Shannon - Soft American Woman", "Isabela - Spanish Children's Book Narrator", "Mohammed - Profound and Deep ", "Jeremie", "Neal", "Julian - deep rich mature British voice", "Scarlett - Western Narrator", "Martin Osborne 1", "Vidhi - Young & Bold", "Tony - middle aged, male, Northern English native accent", "Samantha Narrations", "Matt Snowden", "Allison - inviting and velvety British accent", "Zeus Epic", "Angela", "Emily - Calm yet charismatic", "Kade Murdock HQ", "Ellie", "Matthew - American Male Narrator", "Leon Deep", "Martin Osborne 2", "Henry - Sport Narrator_Commentator", "Eden", "harry deep and warm", "Lerato", "Zoe Drake - Professional", "Diego Galán", "Phoebe", "Old Wizard"
]
)
tts_language = gr.Dropdown(
label="Language",
value="en",
choices=list(zip(language_names.values(), language_names.keys()))
)
tts_text = gr.Textbox(
label="Input Text.",
value="This model sounds really good and above all, it's reasonably fast.",
)
                    tts_btn = gr.Button(value="Inference")
with gr.Column() as col3:
progress_gen = gr.Label(
label="Progress:"
)
tts_output_audio = gr.Audio(label="Generated Audio.")
reference_audio = gr.Audio(label="Reference audio used.")
load_btn.click(
fn=load_model,
inputs=[
xtts_checkpoint,
xtts_config,
xtts_vocab
],
outputs=[progress_load],
)
tts_btn.click(
fn=run_tts,
inputs=[
tts_language,
tts_text,
speaker_reference_audio,
],
outputs=[progress_gen, tts_output_audio, reference_audio],
)
demo.launch(
share=True,
debug=False,
server_port=args.port,
server_name="0.0.0.0"
)
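# Example launch (assumes the fine-tuned model files exist at the /content/Model
# paths prefilled in the UI):
#   python3 xtts_voice.py --port 5003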