import os

import torch
from torch import no_grad, LongTensor
import gradio as gr
from transformers import pipeline

import commons
import utils
from models import SynthesizerTrn
from text import text_to_sequence
from text.symbols import symbols

# Hugging Face Spaces sets SYSTEM=spaces; use it to cap input length on the hosted demo.
limitation = os.getenv("SYSTEM") == "spaces"

def get_text(text, hps):
    # Normalize raw text into a sequence of symbol ids using the configured cleaners.
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        # Intersperse blank tokens (id 0) between symbols, as VITS expects when add_blank is set.
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm
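
# Illustrative only: with the Japanese/Korean cleaners configured in hps,
#   get_text("[JA]こんにちは[JA]", hps)
# would yield a 1-D LongTensor of symbol ids (with blank ids interspersed
# when hps.data.add_blank is set).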

def create_tts_fn(net_g, hps, speaker_ids):
    def tts_fn(text, speaker, speed):
        # On the hosted demo, refuse overly long inputs up front.
        if limitation:
            text_len = len(text)
            max_len = 5000
            if text_len > max_len:
                return "Error: Text is too long", None

        speaker_id = speaker_ids[speaker]
        stn_tst = get_text(text, hps)

        with no_grad():
            # Add a batch dimension and run inference; noise_scale / noise_scale_w control
            # sampling variance, and length_scale stretches duration (inverse of speed).
            x_tst = stn_tst.unsqueeze(0)
            x_tst_lengths = LongTensor([stn_tst.size(0)])
            sid = LongTensor([speaker_id])
            audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8,
                                length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy()
        del stn_tst, x_tst, x_tst_lengths, sid
        return "Success", (hps.data.sampling_rate, audio)

    return tts_fn
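
# Illustrative only: once the checkpoint is loaded in __main__ below,
#   status, (sr, wav) = tts_fn("[KO]안녕하세요[KO]", 0, 1.0)
# returns "Success" plus the sampling rate and a float32 waveform.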

css = """
#advanced-btn {
    color: white;
    border-color: black;
    background: black;
    font-size: .7rem !important;
    line-height: 19px;
    margin-top: 24px;
    margin-bottom: 12px;
    padding: 2px 8px;
    border-radius: 14px !important;
}
#advanced-options {
    display: none;
    margin-bottom: 20px;
}
"""

if __name__ == '__main__':
    print("Initializing STT model (Whisper)...")
    stt_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-base")
    print("STT model loaded.")

    name = 'AronaTTS'
    lang = '일본어 / 한국어 (Japanese / Korean)'
    example = '[JA]先生、今日は天気が本当にいいですね。[JA][KO]선생님, 안녕하세요. my name is arona[KO]'
    config_path = "pretrained_model/arona_ms_istft_vits.json"
    model_path = "pretrained_model/arona_ms_istft_vits.pth"
    cover_path = "pretrained_model/cover.gif"

    hps = utils.get_hparams_from_file(config_path)

    # Build the MS-iSTFT-VITS synthesizer from the config hyperparameters.
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model)

    utils.load_checkpoint(model_path, net_g, None)
    net_g.eval()  # inference mode: disables dropout and other training-only behavior

    # Single-speaker checkpoint: one id, one display name.
    speaker_ids = [0]
    speakers = [name]

    tts_fn = create_tts_fn(net_g, hps, speaker_ids)

    def stt_tts_fn(audio_filepath, speaker, speed):
        if audio_filepath is None:
            return "Error: Audio not provided.", None, "Please record or upload audio first."

        print("Transcribing audio...")
        transcription_result = stt_pipeline(audio_filepath)
        transcribed_text = transcription_result['text']
        print(f"Transcribed text: {transcribed_text}")

        if not transcribed_text.strip():
            return "Error: Could not transcribe audio.", None, "No text detected in audio."

        print("Generating speech from transcribed text...")
        status, audio_output = tts_fn(transcribed_text, speaker, speed)
        print("Speech generation complete.")

        return status, audio_output, transcribed_text

    app = gr.Blocks(css=css)

    with app:
        gr.Markdown("# BlueArchive Arona TTS Using VITS Model\n\n")

        with gr.Column():
            gr.Markdown(f"## {name}\n\n"
                        f"lang: {lang}")

            with gr.Tabs():
                with gr.TabItem("Text to Speech"):
                    tts_input_text = gr.TextArea(label="Text (5000-character limit)", value=example)
                    tts_speaker_text = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[0])
                    tts_speed_text = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2, step=0.1)
                    tts_submit_text = gr.Button("Generate from Text", variant="primary")

                with gr.TabItem("Voice to Speech"):
                    audio_input = gr.Audio(type="filepath", label="Record or Upload Voice")
                    tts_speaker_audio = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[0])
                    tts_speed_audio = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2, step=0.1)
                    transcribed_text_output = gr.Textbox(label="Transcribed Text", interactive=False)
                    tts_submit_audio = gr.Button("Generate from Voice", variant="primary")

            gr.Markdown("---")
            gr.Markdown("### Output")
            output_message = gr.Textbox(label="Output Message")
            output_audio = gr.Audio(label="Output Audio")

        # Wire buttons to callbacks: input components map positionally to each fn's parameters.
        tts_submit_text.click(
            tts_fn,
            [tts_input_text, tts_speaker_text, tts_speed_text],
            [output_message, output_audio]
        )

        tts_submit_audio.click(
            stt_tts_fn,
            [audio_input, tts_speaker_audio, tts_speed_audio],
            [output_message, output_audio, transcribed_text_output]
        )

    app.queue(concurrency_count=3).launch(show_api=False)
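    # Note: queue(concurrency_count=...) is the Gradio 3.x API; Gradio 4+ renamed it,
    # e.g. app.queue(default_concurrency_limit=3).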