# -*- coding: utf-8 -*-
import typing
import gradio as gr
import numpy as np
import os
import torch
import torch.nn as nn
import audiofile
from tts import StyleTTS2
from textual import only_greek_or_only_latin, transliterate_number, fix_vocals
import audresample
import textwrap
import nltk
from audionar import VitsModel, VitsTokenizer
from audiocraft import AudioGen
audiogen = AudioGen().eval().to('cpu')
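# cache the NLTK punkt models locally and add the download directory to the search path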
nltk.download('punkt', download_dir='./')
nltk.download('punkt_tab', download_dir='./')
nltk.data.path.append('.')
language_names = ['Ancient greek',
'English',
'Deutsch',
'French',
'Hungarian',
'Romanian',
'Serbian (Approx.)']
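# Entries from language_names are synthesised with MMS VITS; any other dropdown value
# is treated as a StyleTTS2 reference voice (see VOICES below).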
def audionar_tts(text=None,
                 lang='Romanian',
                 soundscape='frogs',
                 max_tokens=24):
    # https://huggingface.co/dkounadis/artificial-styletts2/blob/main/msinference.py
    lang_map = {
        'ancient greek': 'grc',
        'english': 'eng',
        'deutsch': 'deu',
        'french': 'fra',
        'hungarian': 'hun',
        'romanian': 'ron',
        'serbian (approx.)': 'rmc-script_latin',
    }
    final_audio = None
    if text is None or text.strip() == '':
        x = np.zeros(4 * 16000, dtype=np.float32)  # no text: 4 s of silence, so only the soundscape (if any) is heard
    elif lang not in language_names:  # text given and a voice file selected -> StyleTTS2
        text = only_greek_or_only_latin(text, lang='eng')
        x = _tts.inference(text,
                           ref_s='wav/' + lang + '.wav')[0, 0, :].numpy()  # 24 kHz
        if x.shape[0] > 10:
            x = audresample.resample(signal=x.astype(np.float32),
                                     original_rate=24000,
                                     target_rate=16000)[0, :]  # 16 kHz
    else:  # a language name selected -> MMS VITS
        lang_code = lang_map.get(lang.lower(), lang.lower().split()[0].strip())
        global cached_lang_code, cached_net_g, cached_tokenizer
        if 'cached_lang_code' not in globals() or cached_lang_code != lang_code:
            # (re)load the MMS-TTS checkpoint only when the language changes
            cached_lang_code = lang_code
            cached_net_g = VitsModel.from_pretrained(f'facebook/mms-tts-{lang_code}').eval()
            cached_tokenizer = VitsTokenizer.from_pretrained(f'facebook/mms-tts-{lang_code}')
        text = only_greek_or_only_latin(text, lang=lang_code)
        text = transliterate_number(text, lang=lang_code)
        text = fix_vocals(text, lang=lang_code) + '!'  # ensures the text ends with at least one character that has a token embedding
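        # synthesise long text in chunks of at most 439 characters and concatenate the audio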
        sentences = textwrap.wrap(text, width=439)
        total_audio_parts = []
        for sentence in sentences:
            inputs = cached_tokenizer(sentence, return_tensors="pt")
            with torch.no_grad():
                audio_part = cached_net_g(
                    input_ids=inputs.input_ids,
                    attention_mask=inputs.attention_mask,
                    lang_code=lang_code,
                )[0, :]
            total_audio_parts.append(audio_part)
        x = torch.cat(total_audio_parts).cpu().numpy()
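    # optionally generate an AudioGen background and mix it with the speech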
    if soundscape and soundscape.strip():
        speech_duration_secs = len(x) / 16000
        target_duration = max(speech_duration_secs + 0.74, 2.0)
        background_audio = audiogen.generate(
            soundscape[:64],  # truncate the prompt so the T5 cross-attention does not grow with very long descriptions
            duration=target_duration,
            max_tokens=min(max(7, int(max_tokens)), 288)  # clamp the AudioGen token budget to [7, 288]
        ).numpy()
        # pad the shorter of speech / background so both have the same length
        len_speech = len(x)
        len_background = len(background_audio)
        if len_background > len_speech:
            padding = np.zeros(len_background - len_speech,
                               dtype=np.float32)
            x = np.concatenate([x, padding])
        elif len_speech > len_background:
            padding = np.zeros(len_speech - len_background,
                               dtype=np.float32)
            background_audio = np.concatenate([background_audio, padding])
        # mix speech and background into a 2-channel signal with slightly different weights per channel
        x = x[None, :]
        background_audio = background_audio[None, :]
        final_audio = np.concatenate([
            0.49 * x + 0.51 * background_audio,
            0.51 * background_audio + 0.49 * x
        ], 0)
    else:
        final_audio = x
    wavfile = '_vits_.wav'
    audiofile.write(wavfile, final_audio, 16000)
    return wavfile  # the same file serves as the audio output and as state passed to the emotion-recognition tab
# TTS
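# StyleTTS2 reference recordings; the filename (minus '.wav') is what appears in the dropdown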
VOICES = [
'jv_ID_google-gmu_04982.wav',
'en_US_vctk_p303.wav',
'en_US_vctk_p306.wav',
'en_US_vctk_p318.wav',
'en_US_vctk_p269.wav',
'en_US_vctk_p316.wav',
'en_US_vctk_p362.wav', # cls
'fr_FR_tom.wav',
'bn_multi_5958.wav',
'en_US_vctk_p287.wav',
'en_US_vctk_p260.wav',
'en_US_cmu_arctic_fem.wav',
'en_US_cmu_arctic_rms.wav',
'fr_FR_m-ailabs_nadine_eckert_boulet.wav',
'en_US_vctk_p237.wav',
'en_US_vctk_p317.wav',
'tn_ZA_google-nwu_0378.wav',
'nl_pmk.wav',
'tn_ZA_google-nwu_3342.wav',
'ne_NP_ne-google_3997.wav',
'tn_ZA_google-nwu_8914.wav',
'en_US_vctk_p238.wav',
'en_US_vctk_p275.wav',
'af_ZA_google-nwu_0184.wav',
'af_ZA_google-nwu_8148.wav',
'en_US_vctk_p326.wav',
'en_US_vctk_p264.wav',
'en_US_vctk_p295.wav',
'en_US_vctk_p294.wav',
'en_US_vctk_p330.wav',
'gu_IN_cmu-indic_cmu_indic_guj_ad.wav',
'jv_ID_google-gmu_05219.wav',
'en_US_vctk_p284.wav',
'en_US_m-ailabs_mary_ann.wav',
'bn_multi_01701.wav',
'en_US_vctk_p262.wav',
'en_US_vctk_p243.wav',
'en_US_vctk_p278.wav',
'en_US_vctk_p250.wav',
'nl_femal.wav',
'en_US_vctk_p228.wav',
'ne_NP_ne-google_0649.wav',
'en_US_cmu_arctic_gka.wav',
'en_US_vctk_p361.wav',
'jv_ID_google-gmu_02326.wav',
'tn_ZA_google-nwu_1932.wav',
'de_DE_thorsten-emotion_amused.wav',
'jv_ID_google-gmu_08002.wav',
'tn_ZA_google-nwu_3629.wav',
'en_US_vctk_p230.wav',
'af_ZA_google-nwu_7214.wav',
'nl_nathalie.wav',
'en_US_cmu_arctic_lnh.wav',
'tn_ZA_google-nwu_6459.wav',
'tn_ZA_google-nwu_6206.wav',
'en_US_vctk_p323.wav',
'en_US_m-ailabs_judy_bieber.wav',
'en_US_vctk_p261.wav',
'fa_haaniye.wav',
# 'en_US_vctk_p339.wav',
'tn_ZA_google-nwu_7896.wav',
'en_US_vctk_p258.wav',
'tn_ZA_google-nwu_7674.wav',
'en_US_hifi-tts_6097.wav',
'en_US_vctk_p304.wav',
'en_US_vctk_p307.wav',
'fr_FR_m-ailabs_bernard.wav',
'en_US_cmu_arctic_jmk.wav',
'ne_NP_ne-google_0283.wav',
'en_US_vctk_p246.wav',
'en_US_vctk_p276.wav',
'style_o22050.wav',
'en_US_vctk_s5.wav',
'en_US_vctk_p268.wav', # reduce clip
'af_ZA_google-nwu_8924.wav',
'en_US_vctk_p363.wav',
'ne_NP_ne-google_3614.wav',
'ne_NP_ne-google_3154.wav',
'en_US_cmu_arctic_eey.wav', # y fix styl
'tn_ZA_google-nwu_2839.wav',
'af_ZA_google-nwu_7130.wav',
'ne_NP_ne-google_2139.wav',
'jv_ID_google-gmu_04715.wav',
'en_US_vctk_p273.wav'
]
VOICES = [t[:-4] for t in VOICES]  # strip the '.wav' extension so only the voice name shows in the gr.Dropdown
_tts = StyleTTS2().to('cpu')
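# Gradio UI: text input, voice/language dropdown, AudioGen prompt, token budget, and audio output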
with gr.Blocks() as demo:
    with gr.Row():
        text_input = gr.Textbox(
            label="Type text for TTS:",
            placeholder="Type Text for TTS",
            lines=4,
            value='Η γρηγορη καφετι αλεπου πηδαει πανω απο τον τεμπελη σκυλο.',
        )
        choice_dropdown = gr.Dropdown(
            choices=language_names + VOICES,
            label="Vox :",
            value=language_names[0],  # VOICES[0]
        )
        soundscape_input = gr.Textbox(
            lines=1,
            value="swims in lake frogs",
            label="AudioGen Txt:"
        )
        kv_input = gr.Number(
            label="Tokens:",
            value=24,
        )
    generate_button = gr.Button("Generate Audio", variant="primary")
    output_audio = gr.Audio(label="TTS Output")
    generate_button.click(
        fn=audionar_tts,
        inputs=[text_input, choice_dropdown, soundscape_input, kv_input],
        outputs=[output_audio]
    )
demo.launch(debug=True)