"""Streamlit front-end for the Ori TTS & voice-cloning system.

Flow per rerun: restore pending state -> top nav -> (Home page) sidebar
model/language/voice controls -> text input + pronunciation-pair editor ->
on "Generate Speech", optionally clone a voice from reference audio, then
synthesize speech via the async utils API and stash the result in
``st.session_state`` so it survives subsequent reruns.

NOTE(review): this file was recovered from a whitespace-collapsed source;
the indentation below is a reconstruction — token order is unchanged, but
block nesting at a few ambiguous points is flagged inline with
NOTE(review) comments and should be confirmed against the original.
"""
import streamlit as st
import asyncio
import tempfile
import parameters
import utils
import classes
import json
import random
from S3_bucket import AWS
import warnings

warnings.filterwarnings("ignore", category=RuntimeWarning)

# S3 client wrapper; exposes a filesystem-like handle at ``aws.fs``.
aws = AWS()

# Hook: apply pending random text before rendering widgets.
# Widget values keyed by ``input_text`` cannot be written after the widget
# is instantiated in the same run, so the "Random Text" button stores the
# text plus a flag and triggers a rerun; this block consumes both before
# the text_area below is created.
if "set_random_next_run" in st.session_state and st.session_state.set_random_next_run:
    st.session_state.input_text = st.session_state.pending_random_text
    st.session_state.set_random_next_run = False

# ---- One-time session-state defaults (persist across reruns) ----
if "has_audio" not in st.session_state:
    st.session_state.has_audio = False
if "last_msg" not in st.session_state:
    st.session_state.last_msg = None
if "last_audio" not in st.session_state:
    st.session_state.last_audio = None
if "last_sr" not in st.session_state:
    st.session_state.last_sr = None
if "show_feedback" not in st.session_state:
    st.session_state.show_feedback = False
if "last_session_id" not in st.session_state:
    st.session_state.last_session_id = None
if "pronunc_dict" not in st.session_state:
    # Seed the per-session pronunciation dictionary from the shared JSON
    # file stored in S3 (loaded once per session).
    with aws.fs.open(parameters.GLOBAL_PRONUNCIATION_DICT_PATH, "r") as f:
        global_pronunc_dict = json.loads(f.read())
    st.session_state.pronunc_dict = global_pronunc_dict
if "voice_cache" not in st.session_state:
    # Maps "{audio_hash}_{language_code}_{model}" -> cloned voice_id so
    # re-uploading the same reference audio skips the cloning call.
    st.session_state.voice_cache = {}
if "page" not in st.session_state:
    st.session_state.page = "Home"

# ---- Top nav (always visible) ----
# Each button has two render variants: a primary-styled one when its page
# is active, a plain one otherwise. Only one branch runs per rerun, so the
# shared widget key (e.g. "nav_home") never collides.
col_h, col_u, col_a, _ = st.columns([0.2, 0.2, 0.2, 0.4])
with col_h:
    if st.session_state.page == "Home":
        if st.button("🏠 Home", key="nav_home", type='primary', use_container_width=True):
            st.session_state.page = "Home"
    else:
        if st.button("Home", key="nav_home", use_container_width=True):
            st.session_state.page = "Home"
with col_u:
    if st.session_state.page == "Use":
        if st.button("Use", key="nav_use", type='primary', use_container_width=True):
            st.session_state.page = "Use"
    else:
        if st.button("How to use app", key="nav_use", use_container_width=True):
            st.session_state.page = "Use"
with col_a:
    if st.session_state.page == "About":
        if st.button("ℹ️ About", key="nav_about", type='primary', use_container_width=True):
            st.session_state.page = "About"
    else:
        if st.button("About", key="nav_about", use_container_width=True):
            st.session_state.page = "About"

if st.session_state.page == "Home":
    # Streamlit UI
    # NOTE(review): st.set_page_config is documented to require being the
    # first Streamlit command of the script; here it runs after st.columns
    # / st.button above and only on the Home page — confirm this does not
    # raise StreamlitAPIException at runtime.
    st.set_page_config(page_title="Ori TTS & Voice Cloning", layout="wide")
    st.title("🎙️ Ori TTS & Voice Cloning System")
    st.markdown("Choose a default speaker or upload reference audio (min 5 sec), select a language, and enter text to generate speech")

    # ---- Sidebar: model / language / voice source / advanced sliders ----
    with st.sidebar:
        st.title("Home")
        st.markdown("---")
        st.header("Models......")
        model = st.radio("Select Model", ["V2", "V1"])
        if model == "V1":
            st.header("Languages.....")
            language = st.selectbox("Select Language", list(utils.V1_LANGUAGES.keys()))
            st.header("Voice Settings.....")
            voice_mode = st.radio("Voice Selection Mode", ["Default Speaker", "Upload Audio"])
            if voice_mode == "Default Speaker":
                # V1 speakers are keyed by language code, not display name.
                default_speaker = st.selectbox("Select Default Speaker", list(utils.V1_SPEAKERS[utils.V1_LANGUAGES[language]]))
                reference_audio = None
            else:
                st.info("Give a reference audio (min 5 seconds)")
                audio_source = st.radio(
                    "Reference audio source",
                    ["Upload file", "Record audio"],
                    horizontal=True,
                    key="v1_audio_source",
                )
                default_speaker = None
                if audio_source == "Upload file":
                    reference_audio = st.file_uploader(
                        "Upload Reference Audio",
                        type=["wav", "mp3", "flac"],
                        key="v1_file_uploader",
                    )
                else:
                    # Record audio via the browser microphone.
                    reference_audio = st.audio_input(
                        "Record Reference Audio",
                        key="v1_audio_input",
                    )
        else:
            # V2 branch mirrors V1 but reads the V2 language/speaker tables.
            st.header("Languages.....")
            language = st.selectbox("Select Language", list(utils.V2_LANGUAGES.keys()))
            st.header("Voice Settings.....")
            voice_mode = st.radio("Voice Selection Mode", ["Default Speaker", "Upload Audio"])
            if voice_mode == "Default Speaker":
                default_speaker = st.selectbox("Select Default Speaker", list(utils.V2_SPEAKERS[utils.V2_LANGUAGES[language]]))
                reference_audio = None
            else:
                st.info("Give a reference audio (min 5 seconds)")
                audio_source = st.radio(
                    "Reference audio source",
                    ["Upload file", "Record audio"],
                    horizontal=True,
                    key="v2_audio_source",
                )
                default_speaker = None
                if audio_source == "Upload file":
                    reference_audio = st.file_uploader(
                        "Upload Reference Audio",
                        type=["wav", "mp3", "flac"],
                        key="v2_file_uploader",
                    )
                else:
                    reference_audio = st.audio_input(
                        "Record Reference Audio",
                        key="v2_audio_input",
                    )
        # Synthesis tuning knobs forwarded verbatim to the generate calls.
        with st.expander("Advanced Settings"):
            speech_rate = st.slider("Speech Rate", 0.25, 2.0, 1.0, 0.25)
            speed = st.slider("Speed", 0.5, 2.0, 1.0, 0.1)
            expressive = st.slider("Expressive", 0.0, 1.0, 0.1, 0.05)
            stability = st.slider("Stability", 0, 10, 1, 1)
            clarity = st.slider("Clarity", 0.0, 1.0, 0.1, 0.1)
            volume_level = st.slider("Volume Level", 0.5, 3.0, 1.0, 0.1)
            stitch_request = st.checkbox("Stitch Request ()", value=False)

    # ---- Main content: text entry (left) + pronunciation editor (right) ----
    col1, col2 = st.columns([2, 1])
    with col1:
        if 'input_text' not in st.session_state:
            st.session_state['input_text'] = ''
        if 'set_random_next_run' not in st.session_state:
            st.session_state.set_random_next_run = False
        if 'pending_random_text' not in st.session_state:
            st.session_state.pending_random_text = ''
        input_text = st.text_area(
            "Input Text",
            key='input_text',
            placeholder="Enter the text you want to synthesize...",
            height=130
        )
        btn_col1, btn_col2 = st.columns(2)
        with btn_col1:
            random_btn = st.button("🎲 Random Text", use_container_width=True)
        with btn_col2:
            generate_btn = st.button("🎵 Generate Speech", type="primary", use_container_width=True)
    with col2:
        st.markdown("### Add Pronunciation Pair")
        key_col1, value_col2 = st.columns(2)
        with key_col1:
            pr_key = st.text_input(
                "Pronunciation key 👇",
                label_visibility="visible",
                disabled=False,
                placeholder="Enter word",
                key="pr_key",
            )
        with value_col2:
            pr_value = st.text_input(
                "Pronunciation value 👇",
                label_visibility="visible",
                disabled=False,
                placeholder="Enter correct pronunciation",
                key="pr_value",
            )
        add_pair = st.button("Add Pronunciation Pair", type='primary', use_container_width=True)
        if add_pair:
            if pr_key.strip() and pr_value.strip():
                # Session-local only: the global dict in S3 is not written
                # back here.
                st.session_state.pronunc_dict[pr_key.strip()] = pr_value.strip()
                st.success(f"Added pronunciation pair: {pr_key.strip()} → {pr_value.strip()}")
                # do NOT assign st.session_state.pr_key / pr_value here —
                # mutating a widget-keyed value after instantiation raises.
            else:
                st.warning("Both key and value are required to add a pronunciation pair.")
        st.markdown(""" If the model mispronounces some word incorrectly, you can correct it by adding the term as the Pronunciation Key and its phonetic spelling as the Pronunciation Value. For example, if AI/Cholestrol isn't pronounced correctly, respell it as ए आई/colestrol: enter AI/Cholestrol in the Pronunciation Key field and ए आई/colestrol in the Pronunciation Value field, then click **Add Pronunciation Pair**. """)

    # "Random Text" handler: stage the text + flag, then rerun so the hook
    # at the top of the script can populate the text_area before it renders.
    if random_btn:
        if language in utils.language_sentences.keys():
            random_text = random.choice(utils.language_sentences[language])
            st.session_state.pending_random_text = random_text
            st.session_state.set_random_next_run = True
            st.rerun()
        else:
            st.warning(f"No sample sentences available for {language}")

    # ---- "Generate Speech" handler ----
    if generate_btn:
        session_id = utils.generate_session_id()
        # Validate pronunciation input
        pronunciation_dict_str = st.session_state.pronunc_dict
        input_text = st.session_state.input_text
        print(f"Clicked Generation btn.....\n input:- {input_text}")
        if not input_text.strip():
            st.warning("Please enter text to synthesize")
        elif len(input_text) > 1000:
            st.warning(f"Text length must be less than 1000 characters. Current length: {len(input_text)}")
        else:
            try:
                token = parameters.TTS_SECRET_KEY
                if model == "V1":
                    language_code = utils.V1_LANGUAGES[language]
                else:
                    language_code = utils.V2_LANGUAGES[language]
                user_id = parameters.user_id
                voice_path = None
                # Determine voice_id based on mode
                if voice_mode == "Default Speaker" and model == "V1":
                    if language_code in list(utils.V1_SPEAKERS.keys()):
                        voice_id = default_speaker
                        status_msg = f"Using default speaker: {default_speaker} for {language}"
                    else:
                        st.warning(f"Language {language} not available for {default_speaker}")
                        st.stop()
                elif voice_mode == "Default Speaker" and model == "V2":
                    if language_code in list(utils.V2_SPEAKERS.keys()):
                        voice_id = default_speaker
                        status_msg = f"Using default speaker: {default_speaker} for {language}"
                    else:
                        st.warning(f"Language {language} not available for {default_speaker}")
                        st.stop()
                else:
                    # Upload Audio mode: clone (or reuse a cached clone of)
                    # the reference voice.
                    if not reference_audio:
                        st.warning("Please upload a reference audio file")
                        st.stop()
                    audio_hash = utils.get_audio_hash(reference_audio)
                    cache_key = f"{audio_hash}_{language_code}_{model}"
                    if cache_key in st.session_state.voice_cache:
                        voice_id = st.session_state.voice_cache[cache_key]
                        voice_path = cache_key
                        status_msg = f"✓ Using cached voice ID for language: {language}"
                    else:
                        with st.spinner("Cloning voice..."):
                            # delete=False so the file outlives the handle;
                            # NOTE(review): the temp file is never removed
                            # afterwards — confirm cleanup happens elsewhere.
                            with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
                                tmp_file.write(reference_audio.read())
                                tmp_file.flush()
                                # NOTE(review): nesting reconstructed — the
                                # clone calls appear to run while the handle
                                # is still open (hence the flush); confirm.
                                if model == "V1":
                                    result = utils.v1_clone_voice(tmp_file.name, user_id, token, language_code)
                                else:
                                    result = utils.v2_clone_voice(tmp_file.name, user_id, token)
                                voice_id = result['voice_id']
                            # Rewind the upload (it was consumed by .read()
                            # above) before archiving it to S3.
                            reference_audio.seek(0)
                            classes.upload_voice_clone_audio(reference_audio, voice_id)
                            voice_path = cache_key
                            # Crude size bound: wipe the whole cache past 5
                            # entries rather than evicting one.
                            if len(st.session_state.voice_cache) > 5:
                                st.session_state.voice_cache.clear()
                            st.session_state.voice_cache[cache_key] = voice_id
                            print(f"This is the voice id get from {model}:--{voice_id}")
                            status_msg = f"✓ Cloned voice successfully for language: {language}"
                # Generate speech
                # A private event loop is created per click because the
                # synthesis API is async and Streamlit scripts run sync.
                with st.spinner("Generating speech..."):
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)
                    if model=="V1":
                        sr, audio = loop.run_until_complete(
                            utils.v1_generate_speech_async(
                                session_id, voice_mode, voice_id, model, input_text, language_code, user_id, pronunciation_dict_str, speed, expressive, stability, clarity, volume_level, speech_rate, stitch_request
                            )
                        )
                    else:
                        sr, audio = loop.run_until_complete(
                            utils.v2_generate_speech_async(
                                session_id, voice_mode, voice_id, model, input_text, language_code, user_id, pronunciation_dict_str, speed, expressive, stability, clarity, volume_level, speech_rate, stitch_request
                            )
                        )
                    loop.close()
                # Store audio + session info in state, mark as available
                st.session_state.last_msg = status_msg
                st.session_state.last_audio = audio
                st.session_state.last_sr = sr
                st.session_state.last_session_id = session_id
                st.session_state.has_audio = True
                st.session_state.show_feedback = True
                print("Generation completed......")
            except Exception as e:
                # st.warning("Something went wrong. Please try again!")
                # NOTE(review): failures are swallowed silently (the user
                # warning is commented out and ``e`` is never logged) —
                # confirm this is intentional. ``st.stop()`` raises a
                # Streamlit control-flow exception, so the warnings issued
                # just before each st.stop() above are also suppressed by
                # this handler if Streamlit's exception is a subclass of
                # Exception — verify against the installed version.
                st.session_state.show_feedback = False

    # ---- Output & feedback section (renders from session state, so the
    # last result persists across reruns) ----
    st.markdown("---")
    st.markdown("### 🎧 Output & Feedback")
    # Column layout for audio + feedback
    a_col, f_col = st.columns([1, 1])
    with a_col:
        if st.session_state.has_audio and st.session_state.last_audio is not None:
            st.success(st.session_state.last_msg)
            st.audio(st.session_state.last_audio, sample_rate=st.session_state.last_sr)
        else:
            # NOTE(review): source excerpt is truncated mid-statement here;
            # the placeholder markdown (and the feedback column / the "Use"
            # and "About" pages) continue beyond this view.
            st.markdown(
                "