Spaces:
Runtime error
| import random | |
| import numpy as np | |
| import torch | |
| from chatterbox.src.chatterbox.tts import ChatterboxTTS | |
| import gradio as gr | |
| import spaces | |
| import re | |
# Select the compute device once at import time; model loading and seeding
# below both key off this value.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"๐ Running on device: {DEVICE}")
# --- Global Model Initialization ---
# Lazily populated process-wide singleton; see get_or_load_model().
MODEL = None
def get_or_load_model():
    """Return the process-wide ChatterboxTTS model, loading it on first use.

    The model is cached in the module-level ``MODEL`` global; after loading,
    it is moved to ``DEVICE`` when it reports a different device. Any load
    failure is logged and re-raised.
    """
    global MODEL
    if MODEL is not None:
        return MODEL
    print("Model not loaded, initializing...")
    try:
        MODEL = ChatterboxTTS.from_pretrained(DEVICE)
        # Move only when the model exposes .to() and is on the wrong device.
        needs_move = hasattr(MODEL, 'to') and str(MODEL.device) != DEVICE
        if needs_move:
            MODEL.to(DEVICE)
        print(f"Model loaded successfully. Internal device: {getattr(MODEL, 'device', 'N/A')}")
    except Exception as e:
        print(f"Error loading model: {e}")
        raise
    return MODEL
# Attempt to load the model at startup.
# A failure here is logged but deliberately non-fatal: each generation call
# retries via get_or_load_model().
try:
    get_or_load_model()
except Exception as e:
    print(f"CRITICAL: Failed to load model on startup. Application may not function. Error: {e}")
def set_seed(seed: int):
    """Seed every RNG source (random, numpy, torch, CUDA) for reproducibility.

    CUDA generators are seeded only when running on a CUDA device.
    """
    # The individual seeding calls are independent of one another.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if DEVICE == "cuda":
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
def split_text_into_chunks(text: str, max_chars: int = 250) -> list[str]:
    """Split *text* into chunks of at most *max_chars* characters.

    Splitting prefers sentence boundaries (after ., !, ?). A sentence longer
    than *max_chars* falls back to whitespace splitting, and a single word
    longer than *max_chars* is hard-split by characters — so, unlike the
    previous version, no returned chunk ever exceeds the limit.

    Args:
        text: Input text of arbitrary length.
        max_chars: Maximum length of each returned chunk.

    Returns:
        A list of non-empty chunks; an empty list for empty/whitespace input.
    """
    # Basic sentence segmentation on terminal punctuation + whitespace.
    sentences = re.split(r'(?<=[.!?])\s+', text.strip())
    chunks: list[str] = []
    current = ""

    def _append_piece(piece: str) -> None:
        # Accumulate `piece` into `current`, flushing to `chunks` when the
        # space-joined result would exceed max_chars.
        nonlocal current
        if len(current) + (1 if current else 0) + len(piece) <= max_chars:
            current = f"{current} {piece}" if current else piece
        else:
            if current:
                chunks.append(current)
            current = piece

    for sentence in sentences:
        if not sentence:
            continue  # re.split yields '' for empty input
        if len(sentence) <= max_chars:
            _append_piece(sentence)
        else:
            # Sentence too long for one chunk: split on whitespace.
            for word in sentence.split():
                if len(word) <= max_chars:
                    _append_piece(word)
                else:
                    # FIX: a word longer than max_chars used to be emitted as
                    # an oversized chunk; hard-split it by characters instead.
                    for i in range(0, len(word), max_chars):
                        _append_piece(word[i:i + max_chars])

    # Flush the trailing partial chunk.
    if current:
        chunks.append(current)
    return chunks
def generate_tts_audio(
    text_input: str,
    audio_prompt_path_input: str,
    exaggeration_input: float,
    temperature_input: float,
    seed_num_input: int,
    cfgw_input: float,
    chunk_size_input: int,
    progress=gr.Progress()
) -> tuple[int, np.ndarray]:
    """Generate TTS audio for arbitrarily long text by chunking.

    Splits the text into sentence-aligned chunks, synthesizes each chunk with
    the shared ChatterboxTTS model, and concatenates the results with a short
    silence between chunks.

    Args:
        text_input: Text to synthesize (any length).
        audio_prompt_path_input: Path/URL of the reference audio prompt.
        exaggeration_input: Exaggeration control forwarded to the model.
        temperature_input: Sampling temperature forwarded to the model.
        seed_num_input: RNG seed; 0 means "do not seed" (non-deterministic).
        cfgw_input: CFG/pace weight forwarded to the model.
        chunk_size_input: Maximum characters per chunk.
        progress: Gradio progress reporter (injected by Gradio).

    Returns:
        ``(sample_rate, audio)`` tuple suitable for a ``gr.Audio`` output.

    Raises:
        RuntimeError: If the model is unavailable or every chunk fails.
    """
    # NOTE(review): on a ZeroGPU Space this function likely needs the
    # @spaces.GPU decorator (the `spaces` import suggests that intent) —
    # confirm against the Space's hardware configuration.
    current_model = get_or_load_model()
    if current_model is None:
        raise RuntimeError("TTS model is not loaded.")

    if seed_num_input != 0:
        set_seed(int(seed_num_input))

    # Split the text into model-sized chunks.
    chunks = split_text_into_chunks(text_input, max_chars=chunk_size_input)
    total_chunks = len(chunks)
    print(f"ํ ์คํธ๋ฅผ {total_chunks}๊ฐ์ ์ฒญํฌ๋ก ๋ถํ ํ์ต๋๋ค.")

    # Synthesize each chunk independently; failed chunks are skipped.
    audio_segments = []
    for i, chunk in enumerate(chunks):
        progress((i + 1) / total_chunks, f"์ฒญํฌ {i + 1}/{total_chunks} ์์ฑ ์ค...")
        print(f"์ฒญํฌ {i + 1}/{total_chunks} ์์ฑ ์ค: '{chunk[:50]}...'")
        try:
            wav = current_model.generate(
                chunk,
                audio_prompt_path=audio_prompt_path_input,
                exaggeration=exaggeration_input,
                temperature=temperature_input,
                cfg_weight=cfgw_input,
            )
            # FIX: move the tensor to CPU before numpy conversion —
            # Tensor.numpy() raises TypeError on CUDA tensors, which
            # previously made every chunk fail when the model ran on GPU.
            wav_chunk = wav.squeeze(0).detach().cpu().numpy()
            audio_segments.append(wav_chunk)
        except Exception as e:
            print(f"์ฒญํฌ {i + 1} ์์ฑ ์ค ์ค๋ฅ ๋ฐ์: {e}")
            # Best-effort: keep going with the remaining chunks.
            continue

    if not audio_segments:
        raise RuntimeError("์ค๋์ค ์์ฑ์ ์คํจํ์ต๋๋ค.")

    # Insert 0.2 s of silence between consecutive chunks for natural pacing.
    silence = np.zeros(int(0.2 * current_model.sr))
    final_audio = []
    for i, segment in enumerate(audio_segments):
        final_audio.append(segment)
        if i < len(audio_segments) - 1:
            final_audio.append(silence)
    concatenated_audio = np.concatenate(final_audio)
    print(f"์ค๋์ค ์์ฑ ์๋ฃ. ์ด ๊ธธ์ด: {len(concatenated_audio) / current_model.sr:.2f}์ด")
    return (current_model.sr, concatenated_audio)
# Simple single-shot generation path (no chunking) for short inputs.
def generate_single_audio(
    text_input: str,
    audio_prompt_path_input: str,
    exaggeration_input: float,
    temperature_input: float,
    seed_num_input: int,
    cfgw_input: float
) -> tuple[int, np.ndarray]:
    """Generate TTS audio for a single short text (truncated to 300 chars).

    Args:
        text_input: Text to synthesize; only the first 300 characters are used.
        audio_prompt_path_input: Path/URL of the reference audio prompt.
        exaggeration_input: Exaggeration control forwarded to the model.
        temperature_input: Sampling temperature forwarded to the model.
        seed_num_input: RNG seed; 0 means "do not seed".
        cfgw_input: CFG/pace weight forwarded to the model.

    Returns:
        ``(sample_rate, audio)`` tuple suitable for a ``gr.Audio`` output.

    Raises:
        RuntimeError: If the model is not loaded.
    """
    current_model = get_or_load_model()
    if current_model is None:
        raise RuntimeError("TTS model is not loaded.")
    if seed_num_input != 0:
        set_seed(int(seed_num_input))
    print(f"Generating audio for text: '{text_input[:50]}...'")
    wav = current_model.generate(
        text_input[:300],  # truncated to 300 chars for stability
        audio_prompt_path=audio_prompt_path_input,
        exaggeration=exaggeration_input,
        temperature=temperature_input,
        cfg_weight=cfgw_input,
    )
    print("Audio generation complete.")
    # FIX: .detach().cpu() before .numpy() — Tensor.numpy() raises TypeError
    # on CUDA tensors, so this crashed whenever the model ran on GPU.
    return (current_model.sr, wav.squeeze(0).detach().cpu().numpy())
# --- Gradio UI definition, event wiring, and app launch ---
# NOTE(review): the original indentation was lost in extraction; the nesting
# of Rows/Columns below is a plausible reconstruction — verify the intended
# layout against the running Space.
with gr.Blocks() as demo:
    # Page header (Korean, mojibake preserved): "unlimited length version".
    gr.Markdown(
        """
        # Chatterbox TTS Demo - ๋ฌด์ ํ ๊ธธ์ด ๋ฒ์ 
        ๊ธด ํ ์คํธ๋ ์ฒญํฌ๋ก ๋๋์ด ์ฒ๋ฆฌํ์ฌ ์ ํ ์์ด ์์ฑ์ ์์ฑํฉ๋๋ค.
        """
    )
    with gr.Row():
        with gr.Column():
            # Main text input; no length limit (chunked mode handles long text).
            text = gr.Textbox(
                value="Now let's make my mum's favourite. So three mars bars into the pan. Then we add the tuna and just stir for a bit, just let the chocolate and fish infuse. A sprinkle of olive oil and some tomato ketchup. Now smell that. Oh boy this is going to be incredible.",
                label="ํ ์คํธ ์ ๋ ฅ (๊ธธ์ด ์ ํ ์์)",
                lines=10,
                max_lines=30
            )
            # Optional reference voice; defaults to a hosted sample prompt.
            ref_wav = gr.Audio(
                sources=["upload", "microphone"],
                type="filepath",
                label="Reference Audio File (Optional)",
                value="https://storage.googleapis.com/chatterbox-demo-samples/prompts/female_shadowheart4.flac"
            )
            with gr.Row():
                # Exaggeration and CFG/pace controls, side by side.
                exaggeration = gr.Slider(
                    0.25, 2, step=.05,
                    label="Exaggeration (Neutral = 0.5)",
                    value=.5
                )
                cfg_weight = gr.Slider(
                    0.2, 1, step=.05,
                    label="CFG/Pace",
                    value=0.5
                )
            with gr.Row():
                # Maximum characters per chunk, used by the chunked mode.
                chunk_size = gr.Slider(
                    100, 300, step=50,
                    label="์ฒญํฌ ํฌ๊ธฐ (๋ฌธ์ ์)",
                    value=250,
                    info="ํ ์คํธ๋ฅผ ๋๋ ์ฒญํฌ์ ์ต๋ ํฌ๊ธฐ์ ๋๋ค. ์์์๋ก ๋ ์์ฐ์ค๋ฝ์ง๋ง ์ฒ๋ฆฌ ์๊ฐ์ด ๊ธธ์ด์ง๋๋ค."
                )
                # Generation mode: single-shot (<=300 chars) vs chunked.
                mode = gr.Radio(
                    choices=["๋จ์ผ ์์ฑ (300์ ์ดํ)", "์ฒญํฌ ๋ถํ (๋ฌด์ ํ)"],
                    value="์ฒญํฌ ๋ถํ (๋ฌด์ ํ)",
                    label="์์ฑ ๋ชจ๋"
                )
            with gr.Accordion("๊ณ ๊ธ ์ต์ ", open=False):
                seed_num = gr.Number(value=0, label="Random seed (0 for random)")
                temp = gr.Slider(0.05, 5, step=.05, label="Temperature", value=.8)
            run_btn = gr.Button("์์ฑ ์์ฑ", variant="primary")
        with gr.Column():
            audio_output = gr.Audio(label="์์ฑ๋ ์์ฑ")
            # Read-only text stats: character count / estimated chunk count.
            char_count = gr.Textbox(
                label="ํ ์คํธ ์ ๋ณด",
                value="0 ๋ฌธ์, ์ฝ 0๊ฐ ์ฒญํฌ",
                interactive=False
            )

    # Recompute the character/chunk summary on any relevant input change.
    def update_char_count(text, chunk_size, mode):
        """Return a human-readable summary of text length (and chunk count)."""
        char_len = len(text)
        if mode == "๋จ์ผ ์์ฑ (300์ ์ดํ)":
            # Single-shot mode: warn when over the 300-char safety limit.
            if char_len > 300:
                return f"{char_len} ๋ฌธ์ (โ ๏ธ 300์ ์ด๊ณผ - ์๋ฆด ์ ์์)"
            else:
                return f"{char_len} ๋ฌธ์"
        else:
            # Chunked mode: show how many chunks the text would split into.
            chunks = split_text_into_chunks(text, max_chars=chunk_size)
            chunk_count = len(chunks)
            return f"{char_len} ๋ฌธ์, ์ฝ {chunk_count}๊ฐ ์ฒญํฌ๋ก ๋ถํ ๋จ"

    text.change(
        fn=update_char_count,
        inputs=[text, chunk_size, mode],
        outputs=[char_count]
    )
    chunk_size.change(
        fn=update_char_count,
        inputs=[text, chunk_size, mode],
        outputs=[char_count]
    )
    mode.change(
        fn=update_char_count,
        inputs=[text, chunk_size, mode],
        outputs=[char_count]
    )

    # Dispatch the click to the generator matching the selected mode.
    def process_audio(text, ref_wav, exaggeration, temp, seed_num, cfg_weight, chunk_size, mode):
        """Route the run button to single-shot or chunked generation."""
        if mode == "๋จ์ผ ์์ฑ (300์ ์ดํ)":
            return generate_single_audio(text, ref_wav, exaggeration, temp, seed_num, cfg_weight)
        else:
            return generate_tts_audio(text, ref_wav, exaggeration, temp, seed_num, cfg_weight, chunk_size)

    run_btn.click(
        fn=process_audio,
        inputs=[
            text,
            ref_wav,
            exaggeration,
            temp,
            seed_num,
            cfg_weight,
            chunk_size,
            mode
        ],
        outputs=[audio_output],
    )

    # Usage tips (Korean, mojibake preserved) rendered below the controls.
    gr.Markdown(
        """
        ### ์ฌ์ฉ ํ:
        - **๋จ์ผ ์์ฑ ๋ชจ๋**: 300์ ์ดํ์ ์งง์ ํ ์คํธ์ ์ ํฉํ๋ฉฐ ๋น ๋ฅด๊ฒ ์์ฑ๋ฉ๋๋ค
        - **์ฒญํฌ ๋ถํ ๋ชจ๋**: ๊ธด ํ ์คํธ๋ฅผ ์๋์ผ๋ก ์ฌ๋ฌ ๋ถ๋ถ์ผ๋ก ๋๋์ด ์ฒ๋ฆฌํฉ๋๋ค
        - ์ฒญํฌ ํฌ๊ธฐ๋ฅผ ์กฐ์ ํ์ฌ ํ์ง๊ณผ ์๋์ ๊ท ํ์ ๋ง์ถ ์ ์์ต๋๋ค
        - ๊ฐ ์ฒญํฌ ์ฌ์ด์๋ ์์ฐ์ค๋ฌ์ด ์ ํ์ ์ํด ์งง์ ๋ฌด์์ด ์ถ๊ฐ๋ฉ๋๋ค
        - ๋งค์ฐ ๊ธด ํ ์คํธ์ ๊ฒฝ์ฐ ์ฒ๋ฆฌ ์๊ฐ์ด ์ค๋ ๊ฑธ๋ฆด ์ ์์ต๋๋ค
        """
    )

demo.launch()