peterlllmm committed on
Commit fb861cb · verified · 1 Parent(s): fe83d6b

Update app.py

Files changed (1)
  1. app.py +138 -104
app.py CHANGED
Old version (removed lines marked with -):

@@ -1,10 +1,22 @@
 import random
 import numpy as np
 import torch
-from chatterbox.src.chatterbox.tts import ChatterboxTTS
 import gradio as gr
-import spaces

 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"🚀 Running on device: {DEVICE}")
 
@@ -13,21 +25,26 @@ MODEL = None

 def get_or_load_model():
     """Loads the ChatterboxTTS model if it hasn't been loaded already,
-    and ensures it's on the correct device."""
     global MODEL
     if MODEL is None:
         print("Model not loaded, initializing...")
         try:
             MODEL = ChatterboxTTS.from_pretrained(DEVICE)
             if hasattr(MODEL, 'to') and str(MODEL.device) != DEVICE:
                 MODEL.to(DEVICE)
             print(f"Model loaded successfully. Internal device: {getattr(MODEL, 'device', 'N/A')}")
         except Exception as e:
             print(f"Error loading model: {e}")
             raise
     return MODEL

-# Attempt to load the model at startup.
 try:
     get_or_load_model()
 except Exception as e:
@@ -42,37 +59,6 @@ def set_seed(seed: int):
     random.seed(seed)
     np.random.seed(seed)

-import re
-
-def split_text_smart(text, max_length=2000):
-    """Split text into smart chunks without breaking words mid-way (adapted from Kokoro)."""
-    chunks = []
-    start = 0
-    text_length = len(text)
-
-    while start < text_length:
-        if (text_length - start) <= max_length:
-            chunks.append(text[start:].strip())
-            break
-
-        split_pos = start + max_length
-        split_region = text[start:split_pos]
-
-        # Find whitespace or newlines to split at
-        matches = list(re.finditer(r'[\s\n]', split_region))
-        if matches:
-            split_at = matches[-1].start()
-            split_point = start + split_at
-        else:
-            split_point = split_pos
-
-        chunk = text[start:split_point].strip()
-        chunks.append(chunk)
-        start = split_point + 1
-
-    return chunks
-
-@spaces.GPU
 def generate_tts_audio(
     text_input: str,
     audio_prompt_path_input: str = None,
@@ -80,110 +66,156 @@ def generate_tts_audio(
     temperature_input: float = 0.8,
     seed_num_input: int = 0,
     cfgw_input: float = 0.5
-) -> tuple[int, np.ndarray]:
     """
     Generate high-quality speech audio from text using ChatterboxTTS model with optional reference audio styling.
-    Uses smart text chunking to prevent CUDA errors on long texts.
-
-    This tool synthesizes natural-sounding speech from input text. When a reference audio file
-    is provided, it captures the speaker's voice characteristics and speaking style. The generated audio
-    maintains the prosody, tone, and vocal qualities of the reference speaker, or uses default voice if no reference is provided.

     Args:
-        text_input (str): The text to synthesize into speech
         audio_prompt_path_input (str, optional): File path or URL to the reference audio file that defines the target voice style. Defaults to None.
         exaggeration_input (float, optional): Controls speech expressiveness (0.25-2.0, neutral=0.5, extreme values may be unstable). Defaults to 0.5.
         temperature_input (float, optional): Controls randomness in generation (0.05-5.0, higher=more varied). Defaults to 0.8.
         seed_num_input (int, optional): Random seed for reproducible results (0 for random generation). Defaults to 0.
         cfgw_input (float, optional): CFG/Pace weight controlling generation guidance (0.2-1.0). Defaults to 0.5.
-
     Returns:
-        tuple[int, np.ndarray]: A tuple containing the sample rate (int) and the generated audio waveform (numpy.ndarray)
     """
     current_model = get_or_load_model()
-
     if current_model is None:
-        raise RuntimeError("TTS model is not loaded.")

     if seed_num_input != 0:
         set_seed(int(seed_num_input))

-    print(f"Generating audio for text: '{text_input[:50]}...' (Length: {len(text_input)} chars)")
-
-    # Handle optional audio prompt
     generate_kwargs = {
         "exaggeration": exaggeration_input,
         "temperature": temperature_input,
         "cfg_weight": cfgw_input,
     }
-
     if audio_prompt_path_input:
-        generate_kwargs["audio_prompt_path"] = audio_prompt_path_input
-
-    # Force chunking for any text over 1000 chars to prevent CUDA errors
-    if len(text_input) > 1000:
-        chunks = split_text_smart(text_input, max_length=1000)
-        print(f"Text chunked into {len(chunks)} smart segments (forced chunking to prevent CUDA errors)")
-    else:
-        chunks = [text_input]
-        print("Processing as single chunk")
-
-    # Generate audio for each chunk
-    all_audio_segments = []
-    sample_rate = None
-
-    for i, chunk in enumerate(chunks):
-        print(f"Processing chunk {i+1}/{len(chunks)}: {len(chunk)} chars - '{chunk[:30]}...'")
-
         try:
-            wav = current_model.generate(
-                chunk,
-                **generate_kwargs
-            )
-
-            if sample_rate is None:
-                sample_rate = current_model.sr
-
-            all_audio_segments.append(wav.squeeze(0).numpy())
-
-        except RuntimeError as e:
-            if "CUDA" in str(e) or "device-side assert" in str(e):
-                print(f"CUDA error on chunk {i+1}, skipping: {e}")
-                continue
-            else:
-                raise e
-
     if not all_audio_segments:
-        raise RuntimeError("All chunks failed to generate - text may be too long or contain invalid characters")
-
-    # Concatenate all audio segments (no artificial pauses - let natural speech flow)
-    if len(all_audio_segments) == 1:
-        final_audio = all_audio_segments[0]
-    else:
-        final_audio = np.concatenate(all_audio_segments)
-
-    print("Audio generation complete.")
-    return (sample_rate, final_audio)

 with gr.Blocks() as demo:
     gr.Markdown(
         """
         # Chatterbox TTS Demo
         Generate high-quality speech from text with reference audio styling.
         """
     )
     with gr.Row():
         with gr.Column():
             text = gr.Textbox(
-                value="Now let's make my mum's favourite. So three mars bars into the pan. Then we add the tuna and just stir for a bit, just let the chocolate and fish infuse. A sprinkle of olive oil and some tomato ketchup. Now smell that. Oh boy this is going to be incredible.",
-                label="Text to synthesize",  # Removed "max chars 300" from label
-                max_lines=5
             )
             ref_wav = gr.Audio(
                 sources=["upload", "microphone"],
                 type="filepath",
                 label="Reference Audio File (Optional)",
-                value="https://storage.googleapis.com/chatterbox-demo-samples/prompts/female_shadowheart4.flac"
             )
             exaggeration = gr.Slider(
                 0.25, 2, step=.05, label="Exaggeration (Neutral = 0.5, extreme values can be unstable)", value=.5
@@ -191,16 +223,15 @@ with gr.Blocks() as demo:
             cfg_weight = gr.Slider(
                 0.2, 1, step=.05, label="CFG/Pace", value=0.5
             )
-
             with gr.Accordion("More options", open=False):
                 seed_num = gr.Number(value=0, label="Random seed (0 for random)")
                 temp = gr.Slider(0.05, 5, step=.05, label="Temperature", value=.8)
-
             run_btn = gr.Button("Generate", variant="primary")
-
         with gr.Column():
-            audio_output = gr.Audio(label="Output Audio")

     run_btn.click(
         fn=generate_tts_audio,
         inputs=[
@@ -213,5 +244,8 @@ with gr.Blocks() as demo:
         ],
         outputs=[audio_output],
     )
-
-demo.launch(share=True, mcp_server=True)
 
 
 
 
New version (added lines marked with +):

@@ -1,10 +1,22 @@
+
+# Download NLTK data for sentence tokenization
+import nltk
+nltk.download('punkt')
+# Explicitly download 'punkt_tab' as it's often required by sent_tokenize
+nltk.download('punkt_tab')
+
 import random
 import numpy as np
 import torch
+from chatterbox.tts import ChatterboxTTS
 import gradio as gr
+import io
+import soundfile as sf
+from pydub import AudioSegment
+from nltk.tokenize import sent_tokenize
+import os  # Added for temporary file handling
+
+# Determine the device to run on (GPU if available, otherwise CPU)
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"🚀 Running on device: {DEVICE}")
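For reference, once the punkt data above is downloaded, sent_tokenize splits raw text into a list of sentence strings; this is what the chunking loop further down consumes. A minimal sketch:

import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize

print(sent_tokenize("Short one. A second, slightly longer sentence follows! And a third?"))
# ['Short one.', 'A second, slightly longer sentence follows!', 'And a third?']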
 
 
@@ -13,21 +25,26 @@ MODEL = None

 def get_or_load_model():
     """Loads the ChatterboxTTS model if it hasn't been loaded already,
+    and ensures it's on the correct device. This helps avoid reloading
+    the model multiple times, which can be slow."""
     global MODEL
     if MODEL is None:
         print("Model not loaded, initializing...")
         try:
+            # Load the model and move it to the determined device (CPU or CUDA)
             MODEL = ChatterboxTTS.from_pretrained(DEVICE)
+            # Ensure the model is explicitly on the correct device after loading
             if hasattr(MODEL, 'to') and str(MODEL.device) != DEVICE:
                 MODEL.to(DEVICE)
             print(f"Model loaded successfully. Internal device: {getattr(MODEL, 'device', 'N/A')}")
         except Exception as e:
             print(f"Error loading model: {e}")
+            # Re-raise the exception to indicate a critical failure
             raise
     return MODEL

+# Attempt to load the model at startup of the script.
+# This ensures the model is ready when the Gradio interface starts.
 try:
     get_or_load_model()
 except Exception as e:
 
@@ -42,37 +59,6 @@ def set_seed(seed: int):
     random.seed(seed)
     np.random.seed(seed)

 def generate_tts_audio(
     text_input: str,
     audio_prompt_path_input: str = None,
 
@@ -80,110 +66,156 @@ def generate_tts_audio(
     temperature_input: float = 0.8,
     seed_num_input: int = 0,
     cfgw_input: float = 0.5
+) -> str:  # Return type changed to str (filepath)
     """
     Generate high-quality speech audio from text using ChatterboxTTS model with optional reference audio styling.
+    Handles long scripts by chunking text, generating audio for each chunk, and combining them into an MP3.

     Args:
+        text_input (str): The text to synthesize into speech.
         audio_prompt_path_input (str, optional): File path or URL to the reference audio file that defines the target voice style. Defaults to None.
         exaggeration_input (float, optional): Controls speech expressiveness (0.25-2.0, neutral=0.5, extreme values may be unstable). Defaults to 0.5.
         temperature_input (float, optional): Controls randomness in generation (0.05-5.0, higher=more varied). Defaults to 0.8.
         seed_num_input (int, optional): Random seed for reproducible results (0 for random generation). Defaults to 0.
         cfgw_input (float, optional): CFG/Pace weight controlling generation guidance (0.2-1.0). Defaults to 0.5.
     Returns:
+        str: Filepath to the generated combined MP3 audio.
     """
     current_model = get_or_load_model()
     if current_model is None:
+        raise RuntimeError("TTS model is not loaded. Please check the startup logs for errors.")

     if seed_num_input != 0:
         set_seed(int(seed_num_input))

+    print(f"Generating audio for text: '{text_input[:100]}...' (first 100 chars)")
+    print(f"Audio prompt path received: {audio_prompt_path_input}")  # Debug print for the received path
+
     generate_kwargs = {
         "exaggeration": exaggeration_input,
         "temperature": temperature_input,
         "cfg_weight": cfgw_input,
     }
+
+    processed_audio_prompt_path = None
     if audio_prompt_path_input:
         try:
+            # Load the input audio using pydub
+            audio = AudioSegment.from_file(audio_prompt_path_input)
+            # Create a temporary WAV file to ensure compatibility with ChatterboxTTS
+            temp_wav_path = "temp_prompt.wav"
+            audio.export(temp_wav_path, format="wav")
+            processed_audio_prompt_path = temp_wav_path
+            print(f"Converted audio prompt to temporary WAV: {processed_audio_prompt_path}")
+        except Exception as e:
+            print(f"Warning: Could not process audio prompt file '{audio_prompt_path_input}'. Error: {e}")
+            print("Proceeding without audio prompt (will use default voice).")
+            # If conversion fails, ensure the audio prompt path is not used
+            processed_audio_prompt_path = None
+
+    if processed_audio_prompt_path:
+        generate_kwargs["audio_prompt_path"] = processed_audio_prompt_path
+
+    all_audio_segments = []
+    # Split text into sentences for more natural chunking
+    sentences = sent_tokenize(text_input)
+
+    # The Chatterbox model has an implicit input limit, typically around 300 characters.
+    # Chunk sentences to stay within this limit.
+    MAX_CHARS_PER_MODEL_INPUT = 300
+
+    current_chunk_sentences = []
+    current_chunk_char_count = 0
+
+    for sentence in sentences:
+        # If adding this sentence would exceed the max chars, synthesize the current chunk
+        # first (the non-empty check avoids creating empty chunks)
+        if current_chunk_char_count + len(sentence) + 1 > MAX_CHARS_PER_MODEL_INPUT and current_chunk_sentences:  # +1 for space
+            chunk_text = " ".join(current_chunk_sentences)
+            print(f"Processing chunk (chars: {len(chunk_text)}): '{chunk_text[:50]}...'")
+            wav_tensor = current_model.generate(chunk_text, **generate_kwargs)
+            wav_numpy = wav_tensor.squeeze(0).cpu().numpy()
+
+            # Convert numpy array to AudioSegment via an in-memory WAV buffer
+            buffer = io.BytesIO()
+            sf.write(buffer, wav_numpy, current_model.sr, format='WAV')
+            buffer.seek(0)  # Rewind the buffer to the beginning
+            audio_segment = AudioSegment.from_wav(buffer)
+            all_audio_segments.append(audio_segment)
+
+            # Start a new chunk with the current sentence
+            current_chunk_sentences = [sentence]
+            current_chunk_char_count = len(sentence)
+        else:
+            # Add 1 for the joining space, but only if the chunk already has a sentence
+            current_chunk_char_count += len(sentence) + (1 if current_chunk_sentences else 0)
+            current_chunk_sentences.append(sentence)
+
+    # Process the last remaining chunk
+    if current_chunk_sentences:
+        chunk_text = " ".join(current_chunk_sentences)
+        print(f"Processing final chunk (chars: {len(chunk_text)}): '{chunk_text[:50]}...'")
+        wav_tensor = current_model.generate(chunk_text, **generate_kwargs)
+        wav_numpy = wav_tensor.squeeze(0).cpu().numpy()
+
+        buffer = io.BytesIO()
+        sf.write(buffer, wav_numpy, current_model.sr, format='WAV')
+        buffer.seek(0)
+        audio_segment = AudioSegment.from_wav(buffer)
+        all_audio_segments.append(audio_segment)
+
     if not all_audio_segments:
+        raise ValueError("No audio segments were generated. Please ensure the input text is not empty or too short.")
+
+    # Concatenate all audio segments
+    combined_audio = all_audio_segments[0]
+    for i in range(1, len(all_audio_segments)):
+        combined_audio += all_audio_segments[i]
+
+    # Export to MP3 format
+    output_filename = "combined_chatterbox_output.mp3"
+    combined_audio.export(output_filename, format="mp3")

+    print(f"Combined audio generated and saved as {output_filename}")
+
+    # Clean up the temporary WAV file if it was created
+    if processed_audio_prompt_path and os.path.exists(processed_audio_prompt_path):
+        os.remove(processed_audio_prompt_path)
+        print(f"Cleaned up temporary prompt file: {processed_audio_prompt_path}")
+
+    return output_filename  # Return the filepath for Gradio
+
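The sentence-packing loop above can be read as a standalone helper; a minimal sketch of the same greedy strategy (chunk_sentences is an illustrative name, not part of the app):

from nltk.tokenize import sent_tokenize

def chunk_sentences(text, max_chars=300):
    """Greedily pack whole sentences into chunks of at most max_chars."""
    chunks, current, count = [], [], 0
    for sentence in sent_tokenize(text):
        extra = len(sentence) + (1 if current else 0)  # +1 for the joining space
        if current and count + extra > max_chars:
            chunks.append(" ".join(current))
            current, count = [sentence], len(sentence)
        else:
            current.append(sentence)
            count += extra
    if current:
        chunks.append(" ".join(current))
    return chunks

Each resulting chunk is short enough to pass to model.generate individually, which is exactly what the function above does inline.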
+# --- Gradio Interface Definition ---
 with gr.Blocks() as demo:
     gr.Markdown(
         """
         # Chatterbox TTS Demo
         Generate high-quality speech from text with reference audio styling.
+        Now supports longer scripts and MP3 output!
         """
     )
     with gr.Row():
         with gr.Column():
             text = gr.Textbox(
+                value="""
+The quick brown fox jumps over the lazy dog. This is a common pangram used to display all letters of the alphabet.
+Now, let's try a slightly longer passage to test the new chunking functionality.
+This should demonstrate how the system handles multiple sentences and combines them seamlessly.
+We are aiming for a natural flow, even with extended input.
+The sun dipped below the horizon, painting the sky in hues of orange and purple.
+A gentle breeze rustled through the leaves, carrying the scent of night-blooming jasmine.
+Soon, the stars would emerge, tiny pinpricks of light in the vast, dark canvas above.
+""",
+                label="Text to synthesize (can be long)",
+                lines=10  # Increased lines for longer text input
             )
+            # Gradio's Audio component handles file uploads directly.
+            # The 'value' here is a placeholder for the demo.
             ref_wav = gr.Audio(
                 sources=["upload", "microphone"],
                 type="filepath",
                 label="Reference Audio File (Optional)",
+                value="https://storage.googleapis.com/chatterbox-demo-samples/prompts/female_shadowheart4.flac"  # Default example audio
             )
             exaggeration = gr.Slider(
                 0.25, 2, step=.05, label="Exaggeration (Neutral = 0.5, extreme values can be unstable)", value=.5
@@ -191,16 +223,15 @@ with gr.Blocks() as demo:
             cfg_weight = gr.Slider(
                 0.2, 1, step=.05, label="CFG/Pace", value=0.5
             )
             with gr.Accordion("More options", open=False):
                 seed_num = gr.Number(value=0, label="Random seed (0 for random)")
                 temp = gr.Slider(0.05, 5, step=.05, label="Temperature", value=.8)
             run_btn = gr.Button("Generate", variant="primary")
         with gr.Column():
+            # Output type is now implicitly a filepath to the MP3
+            audio_output = gr.Audio(label="Output Audio (MP3)")

+    # Define the action when the "Generate" button is clicked
     run_btn.click(
         fn=generate_tts_audio,
         inputs=[
@@ -213,5 +244,8 @@ with gr.Blocks() as demo:
         ],
         outputs=[audio_output],
     )
+
+# Launch the Gradio interface.
+# Use share=True to get a public URL for the app, essential for Colab.
+# debug=True can be useful for seeing more detailed error messages in the Colab output.
+demo.launch(share=True, debug=True)
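The in-memory WAV round-trip used in generate_tts_audio is the general way to hand a numpy waveform to pydub; a self-contained sketch of that conversion plus the MP3 export (assumes soundfile and pydub are installed and ffmpeg is available on PATH; the 24 kHz rate and sine wave are placeholders for current_model.sr and the model output):

import io
import numpy as np
import soundfile as sf
from pydub import AudioSegment

sr = 24000  # placeholder sample rate; the app uses current_model.sr
t = np.arange(sr) / sr
wave = (0.1 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)  # 1 second of a 440 Hz tone

buffer = io.BytesIO()
sf.write(buffer, wave, sr, format='WAV')   # write float32 samples as WAV into memory
buffer.seek(0)                             # rewind before pydub reads it
segment = AudioSegment.from_wav(buffer)

combined = segment + segment               # pydub concatenates segments with +
combined.export("tone.mp3", format="mp3")  # MP3 export requires ffmpeg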