Ryanus committed on
Commit
89e85bb
·
verified ·
1 Parent(s): 65eecd3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -129
app.py CHANGED
@@ -6,123 +6,55 @@ import tempfile
6
  import datetime
7
  import shutil
8
  import re
9
- import onnxruntime as rt # 引入 onnxruntime
10
-
11
- # --- 打印 Gradio 版本以供診斷 ---
12
- print(f"Gradio version at runtime: {gr.__version__}")
13
- # ---
14
 
15
  # --- 解決 Coqui TTS 授權同意問題 ---
16
  os.environ["COQUI_TOS_AGREED"] = "1"
17
 
18
- # --- 解決 PyTorch 載入 XTTS-v2 模型時的 WeightsUnpickler 錯誤 ---
19
- import torch.serialization
20
- from TTS.tts.configs.xtts_config import XttsConfig
21
- from TTS.tts.models.xtts import XttsAudioConfig
22
- from TTS.config.shared_configs import BaseDatasetConfig
23
- from TTS.tts.models.xtts import XttsArgs
24
-
25
- try:
26
- torch.serialization.add_safe_globals([XttsConfig, XttsAudioConfig, BaseDatasetConfig, XttsArgs])
27
- print("已將 XTTS 相關配置類加入 PyTorch 安全全局變數白名單。")
28
- except Exception as e:
29
- print(f"警告:無法將安全全局變數加入 PyTorch 白名單: {e}")
30
- print("如果遇到模型載入錯誤,請檢查 PyTorch 和 TTS 庫版本。")
31
-
32
  # 檢查是否有 CUDA 可用,否則使用 CPU
33
  device = "cuda" if torch.cuda.is_available() else "cpu"
34
  print(f"使用設備: {device}")
35
 
36
- # 全局變數來儲存 TTS 模型實例。如果載入失敗,它將保持為 None。
37
  tts = None
38
- # 全局變數來儲存模型載入時發生的任何錯誤訊息。
39
  model_load_error = None
40
 
41
- # 全局變數來儲存 ONNX Session
42
- onnx_session = None
43
- onnx_model_path = "xtts_v2_quantized.onnx" # 假設量化後的 ONNX 模型路徑
44
-
45
- # 初始化 TTS 模型或 ONNX Session
46
- try:
47
- print("正在嘗試載入 Coqui TTS XTTS-v2 模型...")
48
- # 這裡可以嘗試載入原始 PyTorch 模型,然後進行 ONNX 轉換和量化
49
- # 或者直接載入預先轉換好的 ONNX 模型
50
-
51
- # 為了簡化,這裡假設我們仍然使用 TTS 庫來載入模型,
52
- # 但如果需要 ONNX 優化,您可能需要手動導出 XTTS-v2 到 ONNX
53
- # 並使用 onnxruntime.InferenceSession 來載入。
54
- # 這部分需要更深入的 XTTS-v2 模型結構知識。
55
-
56
- # 這裡僅為示意,實際的 ONNX 轉換和載入會更複雜
57
- # if os.path.exists(onnx_model_path):
58
- # print(f"正在載入 ONNX 模型: {onnx_model_path}")
59
- # onnx_session = rt.InferenceSession(onnx_model_path, providers=['CPUExecutionProvider'])
60
- # print("ONNX 模型已成功載入。")
61
- # else:
62
- # print("ONNX 模型未找到,將載入 PyTorch 模型。")
63
- tts = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", progress_bar=True).to(device)
64
- print("Coqui TTS XTTS-v2 模型已成功載入。")
65
-
66
- # 如果要進行 ONNX 轉換,可以在這裡添加邏輯
67
- # 例如:
68
- # if device == "cpu" and not os.path.exists(onnx_model_path):
69
- # print("嘗試將 PyTorch 模型轉換為 ONNX...")
70
- # # 這部分需要 XTTS-v2 模型的具體輸入格式來進行 torch.onnx.export
71
- # # 並且可能需要量化
72
- # # dummy_input = ... # 根據 XTTS-v2 的 forward 函數定義 dummy input
73
- # # torch.onnx.export(tts.model, dummy_input, onnx_model_path, opset_version=15)
74
- # # onnx_session = rt.InferenceSession(onnx_model_path, providers=['CPUExecutionProvider'])
75
- # # print("模型已轉換並載入為 ONNX。")
76
-
77
- except Exception as e:
78
- model_load_error = (
79
- f"載入 Coqui TTS XTTS-v2 模型時發生錯誤: {e}。\n"
80
- "請確保你的網路連接正常,並且模型名稱正確。此外,請檢查 Hugging Face Space 的日誌以獲取更多詳細資訊。"
81
- )
82
- print(model_load_error)
83
-
84
- # XTTS-v2 支援的語言列表
85
  SUPPORTED_LANGUAGES = [
86
  "en", "zh-cn", "es", "fr", "de", "it", "pt", "pl", "ru", "ja", "ko", "ar", "hi", "tr",
87
  "nl", "sv", "da", "fi", "no", "cs", "hu", "el", "uk", "vi", "th", "id", "ms", "ro",
88
  "sk", "hr", "bg", "ca", "fa", "he", "ur", "bn", "gu", "kn", "ml", "mr", "pa", "ta", "te",
89
  ]
90
 
91
- # --- 預設語音參考檔案路徑 ---
92
  DEFAULT_SPEAKER_WAV = "speaker.wav"
93
-
94
- # --- 自動儲存設定 ---
95
  SAVE_GENERATED_AUDIO_DIR = "generated_audio"
96
  SAVE_UPLOADED_REFERENCES_DIR = "uploaded_references"
97
 
98
  os.makedirs(SAVE_GENERATED_AUDIO_DIR, exist_ok=True)
99
  os.makedirs(SAVE_UPLOADED_REFERENCES_DIR, exist_ok=True)
100
- # --- 結束自動儲存設定 ---
101
 
102
  def sanitize_filename(text: str, max_len: int = 50) -> str:
103
- """
104
- 淨化字串以用於檔案名稱。
105
- 移除除字母、數字、空格和連字號以外的所有字元,
106
- 將空格替換為底線,並截斷至指定長度。
107
- """
108
  safe_text = re.sub(r'[^\w\s-]', '', text).strip()
109
  safe_text = re.sub(r'\s+', '_', safe_text)
110
  if len(safe_text) > max_len:
111
  safe_text = safe_text[:max_len]
112
  return safe_text
113
 
114
- def generate_speech(text: str, language: str, uploaded_speaker_audio_path: str):
115
- """
116
- 根據輸入文字、語言和語音參考檔案生成語音。
117
- 如果用戶上傳了檔案,則使用上傳的檔案;否則使用預設的 speaker.wav。
118
- 生成的語音和上傳的參考語音(如果有的話)都將自動儲存到指定資料夾。
119
- """
 
 
120
  if model_load_error:
121
  return None, f"應用程式啟動錯誤:{model_load_error}"
122
 
123
- if tts is None and onnx_session is None:
124
- return None, "TTS 模型未成功載入,無法生成語音。請檢查日誌獲取詳細資訊。"
 
125
 
 
126
  if not text:
127
  return None, "請輸入一些文字!"
128
  if not language:
@@ -131,7 +63,7 @@ def generate_speech(text: str, language: str, uploaded_speaker_audio_path: str):
131
  speaker_wav_to_use = None
132
  status_message = ""
133
 
134
- # --- 決定使用哪個語音參考檔案 ---
135
  if uploaded_speaker_audio_path:
136
  speaker_wav_to_use = uploaded_speaker_audio_path
137
  try:
@@ -140,87 +72,55 @@ def generate_speech(text: str, language: str, uploaded_speaker_audio_path: str):
140
  saved_ref_file_name = f"{timestamp_ref}_uploaded_ref{original_ext}"
141
  saved_ref_file_path = os.path.join(SAVE_UPLOADED_REFERENCES_DIR, saved_ref_file_name)
142
  shutil.copy(uploaded_speaker_audio_path, saved_ref_file_path)
143
- print(f"上傳的參考語音已儲存到:{saved_ref_file_path}")
144
  status_message += f"參考語音已儲存到:{saved_ref_file_path}\n"
145
  except Exception as e:
146
- print(f"儲存上傳的參考語音時發生錯誤: {e}")
147
  status_message += f"警告:儲存參考語音失敗: {e}\n"
148
-
149
- print(f"使用上傳的語音參考檔案: {speaker_wav_to_use}")
150
  else:
151
  speaker_wav_to_use = DEFAULT_SPEAKER_WAV
152
  if not os.path.exists(speaker_wav_to_use):
153
  return None, f"錯誤:預設語音參考檔案 ({DEFAULT_SPEAKER_WAV}) 未找到。請上傳一個檔案或確保預設檔案存在。"
154
- print(f"沒有上傳語音參考檔案,將使用預設檔案: {speaker_wav_to_use}")
155
- # --- 結束決定 ---
156
 
157
  output_file = None
158
  try:
159
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
160
  output_file = fp.name
161
 
162
- print(f"正在為語言 '{language}' 生成語音,使用語音參考檔案: {speaker_wav_to_use}...")
163
-
164
- # 如果 ONNX Session 存在,嘗試使用 ONNX 進行推理
165
- # 這部分需要將 XTTS-v2 的輸入轉換為 ONNX 模型所需的格式
166
- # 並將輸出轉換回音訊格式。這會非常複雜,因為 TTS 庫封裝了許多細節。
167
- # if onnx_session:
168
- # # 這裡需要 XTTS-v2 ONNX 模型的具體輸入/輸出格式
169
- # # 例如:
170
- # # inputs = {
171
- # # onnx_session.get_inputs()[0].name: processed_text_input,
172
- # # onnx_session.get_inputs()[1].name: processed_speaker_input,
173
- # # ...
174
- # # }
175
- # # outputs = onnx_session.run(None, inputs)
176
- # # generated_audio_data = outputs[0]
177
- # # import soundfile as sf
178
- # # sf.write(output_file, generated_audio_data, 24000) # 假設採樣率為 24000
179
- # pass # 暫時不實作 ONNX 推理,因為太複雜
180
- # else:
181
  tts.tts_to_file(text=text, language=language, speaker_wav=speaker_wav_to_use, file_path=output_file)
182
- print(f"語音已生成到臨時檔案:{output_file}")
183
 
184
- # --- 自動儲存生成的語音檔案 ---
185
  timestamp_gen = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
186
  sanitized_text = sanitize_filename(text)
187
-
188
  saved_file_name = f"{timestamp_gen}_{language}_{sanitized_text}.wav"
189
  saved_file_path = os.path.join(SAVE_GENERATED_AUDIO_DIR, saved_file_name)
190
-
191
  shutil.copy(output_file, saved_file_path)
192
- print(f"生成的語音已自動儲存到:{saved_file_path}")
193
  status_message += f"語音生成成功!已儲存為:{saved_file_path}"
194
- # --- 結束自動儲存 ---
195
 
 
196
  return output_file, status_message
197
  except Exception as e:
198
- print(f"生成語音時發生錯誤: {e}")
199
  if output_file and os.path.exists(output_file):
200
  os.remove(output_file)
201
  return None, f"生成語音失敗: {e}"
202
 
203
- def list_saved_audio_files() -> list:
204
- """掃描儲存生成的語音資料夾,返回所有 .wav 檔案的完整路徑列表。"""
205
  audio_files = []
206
- if os.path.exists(SAVE_GENERATED_AUDIO_DIR) and os.path.isdir(SAVE_GENERATED_AUDIO_DIR):
207
  for filename in os.listdir(SAVE_GENERATED_AUDIO_DIR):
208
  if filename.lower().endswith(".wav"):
209
  audio_files.append(os.path.join(SAVE_GENERATED_AUDIO_DIR, filename))
210
  audio_files.sort(key=os.path.getmtime, reverse=True)
211
  return audio_files
212
 
213
- def list_uploaded_reference_files() -> list:
214
- """掃描上傳參考語音資料夾,返回所有 .wav 檔案的完整路徑列表。"""
215
  ref_files = []
216
- if os.path.exists(SAVE_UPLOADED_REFERENCES_DIR) and os.path.isdir(SAVE_UPLOADED_REFERENCES_DIR):
217
  for filename in os.listdir(SAVE_UPLOADED_REFERENCES_DIR):
218
  if filename.lower().endswith(".wav"):
219
  ref_files.append(os.path.join(SAVE_UPLOADED_REFERENCES_DIR, filename))
220
  ref_files.sort(key=os.path.getmtime, reverse=True)
221
  return ref_files
222
 
223
- # Gradio 介面配置 (使用 gr.Blocks 實現多 Tab 介面)
224
  with gr.Blocks(title="Coqui TTS XTTS-v2 語音生成") as demo:
225
  gr.Markdown("# Coqui TTS XTTS-v2 語音生成 (CPU)")
226
  gr.Markdown("此演示使用 CPU 運行,請注意 XTTS-v2 在 CPU 上運行會非常慢。您可以上傳自己的語音,或使用預設語音。**生成的語音和上傳的參考語音都將自動儲存到 Space 專案中。**")
@@ -240,7 +140,7 @@ with gr.Blocks(title="Coqui TTS XTTS-v2 語音生成") as demo:
240
  with gr.Column():
241
  output_audio = gr.Audio(label="生成的語音", type="filepath")
242
  status_textbox = gr.Textbox(label="狀態")
243
-
244
  generate_button.click(
245
  fn=generate_speech,
246
  inputs=[text_input, language_dropdown, speaker_audio_upload],
@@ -249,31 +149,26 @@ with gr.Blocks(title="Coqui TTS XTTS-v2 語音生成") as demo:
249
 
250
  with gr.Tab("查看已儲存語音"):
251
  gr.Markdown("### 已儲存的生成語音檔案")
252
- gr.Markdown("這些是您生成的語音檔案。")
253
-
254
  saved_generated_files_output = gr.File(
255
  label="生成的語音檔案",
256
  file_count="multiple",
257
  interactive=False
258
  )
259
  refresh_generated_button = gr.Button("刷新生成語音列表")
260
-
261
  demo.load(list_saved_audio_files, outputs=[saved_generated_files_output])
262
  refresh_generated_button.click(list_saved_audio_files, outputs=[saved_generated_files_output])
263
 
264
  with gr.Tab("查看已上傳參考語音"):
265
  gr.Markdown("### 已儲存的上傳參考語音檔案")
266
- gr.Markdown("這些是您上傳的語音參考檔案。")
267
-
268
  saved_uploaded_ref_files_output = gr.File(
269
  label="上傳的參考語音檔案",
270
  file_count="multiple",
271
  interactive=False
272
  )
273
  refresh_uploaded_ref_button = gr.Button("刷新參考語音列表")
274
-
275
  demo.load(list_uploaded_reference_files, outputs=[saved_uploaded_ref_files_output])
276
  refresh_uploaded_ref_button.click(list_uploaded_reference_files, outputs=[saved_uploaded_ref_files_output])
277
 
 
278
  if __name__ == "__main__":
279
  demo.launch()
 
6
  import datetime
7
  import shutil
8
  import re
 
 
 
 
 
9
 
10
# Auto-accept the Coqui TTS terms of service so model download does not
# block on an interactive prompt (required for headless Spaces).
os.environ["COQUI_TOS_AGREED"] = "1"
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
# Prefer CUDA when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"使用設備: {device}")

# Global TTS model instance; stays None if loading fails.
tts = None
# Human-readable description of a model-load failure; generate_speech
# surfaces it to the UI instead of attempting synthesis.
model_load_error = None
20
 
21
# Language codes offered in the UI dropdown (extend as needed).
# NOTE(review): this list appears broader than XTTS-v2's officially supported
# languages — entries beyond the model's own set may fail at generation time;
# verify against the Coqui XTTS-v2 model card.
SUPPORTED_LANGUAGES = [
    "en", "zh-cn", "es", "fr", "de", "it", "pt", "pl", "ru", "ja", "ko", "ar", "hi", "tr",
    "nl", "sv", "da", "fi", "no", "cs", "hu", "el", "uk", "vi", "th", "id", "ms", "ro",
    "sk", "hr", "bg", "ca", "fa", "he", "ur", "bn", "gu", "kn", "ml", "mr", "pa", "ta", "te",
]
27
 
 
28
# Default reference voice used when the user does not upload one.
DEFAULT_SPEAKER_WAV = "speaker.wav"

# Directories where generated audio and uploaded reference clips are archived.
SAVE_GENERATED_AUDIO_DIR = "generated_audio"
SAVE_UPLOADED_REFERENCES_DIR = "uploaded_references"

# Create the archive directories up front; exist_ok makes restarts idempotent.
os.makedirs(SAVE_GENERATED_AUDIO_DIR, exist_ok=True)
os.makedirs(SAVE_UPLOADED_REFERENCES_DIR, exist_ok=True)
 
34
 
35
def sanitize_filename(text: str, max_len: int = 50) -> str:
    """Make *text* safe for use inside a file name.

    Keeps only word characters, whitespace and hyphens, collapses each run
    of whitespace into a single underscore, and truncates the result to at
    most *max_len* characters.
    """
    cleaned = re.sub(r'[^\w\s-]', '', text).strip()
    collapsed = re.sub(r'\s+', '_', cleaned)
    # Slicing is a no-op when the string is already short enough.
    return collapsed[:max_len]
41
 
42
# Load the XTTS-v2 model once at startup (loading is expensive; reuse the
# global instance for every request).
try:
    tts = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", progress_bar=True).to(device)
    print("Coqui TTS XTTS-v2 模型已成功載入。")
except Exception as e:
    # Record the failure for the UI, and print it as well: without the print
    # a load failure is invisible in the Space logs until a user tries to
    # generate speech.
    model_load_error = f"載入 Coqui TTS XTTS-v2 模型時發生錯誤: {e}"
    print(model_load_error)
48
+
49
+ def generate_speech(text, language, uploaded_speaker_audio_path, progress=gr.Progress()):
50
  if model_load_error:
51
  return None, f"應用程式啟動錯誤:{model_load_error}"
52
 
53
+ progress(0.05, desc="檢查模型狀態")
54
+ if tts is None:
55
+ return None, "TTS 模型未成功載入,無法生成語音。"
56
 
57
+ progress(0.1, desc="檢查輸入")
58
  if not text:
59
  return None, "請輸入一些文字!"
60
  if not language:
 
63
  speaker_wav_to_use = None
64
  status_message = ""
65
 
66
+ progress(0.2, desc="處理語音參考檔案")
67
  if uploaded_speaker_audio_path:
68
  speaker_wav_to_use = uploaded_speaker_audio_path
69
  try:
 
72
  saved_ref_file_name = f"{timestamp_ref}_uploaded_ref{original_ext}"
73
  saved_ref_file_path = os.path.join(SAVE_UPLOADED_REFERENCES_DIR, saved_ref_file_name)
74
  shutil.copy(uploaded_speaker_audio_path, saved_ref_file_path)
 
75
  status_message += f"參考語音已儲存到:{saved_ref_file_path}\n"
76
  except Exception as e:
 
77
  status_message += f"警告:儲存參考語音失敗: {e}\n"
 
 
78
  else:
79
  speaker_wav_to_use = DEFAULT_SPEAKER_WAV
80
  if not os.path.exists(speaker_wav_to_use):
81
  return None, f"錯誤:預設語音參考檔案 ({DEFAULT_SPEAKER_WAV}) 未找到。請上傳一個檔案或確保預設檔案存在。"
 
 
82
 
83
  output_file = None
84
  try:
85
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
86
  output_file = fp.name
87
 
88
+ progress(0.5, desc="生成語音")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  tts.tts_to_file(text=text, language=language, speaker_wav=speaker_wav_to_use, file_path=output_file)
 
90
 
91
+ progress(0.8, desc="儲存語音檔案")
92
  timestamp_gen = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
93
  sanitized_text = sanitize_filename(text)
 
94
  saved_file_name = f"{timestamp_gen}_{language}_{sanitized_text}.wav"
95
  saved_file_path = os.path.join(SAVE_GENERATED_AUDIO_DIR, saved_file_name)
 
96
  shutil.copy(output_file, saved_file_path)
 
97
  status_message += f"語音生成成功!已儲存為:{saved_file_path}"
 
98
 
99
+ progress(1.0, desc="完成")
100
  return output_file, status_message
101
  except Exception as e:
 
102
  if output_file and os.path.exists(output_file):
103
  os.remove(output_file)
104
  return None, f"生成語音失敗: {e}"
105
 
106
def list_saved_audio_files():
    """Return full paths of all archived generated .wav files, newest first."""
    if not os.path.exists(SAVE_GENERATED_AUDIO_DIR):
        return []
    wav_paths = [
        os.path.join(SAVE_GENERATED_AUDIO_DIR, name)
        for name in os.listdir(SAVE_GENERATED_AUDIO_DIR)
        if name.lower().endswith(".wav")
    ]
    wav_paths.sort(key=os.path.getmtime, reverse=True)
    return wav_paths
114
 
115
def list_uploaded_reference_files():
    """Return full paths of all archived reference .wav files, newest first."""
    if not os.path.exists(SAVE_UPLOADED_REFERENCES_DIR):
        return []
    ref_paths = [
        os.path.join(SAVE_UPLOADED_REFERENCES_DIR, name)
        for name in os.listdir(SAVE_UPLOADED_REFERENCES_DIR)
        if name.lower().endswith(".wav")
    ]
    ref_paths.sort(key=os.path.getmtime, reverse=True)
    return ref_paths
123
 
 
124
  with gr.Blocks(title="Coqui TTS XTTS-v2 語音生成") as demo:
125
  gr.Markdown("# Coqui TTS XTTS-v2 語音生成 (CPU)")
126
  gr.Markdown("此演示使用 CPU 運行,請注意 XTTS-v2 在 CPU 上運行會非常慢。您可以上傳自己的語音,或使用預設語音。**生成的語音和上傳的參考語音都將自動儲存到 Space 專案中。**")
 
140
  with gr.Column():
141
  output_audio = gr.Audio(label="生成的語音", type="filepath")
142
  status_textbox = gr.Textbox(label="狀態")
143
+
144
  generate_button.click(
145
  fn=generate_speech,
146
  inputs=[text_input, language_dropdown, speaker_audio_upload],
 
149
 
150
  with gr.Tab("查看已儲存語音"):
151
  gr.Markdown("### 已儲存的生成語音檔案")
 
 
152
  saved_generated_files_output = gr.File(
153
  label="生成的語音檔案",
154
  file_count="multiple",
155
  interactive=False
156
  )
157
  refresh_generated_button = gr.Button("刷新生成語音列表")
 
158
  demo.load(list_saved_audio_files, outputs=[saved_generated_files_output])
159
  refresh_generated_button.click(list_saved_audio_files, outputs=[saved_generated_files_output])
160
 
161
  with gr.Tab("查看已上傳參考語音"):
162
  gr.Markdown("### 已儲存的上傳參考語音檔案")
 
 
163
  saved_uploaded_ref_files_output = gr.File(
164
  label="上傳的參考語音檔案",
165
  file_count="multiple",
166
  interactive=False
167
  )
168
  refresh_uploaded_ref_button = gr.Button("刷新參考語音列表")
 
169
  demo.load(list_uploaded_reference_files, outputs=[saved_uploaded_ref_files_output])
170
  refresh_uploaded_ref_button.click(list_uploaded_reference_files, outputs=[saved_uploaded_ref_files_output])
171
 
172
# Enable Gradio's request queue, which is required for gr.Progress updates
# to stream to the client during long CPU syntheses.
demo.queue()
if __name__ == "__main__":
    demo.launch()