TGPro1 committed on
Commit de5f08d · verified · 1 Parent(s): 183c33d

Upload app.py with huggingface_hub

Files changed (1):
app.py: +55 −118
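
For reference, a minimal sketch of the upload named in the commit message, using huggingface_hub's HfApi; the repo id below is a placeholder, not taken from this page.

    from huggingface_hub import HfApi

    api = HfApi()  # reads the token from HF_TOKEN or the local credential store
    api.upload_file(
        path_or_fileobj="app.py",
        path_in_repo="app.py",
        repo_id="TGPro1/<space-name>",  # placeholder repo id
        repo_type="space",
        commit_message="Upload app.py with huggingface_hub",
    )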
app.py CHANGED
@@ -60,8 +60,8 @@ if not hasattr(torchaudio, "info"):
 
 from df.enhance import enhance, init_df, load_audio, save_audio
 
-# FORCE BUILD TRIGGER: 10:10:00 Jan 21 2026
-# v86: Pre-load to CPU RAM + Fast Transfer to GPU (Prevents ZeroGPU timeouts)
+# FORCE BUILD TRIGGER: 10:20:00 Jan 21 2026
+# v87: Targeted GPU Activation (Only loads what's needed for the specific action)
 
 # 🛠️ Monkeypatch torchaudio.load
 try:
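
The patch body behind the `# 🛠️ Monkeypatch torchaudio.load` comment is outside this diff, but the usual shape of such a patch is roughly the following sketch, which reroutes loading through soundfile; names and fallback choice here are illustrative, not the app's actual code.

    import soundfile as sf
    import torch
    import torchaudio

    # Reroute torchaudio.load through soundfile (illustrative only).
    if not hasattr(torchaudio, "_orig_load"):
        torchaudio._orig_load = torchaudio.load

        def _sf_load(filepath, *args, **kwargs):
            data, sr = sf.read(filepath, dtype="float32", always_2d=True)
            return torch.from_numpy(data.T), sr  # (channels, frames), sample rate

        torchaudio.load = _sf_load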
@@ -89,77 +89,67 @@ os.environ["COQUI_TOS_AGREED"] = "1"
 # Global models (Resident in RAM)
 MODELS = {"stt": None, "translate": None, "tts": None, "denoiser": None}
 
-def load_models():
-    """Fast GPU Activation: Moves pre-loaded CPU models to GPU (v86)"""
+def activate_gpu_models(action):
+    """v87: Targeted activation of models on GPU to save time"""
     global MODELS
 
-    # 1. Faster-Whisper (Must re-init for device change, but disk cache is hot)
-    if MODELS["stt"] is None:
-        print("🎙️ Initializing Faster-Whisper...")
-        from faster_whisper import WhisperModel
-        dev = "cuda" if torch.cuda.is_available() else "cpu"
-        ct = "float16" if dev == "cuda" else "int8"
-        MODELS["stt"] = WhisperModel("large-v3", device=dev, compute_type=ct)
-
-    # 2. XTTS-v2 (Efficient .to("cuda") transfer)
-    if MODELS["tts"] is None:
-        print("🔊 Loading XTTS-v2 into Engine (CPU Base)...")
-        from TTS.api import TTS
-        MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
-
-    if torch.cuda.is_available():
-        # Move XTTS to GPU if it's currently on CPU
-        # We check the device of the underlying model to avoid redundant moves
+    # 1. Faster-Whisper (Activate only if action needs it)
+    if action in ["stt", "s2st"]:
+        is_cuda = False
+        try:
+            # Check current device
+            if hasattr(MODELS["stt"], "model") and MODELS["stt"].model.device == "cuda":
+                is_cuda = True
+        except: pass
+
+        if not is_cuda:
+            print(f"🎙️ Activating Whisper on GPU for {action}...")
+            from faster_whisper import WhisperModel
+            MODELS["stt"] = WhisperModel("large-v3", device="cuda", compute_type="float16")
+
+    # 2. XTTS-v2 (Activate only if action needs it)
+    if action in ["tts", "s2st"]:
+        if MODELS["tts"] is None:
+            from TTS.api import TTS
+            print("🔊 Initializing XTTS to RAM...")
+            MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
+
         try:
             current_dev = str(next(MODELS["tts"].synthesizer.tts_model.parameters()).device)
             if "cuda" not in current_dev:
-                print("🚀 Moving XTTS-v2 to GPU...")
+                print(f"🚀 Activating XTTS-v2 on GPU for {action}...")
                 MODELS["tts"].to("cuda")
-        except:
-            # Fallback if structure differs
-            MODELS["tts"].to("cuda")
-
-    # 3. DeepFilterNet
-    if MODELS["denoiser"] is None:
-        try: MODELS["denoiser"] = init_df()
-        except: pass
-
-    # 4. Chatterbox ONNX
-    chatterbox_utils.load_chatterbox(device="cuda" if torch.cuda.is_available() else "cpu")
+        except:
+            MODELS["tts"].to("cuda")
 
-    # 5. Translate
-    if MODELS["translate"] is None:
-        MODELS["translate"] = "active"
+    # 3. Denoiser & Translate & Chatterbox
+    if action in ["tts", "s2st", "stt"]:
+        if MODELS["denoiser"] is None:
+            try: MODELS["denoiser"] = init_df()
+            except: pass
+        if MODELS["translate"] is None: MODELS["translate"] = "active"
+        chatterbox_utils.load_chatterbox(device="cuda" if torch.cuda.is_available() else "cpu")
 
-    # 🧹 Proactive Memory Cleanup
+    # 🧹 Cleanup
     gc.collect()
     if torch.cuda.is_available():
        torch.cuda.empty_cache()
 
 def warmup_models():
-    """PRE-LOAD EVERYTHING INTO SYSTEM RAM (CPU)"""
-    print("\n🔥 --- SYSTEM WARMUP: RESIDENT RAM LOADING (v86) ---")
+    """Download models at startup to System RAM"""
+    print("\n🔥 --- SYSTEM WARMUP: RAM CACHING (v87) ---")
     start = time.time()
     try:
-        # Load Whisper into RAM
-        print("📥 Pre-loading Whisper to RAM...")
         from faster_whisper import WhisperModel
+        print("📥 Caching Whisper to RAM...")
         MODELS["stt"] = WhisperModel("large-v3", device="cpu", compute_type="int8")
 
-        # Load XTTS into RAM (The heaviest part)
-        print("📥 Pre-loading XTTS-v2 to RAM...")
         from TTS.api import TTS
+        print("📥 Caching XTTS-v2 to RAM...")
         MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
 
-        # Load Denoiser
-        print("📥 Pre-loading Denoiser...")
-        try: MODELS["denoiser"] = init_df()
-        except: pass
-
-        # Pre-download ONNX weights
         chatterbox_utils.warmup_chatterbox()
-
-        print(f"✅ --- WARMUP COMPLETE: All models resident in RAM ({time.time()-start:.2f}s) --- \n")
+        print(f"✅ --- WARMUP COMPLETE ({time.time()-start:.2f}s) --- \n")
     except Exception as e:
         print(f"⚠️ Warmup warning: {e}")
 
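
The `next(...).parameters()).device` probe in the hunk above is the standard way to ask where a torch module currently lives before moving it. A minimal standalone version of the idiom, for reference (the helper name is illustrative):

    import torch

    def ensure_cuda(module: torch.nn.Module) -> torch.nn.Module:
        # Move to GPU only if one is available and the module is not already
        # there, mirroring the redundant-move check in activate_gpu_models.
        if torch.cuda.is_available() and next(module.parameters()).device.type != "cuda":
            module.to("cuda")
        return module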
@@ -167,12 +157,10 @@ def _stt_logic(request_dict):
     audio_bytes = base64.b64decode(request_dict.get("file"))
     lang = request_dict.get("lang")
     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
-        f.write(audio_bytes)
-        temp_path = f.name
+        f.write(audio_bytes); temp_path = f.name
     try:
         segments, _ = MODELS["stt"].transcribe(temp_path, language=lang, beam_size=1)
-        text = " ".join([s.text for s in segments]).strip()
-        return {"text": text}
+        return {"text": " ".join([s.text for s in segments]).strip()}
     finally:
         if os.path.exists(temp_path): os.unlink(temp_path)
 
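
A hedged client-side sketch of the request shape `_stt_logic` expects via `/api/v1/process`; the Space URL and file name are placeholders:

    import base64
    import requests

    SPACE_URL = "https://<your-space>.hf.space"  # placeholder

    with open("sample.wav", "rb") as f:
        payload = {
            "action": "stt",
            "file": base64.b64encode(f.read()).decode(),
            "lang": "en",
        }

    print(requests.post(f"{SPACE_URL}/api/v1/process", json=payload, timeout=300).json())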
@@ -182,28 +170,22 @@ def _translate_logic(text, target_lang):
 
 def _tts_logic(text, lang, speaker_wav_b64):
     if not text or not text.strip(): return {"error": "Input empty"}
-
     XTTS_MAP = {
         "en": "en", "de": "de", "fr": "fr", "es": "es", "it": "it", "pl": "pl",
         "pt": "pt", "tr": "tr", "ru": "ru", "nl": "nl", "cs": "cs", "ar": "ar",
         "hu": "hu", "ko": "ko", "hi": "hi", "zh": "zh-cn"
     }
-
     clean_lang = lang.strip().lower().split('-')[0]
-    mapped_lang = XTTS_MAP.get(clean_lang)
-    if clean_lang == "zh": mapped_lang = "zh-cn"
-
-    print(f"[v86] TTS: {lang} -> {mapped_lang}")
+    mapped_lang = XTTS_MAP.get(clean_lang) or ("zh-cn" if clean_lang == "zh" else None)
 
     if mapped_lang:
-        print(f"[v86] GPU Mode: XTTS-v2")
+        print(f"[v87] Use XTTS: {mapped_lang}")
         speaker_wav_path = None
         if speaker_wav_b64:
             sb = base64.b64decode(speaker_wav_b64)
             with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
                 f.write(sb); speaker_wav_path = f.name
         else: speaker_wav_path = "default_speaker.wav"
-
         try:
             with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as output_file:
                 output_path = output_file.name
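
The rewritten one-liner folds the old `zh` special case into the mapping lookup. A quick standalone check of the behaviour, with an abbreviated copy of the map for illustration:

    XTTS_MAP = {"en": "en", "zh": "zh-cn"}  # abbreviated copy for illustration

    def normalize(lang: str):
        clean = lang.strip().lower().split("-")[0]
        return XTTS_MAP.get(clean) or ("zh-cn" if clean == "zh" else None)

    assert normalize("zh-CN") == "zh-cn"   # region tag stripped, then mapped
    assert normalize("EN-us") == "en"
    assert normalize("ja") is None         # unmapped -> Chatterbox fallback path

Since `zh` already maps to `zh-cn` in XTTS_MAP, the `or` branch is defensive only and never fires.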
@@ -214,27 +196,25 @@ def _tts_logic(text, lang, speaker_wav_b64):
         if speaker_wav_path and "default_speaker" not in speaker_wav_path:
             if os.path.exists(speaker_wav_path): os.unlink(speaker_wav_path)
         if 'output_path' in locals() and os.path.exists(output_path): os.unlink(output_path)
-
-    # Fallback to Chatterbox
-    print(f"[v86] Fallback Mode: Chatterbox ONNX")
+
+    print(f"[v87] Use Chatterbox: {clean_lang}")
     try:
         temp_ref = None
         if speaker_wav_b64:
-            sb = base64.b64decode(speaker_wav_b64)
-            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
-                f.write(sb); temp_ref = f.name
+            sb = base64.b64decode(speaker_wav_b64)
+            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
+                f.write(sb); temp_ref = f.name
         audio_bytes = chatterbox_utils.run_chatterbox_inference(text, clean_lang, speaker_wav_path=temp_ref)
         if temp_ref and os.path.exists(temp_ref): os.unlink(temp_ref)
         return {"audio": base64.b64encode(audio_bytes).decode()}
-    except Exception as e:
-        return {"error": f"TTS Failure: {str(e)}"}
+    except Exception as e: return {"error": f"TTS Failure: {str(e)}"}
 
 @spaces.GPU
 def core_process(request_dict):
     action = request_dict.get("action")
     t0 = time.time()
-    print(f"--- [v86] 🚀 GPU SESSION START: {action} ---")
-    load_models()
+    print(f"--- [v87] 🚀 GPU START: {action} ---")
+    activate_gpu_models(action)
     try:
         if action == "stt": res = _stt_logic(request_dict)
         elif action == "translate": res = {"translated": _translate_logic(request_dict.get("text"), request_dict.get("target_lang", "en"))}
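
For context on `@spaces.GPU` (ZeroGPU Spaces): the GPU is attached only for the duration of the decorated call, which is why v87 moves per-action model activation inside `core_process` instead of loading everything at import time. Typical usage, with the function name illustrative:

    import spaces

    @spaces.GPU(duration=120)  # optional duration hint, in seconds
    def run_on_gpu(payload):
        ...  # CUDA is only available inside this call on ZeroGPU hardware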
@@ -242,48 +222,18 @@ def core_process(request_dict):
     elif action == "s2st":
         stt_res = _stt_logic({"file": request_dict.get("file"), "lang": request_dict.get("source_lang")})
         text = stt_res.get("text", "")
-        if not text: return {"error": "No speech detected"}
+        if not text: return {"error": "No speech"}
         translated = _translate_logic(text, request_dict.get("target_lang"))
         tts_res = _tts_logic(translated, request_dict.get("target_lang"), request_dict.get("speaker_wav"))
         res = {"text": text, "translated": translated, "audio": tts_res.get("audio")}
     elif action == "health": res = {"status": "awake"}
     else: res = {"error": f"Unknown action: {action}"}
     finally:
-        print(f"--- [v86] ✨ SESSION END: {action} ({time.time()-t0:.2f}s) ---")
+        print(f"--- [v87] ✨ END: {action} ({time.time()-t0:.2f}s) ---")
         gc.collect()
         if torch.cuda.is_available(): torch.cuda.empty_cache()
     return res
 
-def create_wav_header(sample_rate=24000, channels=1, bit_depth=16):
-    header = bytearray(b'RIFF')
-    header.extend((1000000000).to_bytes(4, 'little'))
-    header.extend(b'WAVEfmt ')
-    header.extend((16).to_bytes(4, 'little'))
-    header.extend((1).to_bytes(2, 'little'))
-    header.extend((channels).to_bytes(2, 'little'))
-    header.extend((sample_rate).to_bytes(4, 'little'))
-    header.extend((sample_rate * channels * (bit_depth // 8)).to_bytes(4, 'little'))
-    header.extend((channels * (bit_depth // 8)).to_bytes(2, 'little'))
-    header.extend((bit_depth).to_bytes(2, 'little'))
-    header.extend(b'data')
-    header.extend((0xFFFFFFFF).to_bytes(4, 'little'))
-    return bytes(header)
-
-@spaces.GPU
-def gpu_tts_generator(text, lang, speaker_wav_path):
-    load_models()
-    try:
-        yield bytes(create_wav_header(sample_rate=24000))
-        for chunk in MODELS["tts"].synthesizer.tts_model.inference_stream(
-            text, lang, *MODELS["tts"].synthesizer.tts_model.get_conditioning_latents(audio_path=[speaker_wav_path]),
-            stream_chunk_size=20
-        ):
-            yield bytes((chunk * 32767).to(torch.int16).cpu().numpy().tobytes())
-    finally:
-        if speaker_wav_path and "default_speaker" not in speaker_wav_path and os.path.exists(speaker_wav_path): os.unlink(speaker_wav_path)
-        gc.collect()
-        if torch.cuda.is_available(): torch.cuda.empty_cache()
-
 app = FastAPI()
 
 @app.post("/api/v1/process")
@@ -295,19 +245,6 @@ async def api_process(request: Request):
         traceback.print_exc()
         return {"error": str(e)}
 
-@app.post("/api/v1/tts_stream")
-async def api_tts_stream(request: Request):
-    try:
-        data = await request.json()
-        speaker_wav_b64 = data.get("speaker_wav")
-        if speaker_wav_b64:
-            sb = base64.b64decode(speaker_wav_b64)
-            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
-                f.write(sb); speaker_wav_path = f.name
-        else: speaker_wav_path = "default_speaker.wav"
-        return StreamingResponse(gpu_tts_generator(data.get("text"), data.get("lang"), speaker_wav_path), media_type="audio/wav")
-    except Exception as e: return {"error": str(e)}
-
 @app.get("/health")
 def health(): return {"status": "ok", "gpu": torch.cuda.is_available(), "time": time.ctime()}
 
 
 
60
 
61
  from df.enhance import enhance, init_df, load_audio, save_audio
62
 
63
+ # FORCE BUILD TRIGGER: 10:20:00 Jan 21 2026
64
+ # v87: Targeted GPU Activation (Only loads what's needed for the specific action)
65
 
66
  # πŸ› οΈ Monkeypatch torchaudio.load
67
  try:
 
89
  # Global models (Resident in RAM)
90
  MODELS = {"stt": None, "translate": None, "tts": None, "denoiser": None}
91
 
92
+ def activate_gpu_models(action):
93
+ """v87: Targetted activation of models on GPU to save time"""
94
  global MODELS
95
 
96
+ # 1. Faster-Whisper (Activate only if action needs it)
97
+ if action in ["stt", "s2st"]:
98
+ is_cuda = False
99
+ try:
100
+ # Check current device
101
+ if hasattr(MODELS["stt"], "model") and MODELS["stt"].model.device == "cuda":
102
+ is_cuda = True
103
+ except: pass
104
+
105
+ if not is_cuda:
106
+ print(f"πŸŽ™οΈ Activating Whisper on GPU for {action}...")
107
+ from faster_whisper import WhisperModel
108
+ MODELS["stt"] = WhisperModel("large-v3", device="cuda", compute_type="float16")
109
+
110
+ # 2. XTTS-v2 (Activate only if action needs it)
111
+ if action in ["tts", "s2st"]:
112
+ if MODELS["tts"] is None:
113
+ from TTS.api import TTS
114
+ print("πŸ”Š Initializing XTTS to RAM...")
115
+ MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
116
+
117
  try:
118
  current_dev = str(next(MODELS["tts"].synthesizer.tts_model.parameters()).device)
119
  if "cuda" not in current_dev:
120
+ print(f"πŸš€ Activating XTTS-v2 on GPU for {action}...")
121
  MODELS["tts"].to("cuda")
122
+ except:
123
+ MODELS["tts"].to("cuda")
 
 
 
 
 
 
 
 
 
124
 
125
+ # 3. Denoiser & Translate & Chatterbox
126
+ if action in ["tts", "s2st", "stt"]:
127
+ if MODELS["denoiser"] is None:
128
+ try: MODELS["denoiser"] = init_df()
129
+ except: pass
130
+ if MODELS["translate"] is None: MODELS["translate"] = "active"
131
+ chatterbox_utils.load_chatterbox(device="cuda" if torch.cuda.is_available() else "cpu")
132
 
133
+ # 🧹 Cleanup
134
  gc.collect()
135
  if torch.cuda.is_available():
136
  torch.cuda.empty_cache()
137
 
138
  def warmup_models():
139
+ """Download models at startup to System RAM"""
140
+ print("\nπŸ”₯ --- SYSTEM WARMUP: RAM CACHING (v87) ---")
141
  start = time.time()
142
  try:
 
 
143
  from faster_whisper import WhisperModel
144
+ print("πŸ“₯ Caching Whisper to RAM...")
145
  MODELS["stt"] = WhisperModel("large-v3", device="cpu", compute_type="int8")
146
 
 
 
147
  from TTS.api import TTS
148
+ print("πŸ“₯ Caching XTTS-v2 to RAM...")
149
  MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
150
 
 
 
 
 
 
 
151
  chatterbox_utils.warmup_chatterbox()
152
+ print(f"βœ… --- WARMUP COMPLETE ({time.time()-start:.2f}s) --- \n")
 
153
  except Exception as e:
154
  print(f"⚠️ Warmup warning: {e}")
155
 
 
157
  audio_bytes = base64.b64decode(request_dict.get("file"))
158
  lang = request_dict.get("lang")
159
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
160
+ f.write(audio_bytes); temp_path = f.name
 
161
  try:
162
  segments, _ = MODELS["stt"].transcribe(temp_path, language=lang, beam_size=1)
163
+ return {"text": " ".join([s.text for s in segments]).strip()}
 
164
  finally:
165
  if os.path.exists(temp_path): os.unlink(temp_path)
166
 
 
170
 
171
  def _tts_logic(text, lang, speaker_wav_b64):
172
  if not text or not text.strip(): return {"error": "Input empty"}
 
173
  XTTS_MAP = {
174
  "en": "en", "de": "de", "fr": "fr", "es": "es", "it": "it", "pl": "pl",
175
  "pt": "pt", "tr": "tr", "ru": "ru", "nl": "nl", "cs": "cs", "ar": "ar",
176
  "hu": "hu", "ko": "ko", "hi": "hi", "zh": "zh-cn"
177
  }
 
178
  clean_lang = lang.strip().lower().split('-')[0]
179
+ mapped_lang = XTTS_MAP.get(clean_lang) or ("zh-cn" if clean_lang == "zh" else None)
 
 
 
180
 
181
  if mapped_lang:
182
+ print(f"[v87] Use XTTS: {mapped_lang}")
183
  speaker_wav_path = None
184
  if speaker_wav_b64:
185
  sb = base64.b64decode(speaker_wav_b64)
186
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
187
  f.write(sb); speaker_wav_path = f.name
188
  else: speaker_wav_path = "default_speaker.wav"
 
189
  try:
190
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as output_file:
191
  output_path = output_file.name
 
196
  if speaker_wav_path and "default_speaker" not in speaker_wav_path:
197
  if os.path.exists(speaker_wav_path): os.unlink(speaker_wav_path)
198
  if 'output_path' in locals() and os.path.exists(output_path): os.unlink(output_path)
199
+
200
+ print(f"[v87] Use Chatterbox: {clean_lang}")
 
201
  try:
202
  temp_ref = None
203
  if speaker_wav_b64:
204
+ sb = base64.b64decode(speaker_wav_b64)
205
+ with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
206
+ f.write(sb); temp_ref = f.name
207
  audio_bytes = chatterbox_utils.run_chatterbox_inference(text, clean_lang, speaker_wav_path=temp_ref)
208
  if temp_ref and os.path.exists(temp_ref): os.unlink(temp_ref)
209
  return {"audio": base64.b64encode(audio_bytes).decode()}
210
+ except Exception as e: return {"error": f"TTS Failure: {str(e)}"}
 
211
 
212
  @spaces.GPU
213
  def core_process(request_dict):
214
  action = request_dict.get("action")
215
  t0 = time.time()
216
+ print(f"--- [v87] πŸš€ GPU START: {action} ---")
217
+ activate_gpu_models(action)
218
  try:
219
  if action == "stt": res = _stt_logic(request_dict)
220
  elif action == "translate": res = {"translated": _translate_logic(request_dict.get("text"), request_dict.get("target_lang", "en"))}
 
222
  elif action == "s2st":
223
  stt_res = _stt_logic({"file": request_dict.get("file"), "lang": request_dict.get("source_lang")})
224
  text = stt_res.get("text", "")
225
+ if not text: return {"error": "No speech"}
226
  translated = _translate_logic(text, request_dict.get("target_lang"))
227
  tts_res = _tts_logic(translated, request_dict.get("target_lang"), request_dict.get("speaker_wav"))
228
  res = {"text": text, "translated": translated, "audio": tts_res.get("audio")}
229
  elif action == "health": res = {"status": "awake"}
230
  else: res = {"error": f"Unknown action: {action}"}
231
  finally:
232
+ print(f"--- [v87] ✨ END: {action} ({time.time()-t0:.2f}s) ---")
233
  gc.collect()
234
  if torch.cuda.is_available(): torch.cuda.empty_cache()
235
  return res
236
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
237
  app = FastAPI()
238
 
239
  @app.post("/api/v1/process")
 
245
  traceback.print_exc()
246
  return {"error": str(e)}
247
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  @app.get("/health")
249
  def health(): return {"status": "ok", "gpu": torch.cuda.is_available(), "time": time.ctime()}
250