TGPro1 committed on
Commit
92366fd
·
verified ·
1 Parent(s): 3cdd30e

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +50 -48
app.py CHANGED
@@ -1,6 +1,4 @@
1
  from fastapi import FastAPI, Request, Response
2
- from fastapi.responses import StreamingResponse
3
- from contextlib import asynccontextmanager
4
  import gradio as gr
5
  import uvicorn
6
  import base64
@@ -18,7 +16,7 @@ import logging
18
  from threading import Thread, Lock
19
  from huggingface_hub import snapshot_download
20
 
21
- # πŸ›‘οΈ 1. SILENCE LOGS & WARNINGS (v102: Mission-Critical Silence & Stability)
22
  logging.getLogger("transformers").setLevel(logging.ERROR)
23
  logging.getLogger("TTS").setLevel(logging.ERROR)
24
  logging.getLogger("onnxruntime").setLevel(logging.ERROR)
@@ -81,8 +79,8 @@ except ImportError:
81
  if f is None: return lambda x: x
82
  return f
83
 
84
- # FORCE BUILD TRIGGER: 13:16:00 Jan 21 2026
85
- # v102: Gradio SDK Restoration. Synchronized Port Binding.
86
 
87
  os.environ["COQUI_TOS_AGREED"] = "1"
88
  MODELS = {"stt": None, "translate": None, "tts": None, "denoiser": None}
@@ -91,7 +89,7 @@ WARMUP_STATUS = {"complete": False, "in_progress": False}
91
  WARMUP_LOCK = Lock()
92
 
93
  def activate_gpu_models(action):
94
- """v102: Mission-Critical GPU Mode"""
95
  global MODELS, WARMUP_STATUS
96
  local_only = WARMUP_STATUS["complete"]
97
 
@@ -100,7 +98,7 @@ def activate_gpu_models(action):
100
  try: stt_on_gpu = MODELS["stt"] is not None and MODELS["stt"].model.device == "cuda"
101
  except: pass
102
  if not stt_on_gpu:
103
- print(f"πŸŽ™οΈ [v102] Activating Whisper (GPU)...")
104
  try:
105
  if MODELS["stt"]: del MODELS["stt"]
106
  gc.collect(); torch.cuda.empty_cache()
@@ -112,7 +110,7 @@ def activate_gpu_models(action):
112
  local_files_only=local_only
113
  )
114
  except Exception as e:
115
- print(f"⚠️ GPU Init failed: {e}. Falling back to CPU in-RAM.")
116
  MODELS["stt"] = WhisperModel("large-v3", device="cpu", compute_type="int8", local_files_only=True)
117
 
118
  if action in ["tts", "s2st"]:
@@ -122,7 +120,7 @@ def activate_gpu_models(action):
122
  tts_on_gpu = "cuda" in curr
123
  except: pass
124
  if MODELS["tts"] is None or not tts_on_gpu:
125
- print(f"πŸ”Š [v102] Activating XTTS-v2 (GPU)...")
126
  try:
127
  if MODELS["tts"] is None:
128
  MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=True)
@@ -137,9 +135,9 @@ def activate_gpu_models(action):
137
  if MODELS["translate"] is None: MODELS["translate"] = "active"
138
 
139
  def release_gpu_models():
140
- """v102: Clean Resident State"""
141
  global MODELS
142
- print("🧹 [v102] Releasing GPU resources.")
143
  try:
144
  if MODELS["stt"] and MODELS["stt"].model.device == "cuda":
145
  del MODELS["stt"]
@@ -153,20 +151,19 @@ def release_gpu_models():
153
  if torch.cuda.is_available(): torch.cuda.empty_cache()
154
 
155
  def warmup_task():
156
- """Silent Pre-loading (v102)"""
157
  global WARMUP_STATUS
158
  with WARMUP_LOCK:
159
  if WARMUP_STATUS["complete"] or WARMUP_STATUS["in_progress"]: return
160
  WARMUP_STATUS["in_progress"] = True
161
- print("\nπŸ”₯ --- V102: GRADIO SYNC WARMUP STARTED ---")
162
  try:
163
  MODELS["stt"] = WhisperModel("large-v3", device="cpu", compute_type="int8")
164
  MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
165
  chatterbox_utils.warmup_chatterbox()
166
  WARMUP_STATUS["complete"] = True
167
- print(f"βœ… --- SYSTEM WARM: v102 --- \n")
168
- except Exception as e:
169
- print(f"❌ Warmup fail: {e}")
170
  finally: WARMUP_STATUS["in_progress"] = False
171
 
172
  def _stt_logic(request_dict):
@@ -217,7 +214,7 @@ def _tts_logic(text, lang, speaker_wav_b64):
217
  def core_process(request_dict):
218
  action = request_dict.get("action")
219
  t1 = time.time()
220
- print(f"--- [v102] πŸš€ GPU SESSION: {action} ---")
221
  activate_gpu_models(action)
222
  try:
223
  if action == "stt": res = _stt_logic(request_dict)
@@ -230,19 +227,27 @@ def core_process(request_dict):
230
  res = {"text": stt_res.get("text"), "translated": translated, "audio": tts_res.get("audio")}
231
  else: res = {"error": f"Unknown action: {action}"}
232
  finally:
233
- print(f"--- [v102] ✨ SUCCESS: {action} ({time.time()-t1:.2f}s) ---")
234
  release_gpu_models()
235
  return res
236
 
237
- @asynccontextmanager
238
- async def lifespan(app: FastAPI):
239
- # GRADIO MANAGED LIFECYCLE
240
- Thread(target=warmup_task, daemon=True).start()
241
- yield
242
 
243
- app = FastAPI(lifespan=lifespan)
 
 
 
 
 
 
 
244
 
245
- @app.post("/api/v1/process")
 
246
  async def api_process(request: Request):
247
  try:
248
  req_data = await request.json()
@@ -251,33 +256,30 @@ async def api_process(request: Request):
251
  return core_process(req_data)
252
  except Exception as e: return {"error": str(e)}
253
 
254
- @app.get("/health")
255
- def health(): return {"status": "ok", "warm": WARMUP_STATUS["complete"], "v": "102"}
256
 
257
- @app.post("/api/v1/clear_cache")
258
- async def clear_cache():
259
  try:
260
  release_gpu_models()
261
- temp_dir = tempfile.gettempdir()
262
- for f in os.listdir(temp_dir):
263
- if f.endswith(".wav") or f.startswith("tm"):
264
- try: os.unlink(os.path.join(temp_dir, f))
265
- except: pass
266
  return {"status": "success"}
267
  except: return {"status": "error"}
268
 
269
- # πŸš€ V102: SYNCHRONIZED ENTRY POINT
270
- # We mount everything into Gradio and let the SDK handle the binding.
271
- demo = gr.Interface(
272
- fn=lambda x: json.dumps(core_process(json.loads(x))),
273
- inputs="text",
274
- outputs="text",
275
- title="πŸš€ AI Engine v102"
276
- )
277
-
278
- # Use gr.mount_gradio_app to merge FastAPI and Gradio.
279
- # The Hugging Face Gradio SDK will automatically detect and launch the mounted app.
280
- app = gr.mount_gradio_app(app, demo, path="/")
281
 
282
- # No uvicorn.run here! The Hugging Face Gradio launcher handles it.
283
- # This prevents the "Address already in use" error.
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from fastapi import FastAPI, Request, Response
 
 
2
  import gradio as gr
3
  import uvicorn
4
  import base64
 
16
  from threading import Thread, Lock
17
  from huggingface_hub import snapshot_download
18
 
19
+ # πŸ›‘οΈ 1. SILENCE LOGS & WARNINGS (v103: Mission-Critical Stability)
20
  logging.getLogger("transformers").setLevel(logging.ERROR)
21
  logging.getLogger("TTS").setLevel(logging.ERROR)
22
  logging.getLogger("onnxruntime").setLevel(logging.ERROR)
 
79
  if f is None: return lambda x: x
80
  return f
81
 
82
+ # FORCE BUILD TRIGGER: 13:25:00 Jan 21 2026
83
+ # v103: Gradio Blocking Launch. ZeroGPU Ready. CPU Health Fix.
84
 
85
  os.environ["COQUI_TOS_AGREED"] = "1"
86
  MODELS = {"stt": None, "translate": None, "tts": None, "denoiser": None}
 
89
  WARMUP_LOCK = Lock()
90
 
91
  def activate_gpu_models(action):
92
+ """v103: Stability-First Activation"""
93
  global MODELS, WARMUP_STATUS
94
  local_only = WARMUP_STATUS["complete"]
95
 
 
98
  try: stt_on_gpu = MODELS["stt"] is not None and MODELS["stt"].model.device == "cuda"
99
  except: pass
100
  if not stt_on_gpu:
101
+ print(f"πŸŽ™οΈ [v103] Activating Whisper (GPU: 1-Worker Stability)...")
102
  try:
103
  if MODELS["stt"]: del MODELS["stt"]
104
  gc.collect(); torch.cuda.empty_cache()
 
110
  local_files_only=local_only
111
  )
112
  except Exception as e:
113
+ print(f"⚠️ GPU Init failed: {e}")
114
  MODELS["stt"] = WhisperModel("large-v3", device="cpu", compute_type="int8", local_files_only=True)
115
 
116
  if action in ["tts", "s2st"]:
 
120
  tts_on_gpu = "cuda" in curr
121
  except: pass
122
  if MODELS["tts"] is None or not tts_on_gpu:
123
+ print(f"πŸ”Š [v103] Activating XTTS-v2 (GPU)...")
124
  try:
125
  if MODELS["tts"] is None:
126
  MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=True)
 
135
  if MODELS["translate"] is None: MODELS["translate"] = "active"
136
 
137
  def release_gpu_models():
138
+ """v103: RAM Cleanup"""
139
  global MODELS
140
+ print("🧹 [v103] Releasing GPU resources.")
141
  try:
142
  if MODELS["stt"] and MODELS["stt"].model.device == "cuda":
143
  del MODELS["stt"]
 
151
  if torch.cuda.is_available(): torch.cuda.empty_cache()
152
 
153
  def warmup_task():
154
+ """Silent Warmup (v103)"""
155
  global WARMUP_STATUS
156
  with WARMUP_LOCK:
157
  if WARMUP_STATUS["complete"] or WARMUP_STATUS["in_progress"]: return
158
  WARMUP_STATUS["in_progress"] = True
159
+ print("\nπŸ”₯ --- V103: UNIFIED WARMUP STARTED ---")
160
  try:
161
  MODELS["stt"] = WhisperModel("large-v3", device="cpu", compute_type="int8")
162
  MODELS["tts"] = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
163
  chatterbox_utils.warmup_chatterbox()
164
  WARMUP_STATUS["complete"] = True
165
+ print(f"βœ… --- SYSTEM READY: v103 --- \n")
166
+ except: pass
 
167
  finally: WARMUP_STATUS["in_progress"] = False
168
 
169
  def _stt_logic(request_dict):
 
214
  def core_process(request_dict):
215
  action = request_dict.get("action")
216
  t1 = time.time()
217
+ print(f"--- [v103] πŸš€ GPU SESSION: {action} ---")
218
  activate_gpu_models(action)
219
  try:
220
  if action == "stt": res = _stt_logic(request_dict)
 
227
  res = {"text": stt_res.get("text"), "translated": translated, "audio": tts_res.get("audio")}
228
  else: res = {"error": f"Unknown action: {action}"}
229
  finally:
230
+ print(f"--- [v103] ✨ SUCCESS: {action} ({time.time()-t1:.2f}s) ---")
231
  release_gpu_models()
232
  return res
233
 
234
# Unified-port bridge: the Gradio app owns the server, so the JSON API is
# exposed through this text-in/text-out function (avoids "Address already in use").
def gradio_fn(req_json):
    """Decode a JSON request string, run it through core_process, and
    return the result (or any error) as a JSON string."""
    try:
        request = json.loads(req_json)
        result = core_process(request)
        return json.dumps(result)
    except Exception as exc:
        return json.dumps({"error": str(exc)})
239
 
240
# Build the Gradio UI that fronts the engine; configuration is gathered in
# one dict so the interface definition reads as data.
_INTERFACE_CONFIG = {
    "fn": gradio_fn,
    "inputs": "text",
    "outputs": "text",
    "title": "πŸš€ AI Engine v103 (ZeroGPU Ready)",
    "description": "Backend API with integrated Gradio UI",
}
demo = gr.Interface(**_INTERFACE_CONFIG)
248
 
249
+ # πŸ₯ ADD FASTAPI ROUTES TO GRADIO'S INTERNAL APP
250
+ @demo.app.post("/api/v1/process")
251
  async def api_process(request: Request):
252
  try:
253
  req_data = await request.json()
 
256
  return core_process(req_data)
257
  except Exception as e: return {"error": str(e)}
258
 
259
@demo.app.get("/health")
def health():
    """Liveness probe: reports OK plus whether warmup has completed."""
    return {
        "status": "ok",
        "warm": WARMUP_STATUS["complete"],
        "v": "103",
    }
261
 
262
@demo.app.post("/api/v1/clear_cache")
async def clear_cache_api():
    """Release GPU-resident models on demand.

    Returns:
        dict: {"status": "success"} on clean release, {"status": "error"}
        if cleanup raised — errors are reported rather than propagated so
        this endpoint never returns a 500 for a best-effort cleanup.
    """
    try:
        release_gpu_models()
        return {"status": "success"}
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # still propagate instead of being swallowed as "error".
    except Exception:
        return {"status": "error"}
268
 
269
# START WARMUP: pre-load models in the background so startup isn't blocked;
# daemon=True lets the process exit without joining this thread.
_warmup_thread = Thread(target=warmup_task, daemon=True)
_warmup_thread.start()
 
 
 
 
 
 
 
 
 
 
271
 
272
# πŸš€ FINAL LAUNCH: This keeps the process alive on HF Spaces
if __name__ == "__main__":
    # Run directly: blocking launch owns the process.
    print("πŸš€ [v103] Starting Engine...")
    try:
        demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
    except Exception as e:
        print(f"⚠️ Launch failed (possibly handled by SDK): {e}")
        # Final keep-alive if launch() returned instantly
        while True:
            time.sleep(100)
else:
    # If imported by HF SDK wrapper: non-blocking launch so the importer
    # keeps control of the main thread.
    try:
        demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False, prevent_thread_lock=True)
    # Was a bare `except: pass` — at minimum surface the failure instead of
    # silently swallowing it (and let SystemExit/KeyboardInterrupt through).
    except Exception as e:
        print(f"Launch in import mode failed: {e}")