TGPro1 committed on
Commit
f2df3df
·
verified ·
1 Parent(s): 323845c

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +12 -13
app.py CHANGED
@@ -10,9 +10,8 @@ from fastapi.middleware.cors import CORSMiddleware
10
  from fastapi.responses import HTMLResponse
11
  import uvicorn
12
 
13
- # --- [v155] 🚀 FINAL STABLE ENGINE (GPU-STT-Base + CPU-TTS-XTTS) ---
14
- # This version uses the proven stable components for H200 ZeroGPU.
15
- print(f"--- [v155] 📑 BOOTING FINAL ENGINE ---")
16
 
17
  from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
18
  from TTS.api import TTS
@@ -47,15 +46,15 @@ MODELS = {"stt": None, "tts": None}
47
  def load_tts_cpu():
48
  global MODELS
49
  if MODELS.get("tts") is None:
50
- print("--- [v155] 📥 LOADING XTTS V2 (CPU) ---")
51
  MODELS["tts"] = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cpu")
52
- print("--- [v155] ✅ XTTS READY ---")
53
 
54
  @spaces.GPU(duration=60)
55
  def gpu_stt_base(temp_path, lang):
56
  global MODELS
57
  if MODELS.get("stt") is None:
58
- print("--- [v155] 📥 LOADING WHISPER (Base) ON GPU ---")
59
  model_id = "openai/whisper-base"
60
  model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch.float32).to("cuda")
61
  processor = AutoProcessor.from_pretrained(model_id)
@@ -70,13 +69,13 @@ def gpu_stt_base(temp_path, lang):
70
  return res["text"].strip()
71
 
72
  async def handle_process(request: Request):
 
73
  try:
74
  data = await request.json()
75
  action = data.get("action")
76
- if action == "health": return {"status": "awake", "v": "155"}
77
 
78
- print(f"--- [v155] 🛠️ {action} ---")
79
- t1 = time.time()
80
 
81
  stt_text = ""
82
  if action in ["stt", "s2st"]:
@@ -130,20 +129,20 @@ async def handle_process(request: Request):
130
  return {"text": stt_text, "translated": trans_text, "audio": audio_b64}
131
 
132
  except Exception as e:
133
- print(f"❌ [v155] ERROR: {traceback.format_exc()}")
134
  return {"error": str(e)}
135
  finally:
136
- print(f"--- [v155] ✨ DONE ({time.time()-t1:.1f}s) ---")
137
 
138
  @app.post("/process")
139
  @app.post("/api/v1/process")
140
  async def api_process(request: Request): return await handle_process(request)
141
 
142
  @app.get("/health")
143
- def health(): return {"status": "ok", "v": "155", "gpu": torch.cuda.is_available()}
144
 
145
  @app.get("/", response_class=HTMLResponse)
146
- def root(): return "<h1>🚀 AI Engine v155 (STABLE)</h1>"
147
 
148
  if __name__ == "__main__":
149
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
10
  from fastapi.responses import HTMLResponse
11
  import uvicorn
12
 
13
+ # --- [v156] 🚀 PRO STABLE ENGINE (GPU-STT-Base + CPU-TTS-XTTS) ---
14
+ print(f"--- [v156] 📑 BOOTING PRO STABLE ENGINE ---")
 
15
 
16
  from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
17
  from TTS.api import TTS
 
46
  def load_tts_cpu():
47
  global MODELS
48
  if MODELS.get("tts") is None:
49
+ print("--- [v156] 📥 LOADING XTTS V2 (CPU) ---")
50
  MODELS["tts"] = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cpu")
51
+ print("--- [v156] ✅ XTTS READY ---")
52
 
53
  @spaces.GPU(duration=60)
54
  def gpu_stt_base(temp_path, lang):
55
  global MODELS
56
  if MODELS.get("stt") is None:
57
+ print("--- [v156] 📥 LOADING WHISPER (Base) ON GPU ---")
58
  model_id = "openai/whisper-base"
59
  model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch.float32).to("cuda")
60
  processor = AutoProcessor.from_pretrained(model_id)
 
69
  return res["text"].strip()
70
 
71
  async def handle_process(request: Request):
72
+ t1 = time.time() # Initialize t1 at the very beginning to avoid UnboundLocalError
73
  try:
74
  data = await request.json()
75
  action = data.get("action")
76
+ if action == "health": return {"status": "awake", "v": "156"}
77
 
78
+ print(f"--- [v156] 🛠️ {action} ---")
 
79
 
80
  stt_text = ""
81
  if action in ["stt", "s2st"]:
 
129
  return {"text": stt_text, "translated": trans_text, "audio": audio_b64}
130
 
131
  except Exception as e:
132
+ print(f"❌ [v156] ERROR: {traceback.format_exc()}")
133
  return {"error": str(e)}
134
  finally:
135
+ print(f"--- [v156] ✨ DONE ({time.time()-t1:.1f}s) ---")
136
 
137
  @app.post("/process")
138
  @app.post("/api/v1/process")
139
  async def api_process(request: Request): return await handle_process(request)
140
 
141
  @app.get("/health")
142
+ def health(): return {"status": "ok", "v": "156", "gpu": torch.cuda.is_available()}
143
 
144
  @app.get("/", response_class=HTMLResponse)
145
+ def root(): return "<h1>🚀 AI Engine v156 (PRO STABLE)</h1>"
146
 
147
  if __name__ == "__main__":
148
  uvicorn.run(app, host="0.0.0.0", port=7860)