TGPro1 committed on
Commit
6fd2aaf
·
verified ·
1 Parent(s): df87867

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +25 -13
app.py CHANGED
@@ -5,13 +5,26 @@ import base64
5
  import torch
6
  import tempfile
7
  import traceback
 
8
  from fastapi import FastAPI, Request
9
  from fastapi.middleware.cors import CORSMiddleware
10
  from fastapi.responses import HTMLResponse
11
- import uvicorn
12
 
13
- # --- [v157] πŸš€ EXTREME STABILITY ENGINE ---
14
- print(f"--- [v157] πŸ“‘ BOOTING ENGINE ---")
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
17
  from TTS.api import TTS
@@ -46,15 +59,15 @@ MODELS = {"stt": None, "tts": None}
46
  def load_tts_cpu():
47
  global MODELS
48
  if MODELS.get("tts") is None:
49
- print("--- [v157] πŸ“₯ LOADING XTTS V2 (CPU) ---")
50
  MODELS["tts"] = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cpu")
51
- print("--- [v157] βœ… XTTS READY (CPU) ---")
52
 
53
  @spaces.GPU(duration=60)
54
  def gpu_stt_base(temp_path, lang):
55
  global MODELS
56
  if MODELS.get("stt") is None:
57
- print("--- [v157] πŸ“₯ LOADING WHISPER (Base) ON GPU ---")
58
  model_id = "openai/whisper-base"
59
  model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch.float32).to("cuda")
60
  processor = AutoProcessor.from_pretrained(model_id)
@@ -65,7 +78,6 @@ def gpu_stt_base(temp_path, lang):
65
  feature_extractor=processor.feature_extractor,
66
  device="cuda"
67
  )
68
- # Whisper Base is extremely robust and fast on H200
69
  res = MODELS["stt"](temp_path, generate_kwargs={"language": lang if lang and len(lang) <= 3 else None})
70
  return res["text"].strip()
71
 
@@ -74,9 +86,9 @@ async def handle_process(request: Request):
74
  try:
75
  data = await request.json()
76
  action = data.get("action")
77
- if action == "health": return {"status": "awake", "v": "157"}
78
 
79
- print(f"--- [v157] πŸ› οΈ {action} ---")
80
 
81
  stt_text = ""
82
  if action in ["stt", "s2st"]:
@@ -130,20 +142,20 @@ async def handle_process(request: Request):
130
  return {"text": stt_text, "translated": trans_text, "audio": audio_b64}
131
 
132
  except Exception as e:
133
- print(f"❌ [v157] ERROR: {traceback.format_exc()}")
134
  return {"error": str(e)}
135
  finally:
136
- print(f"--- [v157] ✨ DONE ({time.time()-t1:.1f}s) ---")
137
 
138
  @app.post("/process")
139
  @app.post("/api/v1/process")
140
  async def api_process(request: Request): return await handle_process(request)
141
 
142
  @app.get("/health")
143
- def health(): return {"status": "ok", "v": "157", "gpu": torch.cuda.is_available()}
144
 
145
  @app.get("/", response_class=HTMLResponse)
146
- def root(): return "<h1>πŸš€ AI Engine v157 (EXTREME STABILITY)</h1>"
147
 
148
  if __name__ == "__main__":
149
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
5
  import torch
6
  import tempfile
7
  import traceback
8
+ import uvicorn
9
  from fastapi import FastAPI, Request
10
  from fastapi.middleware.cors import CORSMiddleware
11
  from fastapi.responses import HTMLResponse
 
12
 
13
+ # --- [v158] πŸš€ ULTRA STABLE ENGINE ---
14
+ # This version fixes the TorchCodec/torchaudio dependency hell on H200 ZeroGPU
15
+ print(f"--- [v158] πŸ“‘ BOOTING ENGINE ---")
16
+
17
+ # πŸ› οΈ MONKEYPATCH torchaudio BEFORE XTTS LOADING πŸ› οΈ
18
+ import torchaudio
19
+ import librosa
20
+ def stable_load(filepath, **kwargs):
21
+ # Redirect torchaudio.load to librosa to bypass torchcodec issues
22
+ # Coqui XTTS usually passes sr as a keyword or positional argument
23
+ target_sr = kwargs.get("sample_rate") or kwargs.get("sr") or None
24
+ y, sr = librosa.load(filepath, sr=target_sr)
25
+ return torch.from_numpy(y).unsqueeze(0), sr
26
+ torchaudio.load = stable_load
27
+ print("--- [v158] 🩹 TORCHAUDIO PATCH APPLIED ---")
28
 
29
  from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
30
  from TTS.api import TTS
 
59
  def load_tts_cpu():
60
  global MODELS
61
  if MODELS.get("tts") is None:
62
+ print("--- [v158] πŸ“₯ LOADING XTTS V2 (CPU) ---")
63
  MODELS["tts"] = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cpu")
64
+ print("--- [v158] βœ… XTTS READY (CPU) ---")
65
 
66
  @spaces.GPU(duration=60)
67
  def gpu_stt_base(temp_path, lang):
68
  global MODELS
69
  if MODELS.get("stt") is None:
70
+ print("--- [v158] πŸ“₯ LOADING WHISPER (Base) ON GPU ---")
71
  model_id = "openai/whisper-base"
72
  model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch.float32).to("cuda")
73
  processor = AutoProcessor.from_pretrained(model_id)
 
78
  feature_extractor=processor.feature_extractor,
79
  device="cuda"
80
  )
 
81
  res = MODELS["stt"](temp_path, generate_kwargs={"language": lang if lang and len(lang) <= 3 else None})
82
  return res["text"].strip()
83
 
 
86
  try:
87
  data = await request.json()
88
  action = data.get("action")
89
+ if action == "health": return {"status": "awake", "v": "158"}
90
 
91
+ print(f"--- [v158] πŸ› οΈ {action} ---")
92
 
93
  stt_text = ""
94
  if action in ["stt", "s2st"]:
 
142
  return {"text": stt_text, "translated": trans_text, "audio": audio_b64}
143
 
144
  except Exception as e:
145
+ print(f"❌ [v158] ERROR: {traceback.format_exc()}")
146
  return {"error": str(e)}
147
  finally:
148
+ print(f"--- [v158] ✨ DONE ({time.time()-t1:.1f}s) ---")
149
 
150
  @app.post("/process")
151
  @app.post("/api/v1/process")
152
  async def api_process(request: Request): return await handle_process(request)
153
 
154
  @app.get("/health")
155
+ def health(): return {"status": "ok", "v": "158", "gpu": torch.cuda.is_available()}
156
 
157
  @app.get("/", response_class=HTMLResponse)
158
+ def root(): return "<h1>πŸš€ AI Engine v158 (ULTRA STABLE)</h1>"
159
 
160
  if __name__ == "__main__":
161
  uvicorn.run(app, host="0.0.0.0", port=7860)