TGPro1 committed on
Commit 0695186 · verified
1 Parent(s): 6b0a7cc

Upload app.py with huggingface_hub

Files changed (1)
app.py (+29, -23)
app.py CHANGED
@@ -11,9 +11,9 @@ from fastapi import FastAPI, Request
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import HTMLResponse
 
-# --- [v163] 🚀 PRO GPU ENGINE (FULL STABILITY) ---
-# This version enforces GPU usage for STT and TTS with robust error handling.
-print(f"--- [v163] 📡 BOOTING ENGINE ---")
+# --- [v164] 🚀 PRO GPU ENGINE (ULTIMATE STABILITY) ---
+# This version enforces float32 for STT to avoid CUBLAS errors and uses batch_size=1.
+print(f"--- [v164] 📡 BOOTING ENGINE ---")
 
 # 🛠️ CRITICAL: TORCHAUDIO MONKEYPATCH 🛠️
 import torchaudio
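
The heart of this commit is the dtype switch described in the new header comment. As a minimal standalone sketch of the v164 loading path (the literal "cuda" stands in for the app's get_best_gpu() helper, an assumption made here for brevity):

import torch
from transformers import AutoModelForSpeechSeq2Seq

model_id = "openai/whisper-large-v3-turbo"
# v163 loaded this model in torch.float16; v164 moves to torch.float32,
# trading speed and memory for numerical stability to avoid the CUBLAS
# execution failures cited in the header comment.
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch.float32
).to("cuda")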
@@ -33,7 +33,7 @@ def HeroLoad(filepath, **kwargs):
 if not hasattr(torchaudio, 'load_orig'):
     torchaudio.load_orig = torchaudio.load
 torchaudio.load = HeroLoad
-print("--- [v163] 🩹 TORCHAUDIO PATCH APPLIED ---")
+print("--- [v164] 🩹 TORCHAUDIO PATCH APPLIED ---")
 
 from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
 from TTS.api import TTS
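
Only the tail of the torchaudio monkeypatch is visible in this hunk. A minimal sketch of the full pattern, assuming HeroLoad merely delegates to the saved original loader (its actual body, defined earlier in app.py, is not part of this diff):

import torchaudio

def HeroLoad(filepath, **kwargs):
    # Hypothetical body: the real HeroLoad may preprocess the file
    # before handing off to the original loader.
    return torchaudio.load_orig(filepath, **kwargs)

# Save the original exactly once so re-imports cannot wrap it twice,
# then install the replacement.
if not hasattr(torchaudio, 'load_orig'):
    torchaudio.load_orig = torchaudio.load
torchaudio.load = HeroLoad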
@@ -78,29 +78,35 @@ def gpu_stt_full(temp_path, lang):
     device = get_best_gpu()
 
     if MODELS.get("stt") is None:
-        print(f"--- [v163] 📥 LOADING WHISPER LARGE ON {device} ---")
+        print(f"--- [v164] 📥 LOADING WHISPER LARGE (FP32) ON {device} ---")
         model_id = "openai/whisper-large-v3-turbo"
-        model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
+        # Using float32 to resolve CUBLAS_STATUS_EXECUTION_FAILED on H200/A10G MIG
+        model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch.float32).to(device)
         processor = AutoProcessor.from_pretrained(model_id)
         MODELS["stt"] = pipeline(
             "automatic-speech-recognition",
             model=model,
             tokenizer=processor.tokenizer,
             feature_extractor=processor.feature_extractor,
-            chunk_length_s=30, # Mandatory for >30s support
+            chunk_length_s=30,
             device=device
         )
 
-    print(f"--- [v163] 🎙️ WHISPER INFERENCE (TEMP 0) ---")
+    print(f"--- [v164] 🎙️ WHISPER INFERENCE (TEMP 0, BS 1) ---")
     res = MODELS["stt"](
         temp_path,
-        batch_size=8, # Accelerated processing
+        batch_size=1, # Ultimate stability
         generate_kwargs={
             "language": lang if lang and len(lang) <= 3 else None,
-            "temperature": 0.0, # High precision as requested
-            "return_timestamps": True # Fixes ValueError: > 3000 mel features
+            "temperature": 0.0,
+            "return_timestamps": True
         }
     )
+
+    # Post-inference cleanup
+    torch.cuda.empty_cache()
+    gc.collect()
+
     return res["text"].strip()
 
 @spaces.GPU(duration=180)
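
The new post-inference cleanup depends on gc, whose import sits outside this hunk. A self-contained sketch of the same step, with release_gpu_memory as a hypothetical helper name:

import gc
import torch

def release_gpu_memory():
    # Mirrors the inline cleanup added in v164: return cached CUDA
    # allocator blocks to the driver, then run Python garbage collection.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()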
@@ -109,13 +115,13 @@ def gpu_tts_full(text, mapped_lang, speaker_path):
     device = "cuda"
 
     if MODELS.get("tts") is None:
-        print(f"--- [v163] 📥 LOADING XTTS V2 ON GPU ---")
+        print(f"--- [v164] 📥 LOADING XTTS V2 ON GPU ---")
         MODELS["tts"] = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)
     else:
         try: MODELS["tts"].to(device)
         except: pass
 
-    print(f"--- [v163] 🔊 XTTS GPU INFERENCE ---")
+    print(f"--- [v164] 🔊 XTTS GPU INFERENCE ---")
     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as out_f:
         out_p = out_f.name
@@ -137,9 +143,9 @@ async def handle_process(request: Request):
     try:
         data = await request.json()
         action = data.get("action")
-        if action == "health": return {"status": "awake", "v": "163"}
+        if action == "health": return {"status": "awake", "v": "164"}
 
-        print(f"--- [v163] 🛠️ API REQUEST: {action.upper()} ---")
+        print(f"--- [v164] 🛠️ API REQUEST: {action.upper()} ---")
 
         stt_text = ""
         # 🟢 SPEECH-TO-TEXT
@@ -167,10 +173,10 @@ async def handle_process(request: Request):
             trans_text = text
 
             if action == "s2st":
-                print(f"--- [v163] 🌏 TRANSLATING TO {target} ---")
+                print(f"--- [v164] 🌏 TRANSLATING TO {target} ---")
                 trans_text = GoogleTranslator(source='auto', target=target).translate(stt_text)
                 text = trans_text
-                print(f"--- [v163] 📝 TRANS: {text[:100]}... ---")
+                print(f"--- [v164] 📝 TRANS: {text[:100]}... ---")
 
             XTTS_MAP = {"en": "en", "de": "de", "fr": "fr", "es": "es", "it": "it", "pl": "pl", "pt": "pt", "tr": "tr", "ru": "ru", "nl": "nl", "cs": "cs", "ar": "ar", "hu": "hu", "ko": "ko", "hi": "hi", "zh": "zh-cn"}
             clean_lang = target.split('-')[0].lower()
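
To illustrate how the mapping above normalizes a BCP-47 tag, a short sketch; the "zh-CN" input and the "en" fallback are assumptions for the example, since the actual lookup happens outside this hunk:

XTTS_MAP = {"en": "en", "de": "de", "zh": "zh-cn"}   # abridged copy of the map above
target = "zh-CN"                                     # hypothetical request value
clean_lang = target.split('-')[0].lower()            # -> "zh"
xtts_lang = XTTS_MAP.get(clean_lang, "en")           # -> "zh-cn" (assumed fallback: "en")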
@@ -201,10 +207,10 @@ async def handle_process(request: Request):
         return {"text": stt_text, "translated": trans_text, "audio": audio_b64}
 
     except Exception as e:
-        print(f"❌ [v163] ENGINE ERROR: {traceback.format_exc()}")
+        print(f"❌ [v164] ENGINE ERROR: {traceback.format_exc()}")
         return {"error": str(e)}
     finally:
-        print(f"--- [v163] ✨ MISSION COMPLETED ({time.time()-t1:.1f}s) ---")
+        print(f"--- [v164] ✨ MISSION COMPLETED ({time.time()-t1:.1f}s) ---")
 
 @app.post("/process")
 @app.post("/api/v1/process")
@@ -214,16 +220,16 @@ async def api_process(request: Request): return await handle_process(request)
 def health():
     return {
         "status": "ready",
-        "v": "163",
+        "v": "164",
         "gpu": torch.cuda.is_available(),
         "devices": torch.cuda.device_count(),
-        "engine": "Full GPU PRO",
-        "stt": "Whisper-v3-Turbo (GPU)",
+        "engine": "Full GPU PRO (Stable)",
+        "stt": "Whisper-v3-Turbo (FP32-GPU)",
         "tts": "XTTS-v2 (GPU)"
     }
 
 @app.get("/", response_class=HTMLResponse)
-def root(): return "<h1>🚀 PRO AI Engine v163 (GPU MODE)</h1>"
+def root(): return "<h1>🚀 PRO AI Engine v164 (GPU MODE)</h1>"
 
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
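
A quick way to exercise the updated endpoints once the Space is running; the host and port follow the uvicorn.run call above, and the requests dependency is an assumption of this example:

import requests

base = "http://localhost:7860"
# Lightweight wake-up probe handled inside handle_process:
r = requests.post(f"{base}/process", json={"action": "health"})
print(r.json())  # expected: {"status": "awake", "v": "164"}
# Root page served by root():
print(requests.get(f"{base}/").text)  # "<h1>🚀 PRO AI Engine v164 (GPU MODE)</h1>"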
 