TGPro1 committed on
Commit
b93eba5
·
verified ·
1 Parent(s): 17edb4a

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +40 -60
app.py CHANGED
@@ -9,12 +9,13 @@ from fastapi import FastAPI, Request
9
  from fastapi.middleware.cors import CORSMiddleware
10
  from fastapi.responses import HTMLResponse
11
  import uvicorn
 
12
 
13
- # --- [v146] πŸš€ ZEROGPU OPTIMIZED ENGINE ---
14
- print(f"--- [v146] πŸ“‘ BOOTING ZEROGPU ENGINE ---")
15
 
16
- from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
17
- from TTS.api import TTS
18
  from deep_translator import GoogleTranslator
19
 
20
  try:
@@ -35,24 +36,31 @@ os.environ["PYTHONWARNINGS"] = "ignore"
35
  app = FastAPI()
36
  app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])
37
 
38
- MODELS = {"stt": None, "tts": None}
39
 
40
- @spaces.GPU(duration=120)
 
 
 
 
 
 
 
 
 
41
  def process_stt(audio_b64, lang):
42
- """Speech-to-Text on GPU"""
43
  global MODELS
44
 
45
- # Load Whisper if needed
46
  if MODELS.get("stt") is None:
47
- print("--- [v146] πŸ“₯ LOADING WHISPER-BASE (STABLE) ---")
48
- # Use whisper-base for stability on ZeroGPU
49
  MODELS["stt"] = pipeline(
50
  "automatic-speech-recognition",
51
  model="openai/whisper-base",
52
  device="cuda",
53
- torch_dtype=torch.float32 # FP32 for stability
54
  )
55
- print("--- [v146] βœ… WHISPER-BASE LOADED ---")
56
 
57
  audio_bytes = base64.b64decode(audio_b64)
58
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
@@ -65,45 +73,19 @@ def process_stt(audio_b64, lang):
65
  if os.path.exists(temp_path):
66
  os.unlink(temp_path)
67
 
68
- @spaces.GPU(duration=120)
69
- def process_tts(text, target_lang, speaker_wav_b64=None):
70
- """Text-to-Speech on GPU"""
71
- global MODELS
72
-
73
- # Load XTTS if needed
74
- if MODELS.get("tts") is None:
75
- print("--- [v146] πŸ“₯ LOADING XTTS ---")
76
- MODELS["tts"] = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cuda")
77
- print("--- [v146] βœ… XTTS LOADED ---")
78
-
79
- XTTS_MAP = {"en": "en", "de": "de", "fr": "fr", "es": "es", "it": "it", "pl": "pl", "pt": "pt", "tr": "tr", "ru": "ru", "nl": "nl", "cs": "cs", "ar": "ar", "hu": "hu", "ko": "ko", "hi": "hi", "zh": "zh-cn"}
80
- clean_lang = target_lang.split('-')[0].lower()
81
- mapped_lang = XTTS_MAP.get(clean_lang) or ("zh-cn" if clean_lang == "zh" else None)
82
 
83
- if not mapped_lang:
84
- return {"error": f"Language {clean_lang} not supported."}
 
85
 
86
- speaker_wav_path = "default_speaker.wav"
87
- if speaker_wav_b64:
88
- sb = base64.b64decode(speaker_wav_b64)
89
- with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
90
- f.write(sb)
91
- speaker_wav_path = f.name
92
- elif not os.path.exists(speaker_wav_path):
93
- speaker_wav_path = None
94
-
95
- try:
96
- with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as out_f:
97
- out_p = out_f.name
98
- MODELS["tts"].tts_to_file(text=text, language=mapped_lang, file_path=out_p, speaker_wav=speaker_wav_path)
99
- with open(out_p, "rb") as f:
100
- audio_b64 = base64.b64encode(f.read()).decode()
101
- return audio_b64
102
- finally:
103
- if speaker_wav_b64 and os.path.exists(speaker_wav_path):
104
- os.unlink(speaker_wav_path)
105
- if 'out_p' in locals() and os.path.exists(out_p):
106
- os.unlink(out_p)
107
 
108
  @app.post("/process")
109
  async def api_process(request: Request):
@@ -111,9 +93,9 @@ async def api_process(request: Request):
111
  data = await request.json()
112
  action = data.get("action")
113
  if action == "health":
114
- return {"status": "awake", "v": "146", "mode": "ZEROGPU"}
115
 
116
- print(f"--- [v146] πŸ› οΈ PROCESSING: {action} ---")
117
  t1 = time.time()
118
 
119
  stt_text = None
@@ -133,27 +115,25 @@ async def api_process(request: Request):
133
  if len(text) < 2:
134
  return {"audio": ""} if action == "tts" else {"text": stt_text, "translated": "", "audio": ""}
135
 
136
- audio_res = process_tts(text, data.get("lang") if action == "tts" else target, data.get("speaker_wav"))
137
 
138
- if isinstance(audio_res, dict) and "error" in audio_res:
139
- return audio_res
140
  if action == "tts":
141
- return {"audio": audio_res}
142
- return {"text": stt_text, "translated": trans_text, "audio": audio_res}
143
 
144
  except Exception as e:
145
- print(f"❌ [v146] ERROR: {traceback.format_exc()}")
146
  return {"error": str(e)}
147
  finally:
148
- print(f"--- [v146] ✨ DONE ({time.time()-t1:.1f}s) ---")
149
 
150
  @app.get("/health")
151
  def health():
152
- return {"status": "ok", "v": "146", "mode": "ZEROGPU", "spaces": HAS_SPACES}
153
 
154
  @app.get("/", response_class=HTMLResponse)
155
  def root():
156
- return f"<html><body><h1>πŸš€ AI Engine v146 (ZEROGPU)</h1><p>Spaces: {HAS_SPACES}</p></body></html>"
157
 
158
  if __name__ == "__main__":
159
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
9
  from fastapi.middleware.cors import CORSMiddleware
10
  from fastapi.responses import HTMLResponse
11
  import uvicorn
12
+ import scipy.io.wavfile
13
 
14
+ # --- [v147] πŸš€ HYBRID ENGINE: GPU-STT + CPU-TTS ---
15
+ print(f"--- [v147] πŸ“‘ BOOTING HYBRID ENGINE ---")
16
 
17
+ from transformers import pipeline, SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
18
+ from datasets import load_dataset
19
  from deep_translator import GoogleTranslator
20
 
21
  try:
 
36
  app = FastAPI()
37
  app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])
38
 
39
# Model registry: STT (Whisper) is lazily loaded on the first GPU call;
# the SpeechT5 TTS stack is eagerly loaded below so CPU synthesis is
# available immediately at startup.
MODELS = {
    "stt": None,
    "tts_processor": None,
    "tts_model": None,
    "tts_vocoder": None,
    "tts_speaker": None,
}

# Load TTS at startup (CPU)
print("--- [v147] πŸ“₯ LOADING TTS (SpeechT5) ON CPU ---")
MODELS["tts_processor"] = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
MODELS["tts_model"] = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
MODELS["tts_vocoder"] = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
# Fixed speaker identity: one x-vector (index 7306) from the CMU Arctic set.
xvector_ds = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
MODELS["tts_speaker"] = torch.tensor(xvector_ds[7306]["xvector"]).unsqueeze(0)
print("--- [v147] βœ… TTS READY (CPU) ---")
49
+
50
+ @spaces.GPU(duration=60)
51
  def process_stt(audio_b64, lang):
52
+ """Speech-to-Text on GPU (Whisper)"""
53
  global MODELS
54
 
 
55
  if MODELS.get("stt") is None:
56
+ print("--- [v147] πŸ“₯ LOADING WHISPER ON GPU ---")
 
57
  MODELS["stt"] = pipeline(
58
  "automatic-speech-recognition",
59
  model="openai/whisper-base",
60
  device="cuda",
61
+ torch_dtype=torch.float32
62
  )
63
+ print("--- [v147] βœ… WHISPER READY (GPU) ---")
64
 
65
  audio_bytes = base64.b64decode(audio_b64)
66
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
 
73
  if os.path.exists(temp_path):
74
  os.unlink(temp_path)
75
 
76
def process_tts(text):
    """Text-to-Speech on CPU (SpeechT5).

    Synthesizes ``text`` with the eagerly-loaded SpeechT5 model/vocoder and
    the fixed speaker x-vector, round-trips the waveform through a temporary
    WAV file, and returns it base64-encoded.

    Args:
        text: Plain text to synthesize. NOTE(review): SpeechT5 is presumably
            English-only — confirm that non-English translated text is meant
            to be fed through here.

    Returns:
        str: base64-encoded WAV audio (16 kHz).
    """
    inputs = MODELS["tts_processor"](text=text, return_tensors="pt")
    speech = MODELS["tts_model"].generate_speech(
        inputs["input_ids"], MODELS["tts_speaker"], vocoder=MODELS["tts_vocoder"]
    )

    # Create a named temp file so scipy can write a proper WAV container.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as out_f:
        out_p = out_f.name
    try:
        # Float tensor -> float32 WAV at SpeechT5's 16 kHz output rate.
        scipy.io.wavfile.write(out_p, rate=16000, data=speech.numpy())
        with open(out_p, "rb") as f:
            return base64.b64encode(f.read()).decode()
    finally:
        # Fix: the original leaked the temp file when write/read raised;
        # always remove it, success or failure.
        if os.path.exists(out_p):
            os.unlink(out_p)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
90
  @app.post("/process")
91
  async def api_process(request: Request):
 
93
  data = await request.json()
94
  action = data.get("action")
95
  if action == "health":
96
+ return {"status": "awake", "v": "147", "mode": "HYBRID_GPU_STT_CPU_TTS"}
97
 
98
+ print(f"--- [v147] πŸ› οΈ PROCESSING: {action} ---")
99
  t1 = time.time()
100
 
101
  stt_text = None
 
115
  if len(text) < 2:
116
  return {"audio": ""} if action == "tts" else {"text": stt_text, "translated": "", "audio": ""}
117
 
118
+ audio_b64 = process_tts(text)
119
 
 
 
120
  if action == "tts":
121
+ return {"audio": audio_b64}
122
+ return {"text": stt_text, "translated": trans_text, "audio": audio_b64}
123
 
124
  except Exception as e:
125
+ print(f"❌ [v147] ERROR: {traceback.format_exc()}")
126
  return {"error": str(e)}
127
  finally:
128
+ print(f"--- [v147] ✨ DONE ({time.time()-t1:.1f}s) ---")
129
 
130
@app.get("/health")
def health():
    """Liveness endpoint: report engine version, pipeline mode, and ZeroGPU flag."""
    status_payload = {
        "status": "ok",
        "v": "147",
        "mode": "HYBRID_GPU_STT_CPU_TTS",
        "spaces": HAS_SPACES,
    }
    return status_payload
133
 
134
@app.get("/", response_class=HTMLResponse)
def root():
    """Landing page: minimal HTML banner describing the hybrid engine."""
    banner = f"<html><body><h1>πŸš€ AI Engine v147 (HYBRID)</h1><p>STT: GPU Whisper | TTS: CPU SpeechT5</p></body></html>"
    return banner
137
 
138
if __name__ == "__main__":
    # Bind to all interfaces on 7860 — the Hugging Face Spaces default port.
    uvicorn.run(app, host="0.0.0.0", port=7860)