TGPro1 committed on
Commit
ba52b5c
Β·
verified Β·
1 Parent(s): 9fae676

Delete app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +0 -135
app.py DELETED
@@ -1,135 +0,0 @@
1
- import gradio as gr
2
- from fastapi import FastAPI, Request
3
- import uvicorn
4
- import base64
5
- import torch
6
- import os
7
- import tempfile
8
- import traceback
9
- import json
10
- import time
11
-
12
# FORCE BUILD TRIGGER: 00:05:00 Jan 20 2026
os.environ["COQUI_TOS_AGREED"] = "1"  # auto-accept the Coqui TTS license prompt (required for XTTS-v2)

# Global models
# Lazily-populated registry: all entries start as None and are filled
# exactly once by load_models() on first non-health request.
MODELS = {"stt": None, "translate": None, "tts": None, "tokenizer": None}
17
-
18
def load_models():
    """Lazily populate the global MODELS registry (STT, translation, TTS).

    Each model is loaded at most once; entries already set are skipped, so
    repeated calls are cheap. The heavy libraries (whisper, transformers,
    TTS) are imported inside the function to keep module import fast.
    """
    global MODELS
    use_cuda = torch.cuda.is_available()
    device = "cuda" if use_cuda else "cpu"

    if MODELS["stt"] is None:
        print("🎙️ Loading Whisper large-v3...")
        import whisper
        stt_model = whisper.load_model("large-v3")
        if use_cuda:
            # Half precision on GPU halves memory and speeds up inference.
            stt_model = stt_model.half().to("cuda")
        MODELS["stt"] = stt_model

    if MODELS["translate"] is None:
        print("🌍 Loading NLLB-200...")
        from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
        MODELS["tokenizer"] = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
        dtype = torch.float16 if use_cuda else torch.float32
        nllb = AutoModelForSeq2SeqLM.from_pretrained(
            "facebook/nllb-200-distilled-600M",
            torch_dtype=dtype,
        )
        MODELS["translate"] = nllb.to(device)

    if MODELS["tts"] is None:
        print("🔊 Loading XTTS-v2...")
        from TTS.api import TTS
        MODELS["tts"] = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)
42
-
43
def process_logic(request):
    """Core dispatch shared between the Gradio UI and the FastAPI endpoint.

    Parameters
    ----------
    request : dict
        Must contain an "action" key: "health", "stt", "translate", or
        "tts", plus action-specific fields (base64 audio under "file"/
        "speaker_wav", text under "text", language codes under "lang"/
        "target_lang").

    Returns
    -------
    dict
        A JSON-serializable response; unknown actions yield {"error": ...}.
    """
    action = request.get("action")
    print(f"⚙️ Logic Action: {action}")

    # Health check must stay cheap: answer before any model is loaded.
    if action == "health":
        return {"status": "ok", "gpu": torch.cuda.is_available(), "timestamp": time.time()}

    load_models()

    if action == "stt":
        # Speech-to-text: decode base64 audio to a temp WAV, then transcribe.
        audio_b64 = request.get("file")
        lang = request.get("lang")
        audio_bytes = base64.b64decode(audio_b64)
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            f.write(audio_bytes)
            temp_path = f.name
        try:
            result = MODELS["stt"].transcribe(temp_path, language=lang, fp16=torch.cuda.is_available())
            return {"text": result["text"].strip()}
        finally:
            os.unlink(temp_path)

    elif action == "translate":
        text = request.get("text")
        target_lang = request.get("target_lang")
        # NOTE(review): this hard-codes the Latin-script suffix; NLLB/FLORES
        # codes for e.g. Hindi or Arabic use other scripts (hin_Deva,
        # arb_Arab). Confirm callers only send Latin-script targets.
        tgt_code = f"{target_lang}_Latn"
        inputs = MODELS["tokenizer"](text, return_tensors="pt", padding=True).to(MODELS["translate"].device)
        translated = MODELS["translate"].generate(
            **inputs,
            forced_bos_token_id=MODELS["tokenizer"].convert_tokens_to_ids(tgt_code),
            max_length=512,
        )
        result = MODELS["tokenizer"].decode(translated[0], skip_special_tokens=True)
        return {"translated": result.strip()}

    elif action == "tts":
        text = request.get("text")
        lang = request.get("lang")
        speaker_wav_b64 = request.get("speaker_wav")
        speaker_wav_path = None
        if speaker_wav_b64:
            # Optional voice-cloning reference audio.
            speaker_bytes = base64.b64decode(speaker_wav_b64)
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
                f.write(speaker_bytes)
                speaker_wav_path = f.name
        # Bug fix: pre-bind output_path so the finally block cannot raise a
        # masking NameError when tempfile creation itself fails.
        output_path = None
        try:
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as output_file:
                output_path = output_file.name
            MODELS["tts"].tts_to_file(text=text, language=lang, file_path=output_path, speaker_wav=speaker_wav_path)
            with open(output_path, "rb") as f:
                audio_b64 = base64.b64encode(f.read()).decode()
            return {"audio": audio_b64}
        finally:
            # Best-effort cleanup of both temp files.
            if speaker_wav_path and os.path.exists(speaker_wav_path):
                os.unlink(speaker_wav_path)
            if output_path and os.path.exists(output_path):
                os.unlink(output_path)

    return {"error": f"Unknown action: {action}"}
99
-
100
- # --- GRADIO SECTION ---
101
def gradio_fn(request_json):
    """Bridge for the Gradio textbox: JSON string in, JSON string out.

    Parses *request_json*, dispatches it to process_logic, and serializes
    the response. Any failure is returned as an error payload with a
    traceback so remote callers can debug.
    """
    try:
        parsed_request = json.loads(request_json)
        response = process_logic(parsed_request)
        return json.dumps(response)
    except Exception as exc:
        # Top-level boundary: report rather than crash the UI.
        return json.dumps({"error": str(exc), "trace": traceback.format_exc()})
108
-
109
# Gradio UI: one textbox takes a raw JSON request, the other shows the
# JSON response produced by gradio_fn / process_logic.
demo = gr.Interface(
    fn=gradio_fn,
    inputs=gr.Textbox(label="JSON Request", lines=5),
    outputs=gr.Textbox(label="JSON Response"),
    title="🚀 Unified AI Engine"
)
115
-
116
- # --- FASTAPI SECTION ---
117
# FastAPI application; hosts the JSON API routes and, below, the mounted Gradio UI.
app = FastAPI()
118
-
119
@app.post("/api/v1/process")
async def api_v1_process(request: Request):
    """POST endpoint: read the JSON body and dispatch it to process_logic.

    Failures (bad JSON or processing errors) are reported as an
    {"error": ...} payload rather than an HTTP error status.
    """
    try:
        payload = await request.json()
    except Exception as exc:
        return {"error": str(exc)}
    try:
        return process_logic(payload)
    except Exception as exc:
        return {"error": str(exc)}
126
-
127
@app.get("/health")
def health():
    """Liveness probe; always reports ok without touching any model."""
    status_payload = {"status": "ok"}
    return status_payload
130
-
131
# Mount Gradio into FastAPI at the web root; the API routes registered
# above ("/api/v1/process", "/health") keep working alongside the UI.
app = gr.mount_gradio_app(app, demo, path="/")
133
-
134
if __name__ == "__main__":
    # Port 7860 is the Hugging Face Spaces convention; bind all interfaces.
    uvicorn.run(app, host="0.0.0.0", port=7860)