Spaces: Running on Zero
Update app.py
Browse files
app.py CHANGED
|
@@ -1,11 +1,29 @@
|
|
| 1 |
-
import os, sys, json, tempfile, subprocess, shutil, uuid, glob
|
| 2 |
from pathlib import Path
|
| 3 |
from typing import Tuple, List
|
| 4 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
import gradio as gr
|
| 6 |
import spaces
|
| 7 |
from huggingface_hub import snapshot_download
|
| 8 |
-
|
| 9 |
from loguru import logger
|
| 10 |
import torch, torchaudio
|
| 11 |
|
|
@@ -114,7 +132,7 @@ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
|
|
| 114 |
|
| 115 |
def ensure_clap_safetensors():
|
| 116 |
"""
|
| 117 |
-
|
| 118 |
Transformers never selects a stale/corrupt *.bin.
|
| 119 |
"""
|
| 120 |
snapshot_download(
|
|
@@ -216,10 +234,20 @@ def auto_load_models() -> str:
|
|
| 216 |
logger.info("✅ Model loaded")
|
| 217 |
return "✅ Model loaded"
|
| 218 |
|
| 219 |
-
# Init logger and load model once
|
| 220 |
logger.remove()
|
| 221 |
logger.add(lambda msg: print(msg, end=''), level="INFO")
|
| 222 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
|
| 224 |
# ========= Preprocessing =========
|
| 225 |
def preprocess_video(in_path: str) -> Tuple[str, float]:
|
|
@@ -498,4 +526,19 @@ with gr.Blocks(css=THEME_CSS, title=APP_TITLE, analytics_enabled=False) as demo:
|
|
| 498 |
- Enable **Mux** to get a ready MP4 with the generated foley track.
|
| 499 |
""")
|
| 500 |
|
| 501 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os, sys, json, tempfile, subprocess, shutil, uuid, glob, traceback, datetime
|
| 2 |
from pathlib import Path
|
| 3 |
from typing import Tuple, List
|
| 4 |
|
| 5 |
+
# ================= Crash trap & verbose logs =================
|
| 6 |
+
import faulthandler
|
| 7 |
+
faulthandler.enable()
|
| 8 |
+
|
| 9 |
+
os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "false")
|
| 10 |
+
os.environ.setdefault("GRADIO_NUM_PORTS", "1")
|
| 11 |
+
os.environ.setdefault("HF_HUB_VERBOSE", "1")
|
| 12 |
+
os.environ.setdefault("TRANSFORMERS_VERBOSITY", "info")
|
| 13 |
+
os.environ.setdefault("PYTHONUNBUFFERED", "1")
|
| 14 |
+
|
| 15 |
+
def _crash_trap(exctype, value, tb):
|
| 16 |
+
ts = datetime.datetime.utcnow().isoformat()
|
| 17 |
+
print(f"\n===== FATAL ({ts}Z) =====================================")
|
| 18 |
+
traceback.print_exception(exctype, value, tb)
|
| 19 |
+
print("=========================================================\n", flush=True)
|
| 20 |
+
|
| 21 |
+
# Route every uncaught exception through the banner printer above so the
# Space log always shows the fatal traceback (the default hook's output is
# easy to miss in container logs).
sys.excepthook = _crash_trap
# ============================================================
|
| 23 |
+
|
| 24 |
import gradio as gr
|
| 25 |
import spaces
|
| 26 |
from huggingface_hub import snapshot_download
|
|
|
|
| 27 |
from loguru import logger
|
| 28 |
import torch, torchaudio
|
| 29 |
|
|
|
|
| 132 |
|
| 133 |
def ensure_clap_safetensors():
|
| 134 |
"""
|
| 135 |
+
Pre-cache ONLY safetensors for laion/larger_clap_general so
|
| 136 |
Transformers never selects a stale/corrupt *.bin.
|
| 137 |
"""
|
| 138 |
snapshot_download(
|
|
|
|
| 234 |
logger.info("✅ Model loaded")
|
| 235 |
return "✅ Model loaded"
|
| 236 |
|
| 237 |
+
# Init logger and load model once (with explicit crash surface).
# Route loguru through plain print so messages reach the Space log stream.
logger.remove()
logger.add(lambda msg: print(msg, end=''), level="INFO")

try:
    msg = auto_load_models()
    logger.info(msg)
except Exception:
    # Format the traceback exactly once and reuse it for both the console
    # and the fallback UI (the original formatted it twice and wrapped the
    # string in a no-op "".join()).
    boot_err = traceback.format_exc()
    print("\n[BOOT][ERROR] auto_load_models() failed:")
    print(boot_err, file=sys.stderr)
    # Surface the failure in the browser instead of a blank/erroring Space.
    with gr.Blocks(title="Foley Studio · Boot Error") as demo:
        gr.Markdown("### ❌ Boot failure\n```\n" + boot_err + "\n```")
    demo.launch(server_name="0.0.0.0")
    # Re-raise so the Space is marked failed rather than left half-alive.
    raise
|
| 251 |
|
| 252 |
# ========= Preprocessing =========
|
| 253 |
def preprocess_video(in_path: str) -> Tuple[str, float]:
|
|
|
|
| 526 |
- Enable **Mux** to get a ready MP4 with the generated foley track.
|
| 527 |
""")
|
| 528 |
|
| 529 |
+
# ---- Health endpoint & guarded launch ---------------------------------------
# Best-effort: register a /health probe on Gradio's underlying FastAPI app.
# The demo.app attribute access itself is the availability probe, so the
# previously unused `from fastapi import FastAPI` import is dropped.
try:
    fastapi_app = demo.app  # Gradio mounts itself on a FastAPI instance

    @fastapi_app.get("/health")
    def _health():
        # Lightweight liveness/readiness probe for the Space.
        return {"ok": True, "model_loaded": _model_dict is not None, "device": str(_device)}
except Exception:
    # Non-fatal (the app works without the probe), but say WHY it is
    # missing instead of swallowing the error silently.
    print("[BOOT][WARN] /health endpoint not registered:")
    traceback.print_exc()

try:
    demo.queue(max_size=24).launch(server_name="0.0.0.0")
except Exception:
    # Launch failures are fatal: log loudly, then re-raise so the Space
    # build is marked as failed.
    print("\n[BOOT][ERROR] Gradio launch failed:")
    traceback.print_exc()
    raise