# SoulX-Singer / app.py
# (Hugging Face viewer header preserved as comments so the file stays valid Python)
# Xinsheng-Wang's picture
# Upload folder using huggingface_hub
# 437ec35 verified
"""
Hugging Face Space entry point for SoulX-Singer.
Downloads pretrained models from the Hub if needed, then launches the Gradio app.
"""
import os
import sys
from pathlib import Path
# Set matplotlib backend before any imports that might use it (required for headless environments)
import matplotlib
matplotlib.use('Agg') # Use non-interactive backend
# Import spaces for ZeroGPU support (if available)
try:
import spaces
except ImportError:
# spaces module not available (not running on HF Spaces or not ZeroGPU)
spaces = None
# Directory containing this file; used as the working directory and model cache root.
ROOT = Path(__file__).resolve().parent
# All pretrained weights are downloaded beneath this directory.
PRETRAINED_DIR = ROOT / "pretrained_models"
# Main SoulX-Singer model checkpoint directory (expected to contain model.pt).
MODEL_DIR_SVS = PRETRAINED_DIR / "SoulX-Singer"
# Companion preprocessing models repository (presence of the directory marks it downloaded).
MODEL_DIR_PREPROCESS = PRETRAINED_DIR / "SoulX-Singer-Preprocess"
def ensure_pretrained_models():
    """Download SoulX-Singer and Preprocess models from Hugging Face Hub if not present.

    Idempotent: ``MODEL_DIR_SVS/model.pt`` marks the SVS model as downloaded,
    and the existence of ``MODEL_DIR_PREPROCESS`` marks the preprocess models.
    Only the missing repo(s) are fetched.

    Raises:
        ImportError: if ``huggingface_hub`` is not installed.
    """
    # Probe the SVS checkpoint once; reused below to decide what to download.
    svs_ready = (MODEL_DIR_SVS / "model.pt").exists()
    if svs_ready and MODEL_DIR_PREPROCESS.exists():
        print("Pretrained models already present, skipping download.", flush=True)
        return
    try:
        from huggingface_hub import snapshot_download
    except ImportError:
        print(
            "huggingface_hub not installed. Install with: pip install huggingface_hub",
            file=sys.stderr,
            flush=True,
        )
        raise
    PRETRAINED_DIR.mkdir(parents=True, exist_ok=True)
    if not svs_ready:
        print("Downloading SoulX-Singer model...", flush=True)
        # NOTE: `local_dir_use_symlinks` is deprecated and ignored by recent
        # huggingface_hub releases — snapshot_download now always materializes
        # real files under `local_dir`, so the kwarg is omitted.
        snapshot_download(
            repo_id="Soul-AILab/SoulX-Singer",
            local_dir=str(MODEL_DIR_SVS),
        )
        print("SoulX-Singer model ready.", flush=True)
    if not MODEL_DIR_PREPROCESS.exists():
        print("Downloading SoulX-Singer-Preprocess models...", flush=True)
        snapshot_download(
            repo_id="Soul-AILab/SoulX-Singer-Preprocess",
            local_dir=str(MODEL_DIR_PREPROCESS),
        )
        print("SoulX-Singer-Preprocess models ready.", flush=True)
def _maybe_gpu(func):
    """Wrap *func* with ``spaces.GPU()`` when running on ZeroGPU; no-op otherwise.

    A named helper replaces the inline conditional-expression decorator
    (``@spaces.GPU() if spaces is not None else lambda f: f``), which only
    parses on Python >= 3.9 (PEP 614) and is easy to misread.
    """
    return spaces.GPU()(func) if spaces is not None else func


@_maybe_gpu
def initialize_gpu():
    """Initialize GPU for ZeroGPU. Must be called once at startup.

    The actual model loading happens lazily in webui.py; this function only
    touches CUDA so the ZeroGPU runtime allocates a device for the process.

    Returns:
        bool: always ``True``.
    """
    import torch
    if torch.cuda.is_available():
        print(f"GPU initialized: {torch.cuda.get_device_name(0)}", flush=True)
    else:
        print("GPU not available, using CPU", flush=True)
    return True
if __name__ == "__main__":
    # Run from the repo root so relative paths inside webui resolve correctly.
    os.chdir(ROOT)
    ensure_pretrained_models()

    # Warm up the GPU before webui is imported (required on ZeroGPU).
    if spaces is not None:
        initialize_gpu()

    from webui import render_interface

    demo = render_interface()
    demo.queue()
    listen_port = int(os.environ.get("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=listen_port)