"""
VRAM Memory Manager for LongCat-AudioDiT + Whisper.
Orchestrates loading and unloading of:
- AudioDiT TTS models (1B / 3.5B)
- Whisper STT models (turbo / large-v3)
Modes:
"auto" – probe available VRAM; keep both loaded if possible, else sequential
"simultaneous"– always keep both loaded (fails if VRAM too small)
"sequential" – always unload one before loading the other (safest for ≤12GB)
"""
import gc
import logging
from enum import Enum
from typing import Dict, Optional
import torch
# Module-level logger; configured by the application, not here.
logger = logging.getLogger(__name__)

# Estimated peak VRAM (GB) per model in fp16 / int8 on 1 GPU.
# NOTE(review): these are rough planning figures used by the AUTO-mode
# fit check, not measured values — confirm against real peak usage.
AUDIODIT_VRAM = {
    "1B": 4.0,
    "3.5B": 10.0,
}
WHISPER_VRAM = {
    "turbo": 1.6,
    "large-v3": 3.0,
}
# Leave this headroom free for activations, KV-cache, OS
VRAM_HEADROOM_GB = 2.0
class LoadMode(str, Enum):
    """Model co-residency policy (see module docstring for details).

    Inherits ``str`` so members compare equal to their plain string
    values and serialize cleanly (e.g. in the ``status()`` dict).
    """

    AUTO = "auto"            # probe free VRAM; keep both models if they fit
    SIMULTANEOUS = "simultaneous"  # always keep both loaded
    SEQUENTIAL = "sequential"      # always evict one before loading the other
def _available_vram_gb() -> float:
"""Return free VRAM in GB on the default CUDA device, or 0 if no GPU."""
if not torch.cuda.is_available():
return 0.0
free, _ = torch.cuda.mem_get_info()
return free / (1024 ** 3)
def _total_vram_gb() -> float:
if not torch.cuda.is_available():
return 0.0
_, total = torch.cuda.mem_get_info()
return total / (1024 ** 3)
def _used_vram_gb() -> float:
if not torch.cuda.is_available():
return 0.0
allocated = torch.cuda.memory_allocated()
reserved = torch.cuda.memory_reserved()
return max(allocated, reserved) / (1024 ** 3)
class ModelMemoryManager:
    """
    Coordinates AudioDiT + Whisper model lifecycle.

    Typical usage::

        mgr = ModelMemoryManager(mode="auto")
        tts_model, tokenizer = mgr.get_tts(audiodit_size="1B", device="cuda")
        # ... generate audio ...
        whisper = mgr.get_whisper(whisper_size="turbo")
        text, lang = whisper.transcribe("audio.wav")
        mgr.release_all()
    """

    def __init__(self, mode: str = "auto"):
        # LoadMode(...) raises ValueError for an unrecognized mode string.
        self.mode = LoadMode(mode)
        self._tts_model = None
        self._tts_tokenizer = None
        self._tts_size: Optional[str] = None
        self._tts_device: Optional[str] = None
        self._whisper: Optional[object] = None  # WhisperHelper instance
        self._whisper_size: Optional[str] = None

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------
    def get_tts(self, audiodit_size: str = "1B", device: str = "cuda"):
        """
        Return (AudioDiTModel, tokenizer), loading if necessary.

        If mode is sequential and Whisper is loaded, Whisper is unloaded first.

        :param audiodit_size: "1B" or "3.5B" (keys of AUDIODIT_VRAM).
        :param device: torch device string the model should live on.
        """
        # Cache hit requires BOTH size and device to match.  (Bug fix:
        # previously the device was ignored, so a cached model could be
        # returned on a different device than the one requested.)
        if (self._tts_model is not None
                and self._tts_size == audiodit_size
                and self._tts_device == device):
            return self._tts_model, self._tts_tokenizer
        # Need to load a (potentially different) TTS model
        if self._tts_model is not None:
            self._unload_tts()
        # Sequential mode (or AUTO without room): unload Whisper first
        if self._should_unload_whisper_for_tts(audiodit_size):
            logger.info("Sequential mode: unloading Whisper before loading AudioDiT %s", audiodit_size)
            self._unload_whisper()
        self._load_tts(audiodit_size, device)
        return self._tts_model, self._tts_tokenizer

    def get_whisper(self, whisper_size: str = "turbo"):
        """
        Return WhisperHelper, loading if necessary.

        If mode is sequential and TTS is loaded, TTS is unloaded first.

        :param whisper_size: "turbo" or "large-v3" (keys of WHISPER_VRAM).
        """
        from whisper_helper import WhisperHelper
        if self._whisper is not None and self._whisper_size == whisper_size:
            return self._whisper
        # Need to (re)load a different size
        if self._whisper is not None:
            self._unload_whisper()
        # Sequential mode (or AUTO without room): unload TTS first
        if self._should_unload_tts_for_whisper(whisper_size):
            logger.info("Sequential mode: unloading AudioDiT before loading Whisper %s", whisper_size)
            self._unload_tts()
        # Reuse the last device TTS ran on so both models share one GPU;
        # otherwise fall back to plain CUDA/CPU autodetect.
        device = self._tts_device or ("cuda" if torch.cuda.is_available() else "cpu")
        self._whisper = WhisperHelper(model_size=whisper_size, device=device)
        self._whisper.load()
        self._whisper_size = whisper_size
        return self._whisper

    def release_tts(self):
        """Explicitly unload TTS model."""
        self._unload_tts()

    def release_whisper(self):
        """Explicitly unload Whisper model."""
        self._unload_whisper()

    def release_all(self):
        """Unload everything and free VRAM."""
        self._unload_tts()
        self._unload_whisper()

    # ------------------------------------------------------------------
    # Status helpers
    # ------------------------------------------------------------------
    def status(self) -> Dict:
        """Return a snapshot dict of load state and VRAM usage (GB, 2 dp)."""
        tts_loaded = self._tts_model is not None
        whisper_loaded = self._whisper is not None and getattr(self._whisper, "is_loaded", False)
        return {
            "mode": self.mode.value,
            "tts_loaded": tts_loaded,
            "tts_size": self._tts_size if tts_loaded else None,
            "whisper_loaded": whisper_loaded,
            "whisper_size": self._whisper_size if whisper_loaded else None,
            "vram_used_gb": round(_used_vram_gb(), 2),
            "vram_total_gb": round(_total_vram_gb(), 2),
            "vram_free_gb": round(_available_vram_gb(), 2),
        }

    def status_str(self) -> str:
        """Human-readable, multi-line rendering of :meth:`status`."""
        s = self.status()
        lines = [
            f"Mode: {s['mode']}",
            f"TTS: {'[ON] ' + s['tts_size'] if s['tts_loaded'] else '[OFF] not loaded'}",
            f"Whisper: {'[ON] ' + s['whisper_size'] if s['whisper_loaded'] else '[OFF] not loaded'}",
        ]
        if torch.cuda.is_available():
            lines.append(
                f"VRAM: {s['vram_used_gb']:.1f} / {s['vram_total_gb']:.1f} GB "
                f"({s['vram_free_gb']:.1f} GB free)"
            )
        return "\n".join(lines)

    # ------------------------------------------------------------------
    # Private helpers
    # ------------------------------------------------------------------
    def _should_unload_whisper_for_tts(self, audiodit_size: str) -> bool:
        """Decide whether Whisper must be evicted before loading AudioDiT."""
        if self._whisper is None:
            return False
        if self.mode == LoadMode.SEQUENTIAL:
            return True
        if self.mode == LoadMode.SIMULTANEOUS:
            return False
        # AUTO: evict only if both estimated footprints + headroom don't fit.
        # "available" adds Whisper's share back because its footprint is
        # counted again inside "needed" (it would remain resident).
        needed = AUDIODIT_VRAM.get(audiodit_size, 10.0) + WHISPER_VRAM.get(self._whisper_size, 3.0)
        available = _available_vram_gb() + WHISPER_VRAM.get(self._whisper_size, 3.0)  # pretend whisper free
        return needed + VRAM_HEADROOM_GB > available

    def _should_unload_tts_for_whisper(self, whisper_size: str) -> bool:
        """Decide whether AudioDiT must be evicted before loading Whisper."""
        if self._tts_model is None:
            return False
        if self.mode == LoadMode.SEQUENTIAL:
            return True
        if self.mode == LoadMode.SIMULTANEOUS:
            return False
        # AUTO: mirror of _should_unload_whisper_for_tts, with TTS's
        # estimated share added back into the availability figure.
        needed = AUDIODIT_VRAM.get(self._tts_size, 10.0) + WHISPER_VRAM.get(whisper_size, 3.0)
        available = _available_vram_gb() + AUDIODIT_VRAM.get(self._tts_size, 10.0)
        return needed + VRAM_HEADROOM_GB > available

    def _load_tts(self, audiodit_size: str, device: str):
        """Load AudioDiT + its tokenizer and record them on the instance.

        Prefers a local checkout under ``models/audiodit/<size>``; falls
        back to the HF Hub repo id.  Raises whatever the underlying
        ``from_pretrained`` raises on failure.
        """
        import audiodit  # noqa: F401 – registers AutoConfig / AutoModel
        from audiodit import AudioDiTModel
        from transformers import AutoTokenizer
        from pathlib import Path
        from safetensors import safe_open

        # Prefer local model dir; fall back to HF Hub id
        local_dir_map = {
            "1B": Path(__file__).parent / "models" / "audiodit" / "1B",
            "3.5B": Path(__file__).parent / "models" / "audiodit" / "3.5B",
        }
        hf_id_map = {
            "1B": "meituan-longcat/LongCat-AudioDiT-1B",
            "3.5B": "meituan-longcat/LongCat-AudioDiT-3.5B",
        }
        local_dir = local_dir_map.get(audiodit_size)
        if local_dir and (local_dir / "config.json").exists():
            model_id = str(local_dir)
            safetensors_path = local_dir / "model.safetensors"
        else:
            # Unknown sizes are passed through verbatim as an HF repo id.
            model_id = hf_id_map.get(audiodit_size, audiodit_size)
            safetensors_path = None
        logger.info("Loading AudioDiT %s from %s on %s …", audiodit_size, model_id, device)
        torch_device = torch.device(device)
        model = AudioDiTModel.from_pretrained(model_id).to(torch_device)
        # Transformers 5.x uses meta-device init which breaks weight_norm parameters
        # in the VAE (weight_g stays zero → NaN output). Fix: reload VAE weights
        # directly from safetensors, bypassing the meta-device path.
        # When loading from HF Hub, find the cached safetensors file.
        if safetensors_path is None:
            try:
                from huggingface_hub import try_to_load_from_cache
                cached = try_to_load_from_cache(model_id, "model.safetensors")
                if cached and Path(cached).exists():
                    safetensors_path = Path(cached)
            except Exception:
                # Best-effort: missing cache just means we skip the VAE fix
                # below and warn.
                pass
        if safetensors_path and Path(safetensors_path).exists():
            logger.info("Reloading VAE weights from safetensors (meta-device fix) …")
            vae_sd = {}
            with safe_open(str(safetensors_path), framework="pt", device="cpu") as f:
                for k in f.keys():
                    # Strip the "vae." prefix so keys match model.vae's own
                    # state_dict namespace.
                    if k.startswith("vae."):
                        vae_sd[k[4:]] = f.get_tensor(k)
            model.vae.load_state_dict(vae_sd, strict=True)
            logger.info("VAE weights reloaded OK.")
        else:
            logger.warning("Could not find safetensors for VAE fix — output may be silence.")
        model.vae.to_half()
        model.eval()
        tokenizer = AutoTokenizer.from_pretrained(model.config.text_encoder_model)
        self._tts_model = model
        self._tts_tokenizer = tokenizer
        self._tts_size = audiodit_size
        self._tts_device = device
        logger.info("AudioDiT %s loaded.", audiodit_size)

    def _unload_tts(self):
        """Drop the TTS model/tokenizer and return their VRAM to the pool."""
        if self._tts_model is None:
            return
        logger.info("Unloading AudioDiT %s …", self._tts_size)
        del self._tts_model
        del self._tts_tokenizer
        self._tts_model = None
        self._tts_tokenizer = None
        self._tts_size = None
        # NOTE: _tts_device is intentionally kept as the "last known
        # device" so get_whisper() can reuse it.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        logger.info("AudioDiT unloaded.")

    def _unload_whisper(self):
        """Ask the WhisperHelper to free itself and forget the reference."""
        if self._whisper is None:
            return
        logger.info("Unloading Whisper %s …", self._whisper_size)
        # WhisperHelper.unload() is expected to free its own VRAM;
        # no extra empty_cache() here (matches existing behavior).
        self._whisper.unload()
        self._whisper = None
        self._whisper_size = None
        logger.info("Whisper unloaded.")