# FILE: api/ltx_pool_manager.py
# DESCRIPTION: A singleton pool manager for the LTX-Video pipeline.
# This module is the "secret weapon": it handles loading, device placement,
# and applies a runtime monkey patch to the LTX pipeline for full control
# and compatibility with the ADUC-SDR architecture, especially for latent conditioning.
import logging
import threading
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from dataclasses import dataclass

import torch
import yaml
from diffusers.utils.torch_utils import randn_tensor
from huggingface_hub import hf_hub_download
# --- Imports from our architecture ---
from api.gpu_manager import gpu_manager

# --- Imports from the LTX-Video library and utilities ---
from api.ltx.ltx_utils import build_ltx_pipeline_on_cpu
from ltx_video.pipelines.pipeline_ltx_video import LTXVideoPipeline
from ltx_video.models.autoencoders.vae_encode import vae_encode, latent_to_pixel_coords
# ==============================================================================
# --- CONDITIONING DATACLASS DEFINITIONS ---
# ==============================================================================


@dataclass
class ConditioningItem:
    """Conditioning item backed by a PIXEL tensor (e.g. from an image)."""
    pixel_tensor: torch.Tensor
    media_frame_number: int
    conditioning_strength: float


@dataclass
class LatentConditioningItem:
    """Our "secret weapon": a conditioning item backed by a LATENT tensor (e.g. from chunk overlap)."""
    latent_tensor: torch.Tensor
    media_frame_number: int
    conditioning_strength: float
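
# A minimal construction sketch (hypothetical tensors and frame numbers) showing
# how the two item types are meant to be combined: a pixel-space image anchors
# frame 0, while a latent tensor carried over from the previous chunk conditions
# a mid-sequence frame.
#
#     first_frame = ConditioningItem(
#         pixel_tensor=image_tensor,        # (B, C, F, H, W) pixel-space tensor
#         media_frame_number=0,             # anchor at the start of the video
#         conditioning_strength=1.0,
#     )
#     overlap = LatentConditioningItem(
#         latent_tensor=overlap_latents,    # already in VAE latent space
#         media_frame_number=48,            # hypothetical mid-sequence frame
#         conditioning_strength=0.9,
#     )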

# ==============================================================================
# --- THE MONKEY PATCH ---
# Our custom version of `prepare_conditioning` that understands both dataclasses.
# ==============================================================================
def _aduc_prepare_conditioning_patch(
    self: "LTXVideoPipeline",
    conditioning_items: Optional[List[Union[ConditioningItem, LatentConditioningItem]]],
    init_latents: torch.Tensor,
    num_frames: int, height: int, width: int,  # signature kept for compatibility
    vae_per_channel_normalize: bool = False,
    generator=None,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], int]:
    # With no conditioning items, just patchify the initial latents and return.
    if not conditioning_items:
        latents, latent_coords = self.patchifier.patchify(latents=init_latents)
        pixel_coords = latent_to_pixel_coords(latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
        return latents, pixel_coords, None, 0

    # Prepare the mask and the lists that accumulate the extra conditioning tensors.
    init_conditioning_mask = torch.zeros_like(init_latents[:, 0, ...], dtype=torch.float32, device=init_latents.device)
    extra_conditioning_latents, extra_conditioning_pixel_coords, extra_conditioning_mask = [], [], []
    extra_conditioning_num_latents = 0
    for item in conditioning_items:
        strength = item.conditioning_strength
        media_frame_number = item.media_frame_number

        # --- CORE PATCH LOGIC ---
        if isinstance(item, ConditioningItem):
            # The item is a PIXEL tensor (e.g. the initial image).
            logging.debug("ADUC patch: processing ConditioningItem (pixels).")
            # Encode the pixel tensor into latent space with the VAE,
            # running the op on the VAE's device to avoid device-mismatch errors.
            pixel_tensor_on_vae_device = item.pixel_tensor.to(device=self.vae.device, dtype=self.vae.dtype)
            media_item_latents = vae_encode(pixel_tensor_on_vae_device, self.vae, vae_per_channel_normalize=vae_per_channel_normalize)
            # Bring the result back to the main (Transformer) device.
            media_item_latents = media_item_latents.to(device=init_latents.device, dtype=init_latents.dtype)
        elif isinstance(item, LatentConditioningItem):
            # The item is already a LATENT tensor (e.g. chunk overlap).
            logging.debug("ADUC patch: processing LatentConditioningItem (latents).")
            # Just make sure the tensor is on the right device with the right dtype.
            media_item_latents = item.latent_tensor.to(device=init_latents.device, dtype=init_latents.dtype)
        else:
            logging.warning(f"ADUC patch: ignoring conditioning item of unknown type '{type(item)}'.")
            continue
        # Original pipeline logic, now operating on a guaranteed `media_item_latents`.
        if media_frame_number == 0:
            f_l, h_l, w_l = media_item_latents.shape[-3:]
            init_latents[..., :f_l, :h_l, :w_l] = torch.lerp(init_latents[..., :f_l, :h_l, :w_l], media_item_latents, strength)
            init_conditioning_mask[..., :f_l, :h_l, :w_l] = strength
        else:  # Conditioning on intermediate frames.
            noise = randn_tensor(media_item_latents.shape, generator=generator, device=media_item_latents.device, dtype=media_item_latents.dtype)
            media_item_latents = torch.lerp(noise, media_item_latents, strength)
            patched_latents, latent_coords = self.patchifier.patchify(latents=media_item_latents)
            pixel_coords = latent_to_pixel_coords(latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
            pixel_coords[:, 0] += media_frame_number
            extra_conditioning_num_latents += patched_latents.shape[1]
            new_mask = torch.full(patched_latents.shape[:2], strength, dtype=torch.float32, device=init_latents.device)
            extra_conditioning_latents.append(patched_latents)
            extra_conditioning_pixel_coords.append(pixel_coords)
            extra_conditioning_mask.append(new_mask)
    # Patchify the initial latents and concatenate the extra conditioning tensors.
    init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents)
    init_pixel_coords = latent_to_pixel_coords(init_latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
    init_conditioning_mask, _ = self.patchifier.patchify(latents=init_conditioning_mask.unsqueeze(1))
    init_conditioning_mask = init_conditioning_mask.squeeze(-1)

    if extra_conditioning_latents:
        init_latents = torch.cat([*extra_conditioning_latents, init_latents], dim=1)
        init_pixel_coords = torch.cat([*extra_conditioning_pixel_coords, init_pixel_coords], dim=2)
        init_conditioning_mask = torch.cat([*extra_conditioning_mask, init_conditioning_mask], dim=1)

    return init_latents, init_pixel_coords, init_conditioning_mask, extra_conditioning_num_latents
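
# A minimal call sketch (hypothetical shapes and variables) of how the patched
# method is invoked once bound to a pipeline instance; extra conditioning
# tokens are prepended along the token dimension:
#
#     latents, pixel_coords, cond_mask, n_extra = pipeline.prepare_conditioning(
#         conditioning_items=[first_frame, overlap],
#         init_latents=init_latents,        # (B, C, F_lat, H_lat, W_lat)
#         num_frames=num_frames, height=height, width=width,
#         vae_per_channel_normalize=True,
#         generator=torch.Generator(device="cuda").manual_seed(42),
#     )
#     # latents:   patchified token sequence with n_extra tokens prepended
#     # cond_mask: per-token conditioning strength, or None when no items given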

# ==============================================================================
# --- LTX WORKER AND POOL MANAGER ---
# ==============================================================================


class LTXWorker:
    """Manages one LTX pipeline instance on a pair of GPUs (main + VAE)."""

    def __init__(self, main_device_str: str, vae_device_str: str, config: dict):
        self.main_device = torch.device(main_device_str)
        self.vae_device = torch.device(vae_device_str)
        self.config = config
        self.pipeline: Optional[LTXVideoPipeline] = None
        self._load_and_patch_pipeline()

    def _load_and_patch_pipeline(self):
        logging.info(f"[LTXWorker-{self.main_device}] Loading the LTX pipeline on the CPU...")
        self.pipeline, _ = build_ltx_pipeline_on_cpu(self.config)

        logging.info(f"[LTXWorker-{self.main_device}] Moving pipeline to GPUs (Main: {self.main_device}, VAE: {self.vae_device})...")
        self.pipeline.to(self.main_device)
        self.pipeline.vae.to(self.vae_device)

        logging.info(f"[LTXWorker-{self.main_device}] Applying the ADUC-SDR patch to 'prepare_conditioning'...")
        # Replace the instance method with our patch (bound via __get__).
        self.pipeline.prepare_conditioning = _aduc_prepare_conditioning_patch.__get__(self.pipeline, LTXVideoPipeline)
        logging.info(f"[LTXWorker-{self.main_device}] ✅ Pipeline warm, patched, and ready to use.")


class LTXPoolManager:
    _instance = None
    _lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        with cls._lock:
            if cls._instance is None:
                cls._instance = super().__new__(cls)
                cls._instance._initialized = False
            return cls._instance

    def __init__(self):
        if self._initialized:
            return
        with self._lock:
            if self._initialized:
                return
            logging.info("⚙️ Initializing the LTXPoolManager singleton...")
            self.config = self._load_config()
            self._resolve_model_paths_from_cache()
            main_device_str = str(gpu_manager.get_ltx_device())
            vae_device_str = str(gpu_manager.get_ltx_vae_device())
            self.worker = LTXWorker(main_device_str, vae_device_str, self.config)
            self._initialized = True
            logging.info("✅ LTXPoolManager ready.")
    def _load_config(self) -> Dict:
        """Loads the main LTX YAML configuration."""
        config_path = Path("/data/LTX-Video/configs/ltxv-13b-0.9.8-distilled-fp8.yaml")
        with open(config_path, "r") as file:
            return yaml.safe_load(file)
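
    # The YAML is expected to contain at least the keys consumed by
    # `_resolve_model_paths_from_cache` below; the filenames here are
    # illustrative, not confirmed:
    #
    #     checkpoint_path: "ltxv-13b-0.9.8-distilled-fp8.safetensors"
    #     spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"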
    def _resolve_model_paths_from_cache(self):
        """Ensures the in-memory config holds absolute paths to the cached model files."""
        try:
            main_ckpt_path = hf_hub_download(repo_id="Lightricks/LTX-Video", filename=self.config["checkpoint_path"])
            self.config["checkpoint_path"] = main_ckpt_path
            if self.config.get("spatial_upscaler_model_path"):
                upscaler_path = hf_hub_download(repo_id="Lightricks/LTX-Video", filename=self.config["spatial_upscaler_model_path"])
                self.config["spatial_upscaler_model_path"] = upscaler_path
        except Exception as e:
            logging.critical(f"Failed to resolve LTX model paths. Was setup.py run? Error: {e}", exc_info=True)
            raise
    def get_pipeline(self) -> LTXVideoPipeline:
        """Returns the pipeline instance, already loaded and patched."""
        return self.worker.pipeline

# --- Global Singleton Instance ---
# The application imports this instance to interact with LTX.
try:
    ltx_pool_manager = LTXPoolManager()
except Exception:
    logging.critical("Critical failure while initializing the LTXPoolManager.", exc_info=True)
    ltx_pool_manager = None
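
# A minimal consumer-side sketch (hypothetical call site): application code
# imports the singleton and pulls the patched pipeline from it.
#
#     from api.ltx_pool_manager import ltx_pool_manager, LatentConditioningItem
#
#     if ltx_pool_manager is not None:
#         pipeline = ltx_pool_manager.get_pipeline()
#         # `pipeline.prepare_conditioning` now accepts LatentConditioningItem
#         # instances alongside the regular pixel-based ConditioningItems.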