Delete api/ltx_server (7).py
api/ltx_server (7).py
DELETED
@@ -1,832 +0,0 @@
# ltx_server.py — VideoService (beta 1.1)
# Always output_type="latent"; at the end: VAE (whole block) → pixels → MP4.
# Ignores UserWarning/FutureWarning and injects the VAE into the manager with the correct dtype/device.

# --- 0. WARNINGS AND ENVIRONMENT ---
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", message=".*")

from huggingface_hub import logging

logging.set_verbosity_debug()

import os

# These flags are read back via os.getenv() inside VideoService, so they must
# live in the process environment, not as module-level variables.
os.environ.setdefault("LTXV_DEBUG", "1")
os.environ.setdefault("LTXV_FRAME_LOG_EVERY", "8")
# --- 1. IMPORTS ---
import subprocess, shlex, tempfile
import torch
import json
import numpy as np
import random
import yaml
from typing import List, Dict
from pathlib import Path
import imageio
from huggingface_hub import hf_hub_download
import sys
import gc
import shutil
import contextlib
import time
import traceback

# Singletons (simple versions)
from managers.vae_manager import vae_manager_singleton
from tools.video_encode_tool import video_encode_tool_singleton
# --- 2. DEPENDENCY MANAGEMENT AND SETUP ---
def _query_gpu_processes_via_nvml(device_index: int) -> List[Dict]:
    try:
        import psutil
        import pynvml as nvml
        nvml.nvmlInit()
        handle = nvml.nvmlDeviceGetHandleByIndex(device_index)
        try:
            procs = nvml.nvmlDeviceGetComputeRunningProcesses_v3(handle)
        except Exception:
            procs = nvml.nvmlDeviceGetComputeRunningProcesses(handle)
        results = []
        for p in procs:
            pid = int(p.pid)
            used_mb = None
            try:
                if getattr(p, "usedGpuMemory", None) is not None and p.usedGpuMemory not in (0,):
                    used_mb = max(0, int(p.usedGpuMemory) // (1024 * 1024))
            except Exception:
                used_mb = None
            name = "unknown"
            user = "unknown"
            try:
                pr = psutil.Process(pid)
                name = pr.name()
                user = pr.username()
            except Exception:
                pass
            results.append({"pid": pid, "name": name, "user": user, "used_mb": used_mb})
        nvml.nvmlShutdown()
        return results
    except Exception:
        return []
def _query_gpu_processes_via_nvidiasmi(device_index: int) -> List[Dict]:
    cmd = f"nvidia-smi -i {device_index} --query-compute-apps=pid,process_name,used_memory --format=csv,noheader,nounits"
    try:
        out = subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT, text=True, timeout=2.0)
    except Exception:
        return []
    results = []
    for line in out.strip().splitlines():
        parts = [p.strip() for p in line.split(",")]
        if len(parts) >= 3:
            try:
                pid = int(parts[0])
                name = parts[1]
                used_mb = int(parts[2])
                user = "unknown"
                try:
                    import psutil
                    pr = psutil.Process(pid)
                    user = pr.username()
                except Exception:
                    pass
                results.append({"pid": pid, "name": name, "user": user, "used_mb": used_mb})
            except Exception:
                continue
    return results
def _gpu_process_table(processes: List[Dict], current_pid: int) -> str:
    if not processes:
        return " - Active processes: (none)\n"
    processes = sorted(processes, key=lambda x: (x.get("used_mb") or 0), reverse=True)
    lines = [" - Active processes (PID | USER | NAME | VRAM MB):"]
    for p in processes:
        star = "*" if p["pid"] == current_pid else " "
        used_str = str(p["used_mb"]) if p.get("used_mb") is not None else "N/A"
        lines.append(f"   {star} {p['pid']} | {p['user']} | {p['name']} | {used_str}")
    return "\n".join(lines) + "\n"
def run_setup():
    setup_script_path = "setup.py"
    if not os.path.exists(setup_script_path):
        print("[DEBUG] 'setup.py' not found. Skipping dependency cloning.")
        return
    try:
        print("[DEBUG] Running setup.py for dependencies...")
        subprocess.run([sys.executable, setup_script_path], check=True)
        print("[DEBUG] Setup completed successfully.")
    except subprocess.CalledProcessError as e:
        print(f"[DEBUG] ERROR in setup.py (code {e.returncode}). Aborting.")
        sys.exit(1)
from api.ltx.inference import (
    create_ltx_video_pipeline,
    create_latent_upsampler,
    load_image_to_tensor_with_resize_and_crop,
    seed_everething,
    calculate_padding,
    load_media_file,
)

DEPS_DIR = Path("/data")
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
if not LTX_VIDEO_REPO_DIR.exists():
    print(f"[DEBUG] Repository not found at {LTX_VIDEO_REPO_DIR}. Running setup...")
    run_setup()

def add_deps_to_path():
    repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
    if repo_path not in sys.path:
        sys.path.insert(0, repo_path)
    print(f"[DEBUG] Repo added to sys.path: {repo_path}")

add_deps_to_path()
# --- 3. MODEL-SPECIFIC IMPORTS ---

from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline
from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy

# --- 4. LOGGING HELPERS ---
def log_tensor_info(tensor, name="Tensor"):
    if not isinstance(tensor, torch.Tensor):
        print(f"\n[INFO] '{name}' is not a tensor.")
        return
    print(f"\n--- Tensor: {name} ---")
    print(f" - Shape: {tuple(tensor.shape)}")
    print(f" - Dtype: {tensor.dtype}")
    print(f" - Device: {tensor.device}")
    if tensor.numel() > 0:
        try:
            print(f" - Min: {tensor.min().item():.4f} Max: {tensor.max().item():.4f} Mean: {tensor.mean().item():.4f}")
        except Exception:
            pass
    print("------------------------------------------\n")
# --- 5. MAIN SERVICE CLASS ---
class VideoService:
    def __init__(self):
        t0 = time.perf_counter()
        print("[DEBUG] Initializing VideoService...")
        self.debug = os.getenv("LTXV_DEBUG", "1") == "1"
        self.frame_log_every = int(os.getenv("LTXV_FRAME_LOG_EVERY", "8"))
        self.config = self._load_config()
        print(f"[DEBUG] Config loaded (precision={self.config.get('precision')}, sampler={self.config.get('sampler')})")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"[DEBUG] Selected device: {self.device}")
        self.last_memory_reserved_mb = 0.0
        self._tmp_dirs = set()
        self._tmp_files = set()
        self._last_outputs = []

        self.pipeline, self.latent_upsampler = self._load_models()
        print(f"[DEBUG] Pipeline and upsampler loaded. Upsampler active? {bool(self.latent_upsampler)}")

        print(f"[DEBUG] Moving models to {self.device}...")
        self.pipeline.to(self.device)
        if self.latent_upsampler:
            self.latent_upsampler.to(self.device)

        self._apply_precision_policy()
        print(f"[DEBUG] runtime_autocast_dtype = {getattr(self, 'runtime_autocast_dtype', None)}")

        # Inject pipeline/VAE into the manager (prevents vae=None)
        vae_manager_singleton.attach_pipeline(
            self.pipeline,
            device=self.device,
            autocast_dtype=self.runtime_autocast_dtype
        )
        print(f"[DEBUG] VAE manager attached: has_vae={hasattr(self.pipeline, 'vae')} device={self.device}")

        if self.device == "cuda":
            torch.cuda.empty_cache()
            self._log_gpu_memory("After loading models")

        print(f"[DEBUG] VideoService ready. boot_time={time.perf_counter()-t0:.3f}s")
    def _log_gpu_memory(self, stage_name: str):
        if self.device != "cuda":
            return
        device_index = torch.cuda.current_device() if torch.cuda.is_available() else 0
        current_reserved_b = torch.cuda.memory_reserved(device_index)
        current_reserved_mb = current_reserved_b / (1024 ** 2)
        total_memory_b = torch.cuda.get_device_properties(device_index).total_memory
        total_memory_mb = total_memory_b / (1024 ** 2)
        peak_reserved_mb = torch.cuda.max_memory_reserved(device_index) / (1024 ** 2)
        delta_mb = current_reserved_mb - getattr(self, "last_memory_reserved_mb", 0.0)
        processes = _query_gpu_processes_via_nvml(device_index) or _query_gpu_processes_via_nvidiasmi(device_index)
        print(f"\n--- [GPU LOG] {stage_name} (cuda:{device_index}) ---")
        print(f" - Reserved: {current_reserved_mb:.2f} MB / {total_memory_mb:.2f} MB (Δ={delta_mb:+.2f} MB)")
        if peak_reserved_mb > getattr(self, "last_memory_reserved_mb", 0.0):
            print(f" - Peak reserved (this stage): {peak_reserved_mb:.2f} MB")
        print(_gpu_process_table(processes, os.getpid()), end="")
        print("--------------------------------------------------\n")
        self.last_memory_reserved_mb = current_reserved_mb
    def _register_tmp_dir(self, d: str):
        if d and os.path.isdir(d):
            self._tmp_dirs.add(d)
            print(f"[DEBUG] Registered tmp dir: {d}")

    def _register_tmp_file(self, f: str):
        if f and os.path.exists(f):
            self._tmp_files.add(f)
            print(f"[DEBUG] Registered tmp file: {f}")
    def finalize(self, keep_paths=None, extra_paths=None, clear_gpu=True):
        print("[DEBUG] Finalize: starting cleanup...")
        keep = set(keep_paths or [])
        extras = set(extra_paths or [])
        removed_files = 0
        for f in list(self._tmp_files | extras):
            try:
                if f not in keep and os.path.isfile(f):
                    os.remove(f)
                    removed_files += 1
                    print(f"[DEBUG] Removed tmp file: {f}")
            except Exception as e:
                print(f"[DEBUG] Failed to remove file {f}: {e}")
            finally:
                self._tmp_files.discard(f)
        removed_dirs = 0
        for d in list(self._tmp_dirs):
            try:
                if d not in keep and os.path.isdir(d):
                    shutil.rmtree(d, ignore_errors=True)
                    removed_dirs += 1
                    print(f"[DEBUG] Removed tmp dir: {d}")
            except Exception as e:
                print(f"[DEBUG] Failed to remove dir {d}: {e}")
            finally:
                self._tmp_dirs.discard(d)
        print(f"[DEBUG] Finalize: files removed={removed_files}, dirs removed={removed_dirs}")
        gc.collect()
        try:
            if clear_gpu and torch.cuda.is_available():
                torch.cuda.empty_cache()
                try:
                    torch.cuda.ipc_collect()
                except Exception:
                    pass
        except Exception as e:
            print(f"[DEBUG] Finalize: GPU cleanup failed: {e}")
        try:
            self._log_gpu_memory("After finalize")
        except Exception as e:
            print(f"[DEBUG] Post-finalize GPU log failed: {e}")
    def _load_config(self):
        base = LTX_VIDEO_REPO_DIR / "configs"
        candidates = [
            base / "ltxv-13b-0.9.8-dev-fp8.yaml",
            base / "ltxv-13b-0.9.8-distilled-fp8.yaml",
            base / "ltxv-13b-0.9.8-distilled.yaml",
        ]
        for cfg in candidates:
            if cfg.exists():
                print(f"[DEBUG] Selected config: {cfg}")
                with open(cfg, "r") as file:
                    return yaml.safe_load(file)
        # Fallback: this path was already checked above, so if we get here the
        # open() below raises FileNotFoundError with the offending path.
        cfg = base / "ltxv-13b-0.9.8-distilled-fp8.yaml"
        print(f"[DEBUG] Fallback config: {cfg}")
        with open(cfg, "r") as file:
            return yaml.safe_load(file)
    def _load_models(self):
        t0 = time.perf_counter()
        LTX_REPO = "Lightricks/LTX-Video"
        print("[DEBUG] Downloading main checkpoint...")
        distilled_model_path = hf_hub_download(
            repo_id=LTX_REPO,
            filename=self.config["checkpoint_path"],
            local_dir=os.getenv("HF_HOME"),
            cache_dir=os.getenv("HF_HOME_CACHE"),
            token=os.getenv("HF_TOKEN"),
        )
        self.config["checkpoint_path"] = distilled_model_path
        print(f"[DEBUG] Checkpoint at: {distilled_model_path}")

        print("[DEBUG] Downloading spatial upscaler...")
        spatial_upscaler_path = hf_hub_download(
            repo_id=LTX_REPO,
            filename=self.config["spatial_upscaler_model_path"],
            local_dir=os.getenv("HF_HOME"),
            cache_dir=os.getenv("HF_HOME_CACHE"),
            token=os.getenv("HF_TOKEN")
        )
        self.config["spatial_upscaler_model_path"] = spatial_upscaler_path
        print(f"[DEBUG] Upscaler at: {spatial_upscaler_path}")

        print("[DEBUG] Building pipeline...")
        pipeline = create_ltx_video_pipeline(
            ckpt_path=self.config["checkpoint_path"],
            precision=self.config["precision"],
            text_encoder_model_name_or_path=self.config["text_encoder_model_name_or_path"],
            sampler=self.config["sampler"],
            device="cpu",
            enhance_prompt=False,
            prompt_enhancer_image_caption_model_name_or_path=self.config["prompt_enhancer_image_caption_model_name_or_path"],
            prompt_enhancer_llm_model_name_or_path=self.config["prompt_enhancer_llm_model_name_or_path"],
        )
        print("[DEBUG] Pipeline ready.")

        latent_upsampler = None
        if self.config.get("spatial_upscaler_model_path"):
            print("[DEBUG] Building latent_upsampler...")
            latent_upsampler = create_latent_upsampler(self.config["spatial_upscaler_model_path"], device="cpu")
            print("[DEBUG] Upsampler ready.")
        print(f"[DEBUG] _load_models() total time={time.perf_counter()-t0:.3f}s")
        return pipeline, latent_upsampler
    def _promote_fp8_weights_to_bf16(self, module):
        if not isinstance(module, torch.nn.Module):
            print("[DEBUG] FP8→BF16 promotion skipped: target is not an nn.Module.")
            return
        f8 = getattr(torch, "float8_e4m3fn", None)
        if f8 is None:
            print("[DEBUG] torch.float8_e4m3fn unavailable.")
            return
        p_cnt = b_cnt = 0
        for _, p in module.named_parameters(recurse=True):
            try:
                if p.dtype == f8:
                    with torch.no_grad():
                        p.data = p.data.to(torch.bfloat16)
                    p_cnt += 1
            except Exception:
                pass
        for _, b in module.named_buffers(recurse=True):
            try:
                if hasattr(b, "dtype") and b.dtype == f8:
                    b.data = b.data.to(torch.bfloat16)
                    b_cnt += 1
            except Exception:
                pass
        print(f"[DEBUG] FP8→BF16: params_promoted={p_cnt}, buffers_promoted={b_cnt}")
    def _apply_precision_policy(self):
        prec = str(self.config.get("precision", "")).lower()
        self.runtime_autocast_dtype = torch.float32
        print(f"[DEBUG] Applying precision policy: {prec}")
        if prec == "float8_e4m3fn":
            self.runtime_autocast_dtype = torch.bfloat16
            force_promote = os.getenv("LTXV_FORCE_BF16_ON_FP8", "0") == "1"
            print(f"[DEBUG] FP8 detected. force_promote={force_promote}")
            if force_promote and hasattr(torch, "float8_e4m3fn"):
                try:
                    self._promote_fp8_weights_to_bf16(self.pipeline)
                except Exception as e:
                    print(f"[DEBUG] FP8→BF16 promotion on the pipeline failed: {e}")
                try:
                    if self.latent_upsampler:
                        self._promote_fp8_weights_to_bf16(self.latent_upsampler)
                except Exception as e:
                    print(f"[DEBUG] FP8→BF16 promotion on the upsampler failed: {e}")
        elif prec == "bfloat16":
            self.runtime_autocast_dtype = torch.bfloat16
        elif prec == "mixed_precision":
            self.runtime_autocast_dtype = torch.float16
        else:
            self.runtime_autocast_dtype = torch.float32
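    # Policy summary (comment added for clarity; mirrors the branches above):
    #   "float8_e4m3fn"   → autocast bfloat16 (optionally promote FP8 weights
    #                       to BF16 when LTXV_FORCE_BF16_ON_FP8=1)
    #   "bfloat16"        → autocast bfloat16
    #   "mixed_precision" → autocast float16
    #   anything else     → float32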
    def _prepare_conditioning_tensor(self, filepath, height, width, padding_values):
        print(f"[DEBUG] Loading conditioning media: {filepath}")
        tensor = load_image_to_tensor_with_resize_and_crop(filepath, height, width)
        tensor = torch.nn.functional.pad(tensor, padding_values)
        out = tensor.to(self.device, dtype=self.runtime_autocast_dtype) if self.device == "cuda" else tensor.to(self.device)
        print(f"[DEBUG] Cond shape={tuple(out.shape)} dtype={out.dtype} device={out.device}")
        return out
    def _dividir_latentes(self, latents, num_por_chunk: int, overlap: int = 1):
        # latents: [B, C, T, H, W]
        T = latents.shape[2]
        L = num_por_chunk + overlap   # window length (exclusive stop)
        stride = num_por_chunk - overlap  # advance per step
        print("================= DEBUG =================")
        print(f"[DEBUG] T={T} L={L} stride={stride}")
        print("[DEBUG] rule: slice [start:stop) with exclusive stop")

        if T <= L:
            start = 0
            stop = min(T, L)
            chunk = latents[:, :, start:stop, :, :].clone().detach()
            print(f"[DEBUG] single chunk: [:, :, {start}:{stop}, :, :] len={chunk.shape[2]}")
            print("=========================================")
            return [chunk]

        starts = []
        s = 0
        while s + L <= T:
            starts.append(s)
            s += stride
        if starts[-1] + L < T:
            starts.append(T - L)  # final window flush with the end

        # drop contiguous duplicates
        uniq = []
        for st in starts:
            if not uniq or uniq[-1] != st:
                uniq.append(st)

        chunks = []
        for i, st in enumerate(uniq):
            sp = st + L
            chunk = latents[:, :, st:sp, :, :].clone().detach()
            print(f"[DEBUG] chunk{i}: [:, :, {st}:{sp}, :, :] len={sp-st}")
            chunks.append(chunk)

        print("=========================================")
        return chunks
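    # Worked example of the windowing above (comment added for illustration):
    # with num_por_chunk=4 and overlap=1 the window is L=5 and the stride is 3,
    # so T=13 latent frames produce windows [0:5), [3:8), [6:11) plus a final
    # flush-to-end window [8:13). Consecutive chunks share `overlap` latent
    # frames, presumably so the pixel-space crossfade applied downstream in
    # _gerar_lista_com_transicoes has matching content to blend.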
    def _get_total_frames(self, video_path: str) -> int:
        cmd = [
            "ffprobe",
            "-v", "error",
            "-select_streams", "v:0",
            "-count_frames",
            "-show_entries", "stream=nb_read_frames",
            "-of", "default=nokey=1:noprint_wrappers=1",
            video_path
        ]
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return int(result.stdout.strip())
    def _gerar_lista_com_transicoes(self, pasta: str, video_paths: list[str], crossfade_frames: int = 8) -> list[str]:
        """
        Builds a new list of videos with smooth transitions (frame-by-frame blend),
        following Carlos's linear logic exactly.
        """
        poda = crossfade_frames
        total_partes = len(video_paths)
        video_fade_fim = None
        video_fade_ini = None
        nova_lista = []

        print("=========== CAUSAL CONCATENATION =============")
        print(f"[DEBUG] Starting pipeline with {total_partes} videos and {poda} crossfade frames")

        for i in range(total_partes):
            base = video_paths[i]

            # --- TRIM ---
            video_podado = os.path.join(pasta, f"{base}_podado_{i}.mp4")

            if i < total_partes - 1:
                end_frame = self._get_total_frames(base) - poda
            else:
                end_frame = self._get_total_frames(base)

            if i > 0:
                start_frame = poda
            else:
                start_frame = 0

            cmd_trim = (
                f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
                f'-vf "trim=start_frame={start_frame}:end_frame={end_frame},setpts=PTS-STARTPTS" '
                f'-an "{video_podado}"'
            )
            subprocess.run(cmd_trim, shell=True, check=True)

            # --- FADE-IN CLIP ---
            if i > 0:
                video_fade_ini = os.path.join(pasta, f"{base}_fade_ini_{i}.mp4")
                cmd_ini = (
                    f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
                    f'-vf "trim=end_frame={poda},setpts=PTS-STARTPTS" -an "{video_fade_ini}"'
                )
                subprocess.run(cmd_ini, shell=True, check=True)

            # --- TRANSITION ---
            if video_fade_fim and video_fade_ini:
                video_fade = os.path.join(pasta, f"transicao_{i}_{i+1}.mp4")
                # N is the frame index, so the blend weight ramps linearly from
                # 0 to 1 across the `poda` crossfade frames.
                cmd_blend = (
                    f'ffmpeg -y -hide_banner -loglevel error '
                    f'-i "{video_fade_fim}" -i "{video_fade_ini}" '
                    f'-filter_complex "[0:v][1:v]blend=all_expr=\'A*(1-N/{poda})+B*(N/{poda})\',format=yuv420p" '
                    f'-frames:v {poda} "{video_fade}"'
                )
                subprocess.run(cmd_blend, shell=True, check=True)
                print(f"[DEBUG] transition {i}/{i+1} added, {self._get_total_frames(video_fade)} frames ✅")
                nova_lista.append(video_fade)

            # --- FADE-OUT CLIP (not needed after the last part) ---
            if i < total_partes - 1:
                video_fade_fim = os.path.join(pasta, f"{base}_fade_fim_{i}.mp4")
                cmd_fim = (
                    f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
                    f'-vf "trim=start_frame={end_frame-poda},setpts=PTS-STARTPTS" -an "{video_fade_fim}"'
                )
                subprocess.run(cmd_fim, shell=True, check=True)

            nova_lista.append(video_podado)
            print(f"[DEBUG] Trimmed video {i+1} added, {self._get_total_frames(video_podado)} frames ✅")

        print("=========== CAUSAL CONCATENATION =============")
        print(f"[DEBUG] {nova_lista}")
        return nova_lista
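    # Frame accounting (comment added for illustration): with crossfade_frames=8,
    # each boundary drops the last 8 frames of the earlier part and the first 8
    # frames of the later part, then inserts a single 8-frame blended transition,
    # so the final video is 8 frames shorter per boundary than the raw parts.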
    def _concat_mp4s_no_reencode(self, mp4_list: List[str], out_path: str):
        """
        Concatenates multiple MP4s without re-encoding, using ffmpeg's concat demuxer.
        WARNING: all files must share the same codec, fps, resolution, etc.
        """
        if not mp4_list or len(mp4_list) < 2:
            raise ValueError("Provide at least two MP4 files to concatenate.")

        # Write the temporary list file for ffmpeg
        with tempfile.NamedTemporaryFile("w", delete=False, suffix=".txt") as f:
            for mp4 in mp4_list:
                f.write(f"file '{os.path.abspath(mp4)}'\n")
            list_path = f.name

        cmd = f"ffmpeg -y -f concat -safe 0 -i {list_path} -c copy {out_path}"
        print(f"[DEBUG] Concat: {cmd}")

        try:
            subprocess.check_call(shlex.split(cmd))
        finally:
            try:
                os.remove(list_path)
            except Exception:
                pass
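    # The list file consumed by the concat demuxer looks like this
    # (illustrative paths):
    #   file '/app/output/output_par_1.mp4'
    #   file '/app/output/transicao_0_1.mp4'
    #   file '/app/output/output_par_2.mp4'
    # "-c copy" then stitches the bitstreams without re-encoding, which is why
    # every part must already share codec, resolution, and frame rate.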
    def generate(
        self,
        prompt,
        negative_prompt,
        mode="text-to-video",
        start_image_filepath=None,
        middle_image_filepath=None,
        middle_frame_number=None,
        middle_image_weight=1.0,
        end_image_filepath=None,
        end_image_weight=1.0,
        input_video_filepath=None,
        height=512,
        width=704,
        duration=2.0,
        frames_to_use=9,
        seed=42,
        randomize_seed=True,
        guidance_scale=3.0,
        improve_texture=True,
        progress_callback=None,
        # Always latent → VAE → MP4 (simple path)
        external_decode=True,
    ):
        t_all = time.perf_counter()
        print(f"[DEBUG] generate() begin mode={mode} external_decode={external_decode} improve_texture={improve_texture}")
        if self.device == "cuda":
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()
        self._log_gpu_memory("Generation start")

        if mode == "image-to-video" and not start_image_filepath:
            raise ValueError("A start image is required for image-to-video mode")
        if mode == "video-to-video" and not input_video_filepath:
            raise ValueError("An input video is required for video-to-video mode")

        used_seed = random.randint(0, 2**32 - 1) if randomize_seed else int(seed)
        seed_everething(used_seed)
        print(f"[DEBUG] Seed used: {used_seed}")
        FPS = 24.0
        MAX_NUM_FRAMES = 2570
        target_frames_rounded = round(duration * FPS)
        n_val = round((float(target_frames_rounded) - 1.0) / 8.0)
        actual_num_frames = max(9, min(MAX_NUM_FRAMES, int(n_val * 8 + 1)))
        print(f"[DEBUG] Target frames: {actual_num_frames} (dur={duration}s @ {FPS}fps)")

        height_padded = ((height - 1) // 32 + 1) * 32
        width_padded = ((width - 1) // 32 + 1) * 32
        padding_values = calculate_padding(height, width, height_padded, width_padded)
        print(f"[DEBUG] Dimensions: ({height},{width}) -> pad ({height_padded},{width_padded}); padding={padding_values}")

        generator = torch.Generator(device=self.device).manual_seed(used_seed)
        conditioning_items = []
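        # Worked example (comment only): duration=2.0 s at 24 fps targets 48
        # frames; the sampler needs 8*n+1 frames, so n = round(47/8) = 6 and
        # actual_num_frames = 49. Spatial dims round up to multiples of 32,
        # e.g. height=500 would pad to 512.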
        if mode == "image-to-video":
            start_tensor = self._prepare_conditioning_tensor(start_image_filepath, height, width, padding_values)
            conditioning_items.append(ConditioningItem(start_tensor, 0, 1.0))
            if middle_image_filepath and middle_frame_number is not None:
                middle_tensor = self._prepare_conditioning_tensor(middle_image_filepath, height, width, padding_values)
                safe_middle_frame = max(0, min(int(middle_frame_number), actual_num_frames - 1))
                conditioning_items.append(ConditioningItem(middle_tensor, safe_middle_frame, float(middle_image_weight)))
            if end_image_filepath:
                end_tensor = self._prepare_conditioning_tensor(end_image_filepath, height, width, padding_values)
                last_frame_index = actual_num_frames - 1
                conditioning_items.append(ConditioningItem(end_tensor, last_frame_index, float(end_image_weight)))
        print(f"[DEBUG] Conditioning items: {len(conditioning_items)}")

        # We always request latents (simple path)
        call_kwargs = {
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "height": height_padded,
            "width": width_padded,
            "num_frames": actual_num_frames,
            "frame_rate": int(FPS),
            "generator": generator,
            "output_type": "latent",
            "conditioning_items": conditioning_items if conditioning_items else None,
            "media_items": None,
            "decode_timestep": self.config["decode_timestep"],
            "decode_noise_scale": self.config["decode_noise_scale"],
            "stochastic_sampling": self.config["stochastic_sampling"],
            "image_cond_noise_scale": 0.01,
            "is_video": True,
            "vae_per_channel_normalize": True,
            "mixed_precision": (self.config["precision"] == "mixed_precision"),
            "offload_to_cpu": False,
            "enhance_prompt": False,
            "skip_layer_strategy": SkipLayerStrategy.AttentionValues,
        }
        print(f"[DEBUG] output_type={call_kwargs['output_type']} skip_layer_strategy={call_kwargs['skip_layer_strategy']}")

        if mode == "video-to-video":
            media = load_media_file(
                media_path=input_video_filepath,
                height=height,
                width=width,
                max_frames=int(frames_to_use),
                padding=padding_values,
            ).to(self.device)
            call_kwargs["media_items"] = media
            print(f"[DEBUG] media_items shape={tuple(media.shape)}")

        latents = None
        multi_scale_pipeline = None
        try:
            if improve_texture:
                if not self.latent_upsampler:
                    raise ValueError("Spatial upscaler not loaded.")
                print("[DEBUG] Multi-scale: building pipeline...")
                multi_scale_pipeline = LTXMultiScalePipeline(self.pipeline, self.latent_upsampler)
                first_pass_args = self.config.get("first_pass", {}).copy()
                first_pass_args["guidance_scale"] = float(guidance_scale)
                second_pass_args = self.config.get("second_pass", {}).copy()
                second_pass_args["guidance_scale"] = float(guidance_scale)

                multi_scale_call_kwargs = call_kwargs.copy()
                multi_scale_call_kwargs.update(
                    {
                        "downscale_factor": self.config["downscale_factor"],
                        "first_pass": first_pass_args,
                        "second_pass": second_pass_args,
                    }
                )
                print("[DEBUG] Calling multi_scale_pipeline...")
                t_ms = time.perf_counter()
                ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
                with ctx:
                    result = multi_scale_pipeline(**multi_scale_call_kwargs)
                print(f"[DEBUG] multi_scale_pipeline time={time.perf_counter()-t_ms:.3f}s")

                if hasattr(result, "latents"):
                    latents = result.latents
                elif hasattr(result, "images") and isinstance(result.images, torch.Tensor):
                    latents = result.images
                else:
                    latents = result
                print(f"[DEBUG] Latents (multi-scale): shape={tuple(latents.shape)}")
            else:
                single_pass_kwargs = call_kwargs.copy()
                first_pass_config = self.config.get("first_pass", {})
                single_pass_kwargs.update(
                    {
                        "guidance_scale": float(guidance_scale),
                        "stg_scale": first_pass_config.get("stg_scale"),
                        "rescaling_scale": first_pass_config.get("rescaling_scale"),
                        "skip_block_list": first_pass_config.get("skip_block_list"),
                    }
                )
                schedule = first_pass_config.get("timesteps") or first_pass_config.get("guidance_timesteps")
                if mode == "video-to-video":
                    schedule = [0.7]
                    print("[INFO] video-to-video mode (single pass): timesteps=[0.7]")
                if isinstance(schedule, (list, tuple)) and len(schedule) > 0:
                    single_pass_kwargs["timesteps"] = schedule
                    single_pass_kwargs["guidance_timesteps"] = schedule
                print(f"[DEBUG] Single-pass: timesteps_len={len(schedule) if schedule else 0}")

                print("\n[INFO] Running single-pass pipeline...")
                t_sp = time.perf_counter()
                ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
                with ctx:
                    result = self.pipeline(**single_pass_kwargs)
                print(f"[DEBUG] single-pass time={time.perf_counter()-t_sp:.3f}s")

                if hasattr(result, "latents"):
                    latents = result.latents
                elif hasattr(result, "images") and isinstance(result.images, torch.Tensor):
                    latents = result.images
                else:
                    latents = result
                print(f"[DEBUG] Latents (single-pass): shape={tuple(latents.shape)}")
            # Staging and MP4 writing (simple: VAE → pixels → MP4)

            latents_cpu = latents.detach().to("cpu", non_blocking=True)
            torch.cuda.empty_cache()
            try:
                torch.cuda.ipc_collect()
            except Exception:
                pass

            latents_parts = self._dividir_latentes(latents_cpu, 4, 1)

            temp_dir = tempfile.mkdtemp(prefix="ltxv_")
            self._register_tmp_dir(temp_dir)
            results_dir = "/app/output"
            os.makedirs(results_dir, exist_ok=True)

            partes_mp4 = []
            par = 0

            # Loop variable renamed from `latents` to avoid shadowing the tensor
            # that the finally block deletes.
            for latents_part in latents_parts:
                print(f"[DEBUG] Partition {par}: {tuple(latents_part.shape)}")

                par = par + 1
                output_video_path = os.path.join(temp_dir, f"output_{used_seed}_{par}.mp4")
                final_output_path = None

                print("[DEBUG] Decoding latent block with the VAE → pixel tensor...")
                # Use the manager with a per-item timestep; avoids target_shape
                # issues and the NoneType.decode route
                pixel_tensor = vae_manager_singleton.decode(
                    latents_part.to(self.device, non_blocking=True),
                    decode_timestep=float(self.config.get("decode_timestep", 0.05))
                )
                log_tensor_info(pixel_tensor, "Pixel tensor (VAE output)")

                print("[DEBUG] Encoding MP4 from the pixel tensor (whole block)...")
                video_encode_tool_singleton.save_video_from_tensor(
                    pixel_tensor,
                    output_video_path,
                    fps=call_kwargs["frame_rate"],
                    progress_callback=progress_callback
                )

                candidate = os.path.join(results_dir, f"output_par_{par}.mp4")
                try:
                    shutil.move(output_video_path, candidate)
                    final_output_path = candidate
                    print(f"[DEBUG] MP4 part {par} moved to {final_output_path}")
                    partes_mp4.append(final_output_path)
                except Exception as e:
                    final_output_path = output_video_path
                    print(f"[DEBUG] Move failed; using tmp as final: {e}")

            total_partes = len(partes_mp4)
            if total_partes > 1:
                final_vid = os.path.join(results_dir, f"concat_fim_{used_seed}.mp4")
                partes_mp4_fade = self._gerar_lista_com_transicoes(pasta=results_dir, video_paths=partes_mp4, crossfade_frames=8)
                self._concat_mp4s_no_reencode(partes_mp4_fade, final_vid)
            else:
                final_vid = partes_mp4[0]

            self._log_gpu_memory("Generation end")
            return final_vid, used_seed
        except Exception as e:
            print("[DEBUG] EXCEPTION DURING GENERATION:")
            print("".join(traceback.format_exception(type(e), e, e.__traceback__)))
            raise
        finally:
            try:
                del latents
            except Exception:
                pass
            try:
                del multi_scale_pipeline
            except Exception:
                pass

            gc.collect()
            try:
                if self.device == "cuda":
                    torch.cuda.empty_cache()
                    try:
                        torch.cuda.ipc_collect()
                    except Exception:
                        pass
            except Exception as e:
                print(f"[DEBUG] GPU cleanup in finally failed: {e}")

            try:
                self.finalize(keep_paths=[])
            except Exception as e:
                print(f"[DEBUG] finalize() in finally failed: {e}")

print("Creating the VideoService instance. Model loading will start now...")
video_generation_service = VideoService()
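
# Minimal usage sketch (illustration only; assumes the models load in this
# environment and /app/output is writable):
if __name__ == "__main__":
    video_path, seed = video_generation_service.generate(
        prompt="A ship sailing through a storm at dusk",
        negative_prompt="blurry, low quality, distorted",
        mode="text-to-video",
        duration=2.0,
        improve_texture=True,
    )
    print(f"Video written to {video_path} (seed={seed})")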