Spaces:
Paused
Paused
Create ltx_pool_manager
#1
by
eeuuia
- opened
- Dockerfile +75 -65
- LTX-Video/ltx_video/pipelines/pipeline_ltx_video.py +12 -15
- api/gpu_manager.py +97 -28
- api/ltx/ltx_utils.py +207 -0
- api/ltx_pool_manager +208 -0
- api/ltx_server_refactored.py +0 -367
- api/ltx_server_refactored_complete.py +296 -0
- api/seedvr_server.py +144 -194
- api/utils/debug_utils.py +70 -0
- api/vae_server.py +162 -0
- api/vince_pool_manager.py +214 -0
- app.py +212 -161
- compose.yaml +31 -18
- entrypoint.sh +10 -15
- managers/vae_manager.py +61 -55
- setup.py +111 -120
- start.sh +3 -38
Dockerfile
CHANGED
|
@@ -1,126 +1,136 @@
|
|
| 1 |
# =============================================================================
|
| 2 |
-
# ADUC-SDR Video Suite —
|
|
|
|
| 3 |
# CUDA 12.8 | PyTorch 2.8.0+cu128 | Ubuntu 22.04
|
| 4 |
# =============================================================================
|
| 5 |
FROM nvidia/cuda:12.8.0-devel-ubuntu22.04
|
| 6 |
|
| 7 |
-
LABEL maintainer="Carlos Rodrigues dos Santos
|
| 8 |
-
LABEL description="High-performance Diffusers stack with
|
| 9 |
-
LABEL version="
|
| 10 |
LABEL cuda_version="12.8.0"
|
| 11 |
LABEL python_version="3.10"
|
| 12 |
LABEL pytorch_version="2.8.0+cu128"
|
| 13 |
LABEL gpu_optimized_for="8x_NVIDIA_L40S"
|
| 14 |
|
| 15 |
-
#
|
|
|
|
|
|
|
| 16 |
ENV DEBIAN_FRONTEND=noninteractive TZ=UTC LANG=C.UTF-8 LC_ALL=C.UTF-8 \
|
| 17 |
PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1 \
|
| 18 |
-
PIP_NO_CACHE_DIR=
|
| 19 |
|
| 20 |
-
# GPU
|
| 21 |
-
ENV NVIDIA_VISIBLE_DEVICES=all
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
|
| 26 |
-
# Threads
|
| 27 |
ENV OMP_NUM_THREADS=8 MKL_NUM_THREADS=8 MAX_JOBS=160
|
| 28 |
|
| 29 |
-
#
|
| 30 |
-
ENV PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512,garbage_collection_threshold:0.8
|
| 31 |
-
|
| 32 |
|
| 33 |
-
#
|
| 34 |
-
ENV APP_HOME=/app
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
ENV HF_HOME=/data/.cache/huggingface
|
| 39 |
-
ENV TORCH_HOME=/data/.cache/torch
|
| 40 |
-
ENV HF_DATASETS_CACHE=/data/.cache/datasets
|
| 41 |
-
ENV TRANSFORMERS_CACHE=/data/.cache/transformers
|
| 42 |
-
ENV DIFFUSERS_CACHE=/data/.cache/diffusers
|
| 43 |
-
ENV HF_HUB_ENABLE_HF_TRANSFER=1
|
| 44 |
-
ENV TOKENIZERS_PARALLELISM=false
|
| 45 |
|
| 46 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
RUN useradd -m -u 1000 -s /bin/bash appuser && \
|
| 48 |
-
mkdir -p /data /
|
| 49 |
-
/data/.cache/huggingface /data/.cache/torch \
|
| 50 |
-
/data/.cache/datasets /data/.cache/transformers /data/.cache/diffusers && \
|
| 51 |
-
chown -R appuser:appuser /data
|
| 52 |
-
|
| 53 |
-
# Models live in /data/models and are visible at /app/models
|
| 54 |
-
ENV MODELS_DIR=/data/models
|
| 55 |
-
RUN ln -sf /data/models /app/models
|
| 56 |
|
| 57 |
-
#
|
| 58 |
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 59 |
build-essential gosu tree cmake git git-lfs curl wget ffmpeg ninja-build \
|
| 60 |
python3.10 python3.10-dev python3.10-distutils python3-pip \
|
| 61 |
ca-certificates libglib2.0-0 libgl1 \
|
| 62 |
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
| 63 |
|
| 64 |
-
RUN ln -sf /usr/bin/python3.10 /usr/bin/
|
| 65 |
-
ln -sf /usr/bin/python3.10 /usr/bin/python && \
|
| 66 |
python3 -m pip install --upgrade pip
|
| 67 |
|
| 68 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
RUN pip install --index-url https://download.pytorch.org/whl/cu128 \
|
| 70 |
torch>=2.8.0+cu128 torchvision>=0.23.0+cu128 torchaudio>=2.8.0+cu128
|
| 71 |
|
| 72 |
-
#
|
| 73 |
RUN pip install packaging ninja cmake pybind11 scikit-build cython hf_transfer "numpy>=1.24.4"
|
| 74 |
|
| 75 |
-
# Triton 3.x
|
| 76 |
RUN pip uninstall -y triton || true && \
|
| 77 |
pip install -v --no-build-isolation triton==3.4.0
|
| 78 |
|
| 79 |
-
|
| 80 |
-
# FlashAttention 2.8.x
|
| 81 |
RUN pip install flash-attn==2.8.3 --no-build-isolation || \
|
| 82 |
pip install flash-attn==2.8.2 --no-build-isolation || \
|
| 83 |
pip install flash-attn==2.8.1 --no-build-isolation || \
|
| 84 |
pip install flash-attn==2.8.0.post2 --no-build-isolation
|
| 85 |
|
| 86 |
-
#
|
| 87 |
-
|
|
|
|
|
|
|
|
|
|
| 88 |
RUN pip install --no-cache-dir -r requirements.txt
|
| 89 |
|
| 90 |
-
#
|
| 91 |
RUN pip install --upgrade bitsandbytes
|
| 92 |
|
| 93 |
-
#
|
| 94 |
RUN echo "Installing custom wheels..." && \
|
| 95 |
pip install --no-cache-dir \
|
| 96 |
"https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl" \
|
| 97 |
"https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/dropout_layer_norm-0.1-cp310-cp310-linux_x86_64.whl"
|
| 98 |
|
| 99 |
-
#
|
| 100 |
-
# Optional: q8_kernels + LTX-Video (enable if needed; ensure wheel ABI)
|
| 101 |
RUN pip install --no-cache-dir \
|
| 102 |
"https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/q8_kernels-0.0.5-cp310-cp310-linux_x86_64.whl"
|
| 103 |
-
# RUN git clone https://github.com/Lightricks/LTX-Video.git /data/LTX-Video && \
|
| 104 |
-
# cd /data/LTX-Video && python -m pip install -e .[inference]
|
| 105 |
-
# ====================================================================
|
| 106 |
|
| 107 |
-
#
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
COPY entrypoint.sh ./app/entrypoint.sh
|
| 112 |
|
| 113 |
-
#
|
| 114 |
-
|
|
|
|
|
|
|
|
|
|
| 115 |
|
| 116 |
-
#
|
| 117 |
-
|
| 118 |
-
|
|
|
|
| 119 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
VOLUME /data
|
| 121 |
|
| 122 |
-
|
| 123 |
USER appuser
|
| 124 |
|
| 125 |
-
#
|
| 126 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# =============================================================================
|
| 2 |
+
# ADUC-SDR Video Suite — Dockerfile Otimizado
|
| 3 |
+
# Preserva a estrutura de instalação original para alta performance.
|
| 4 |
# CUDA 12.8 | PyTorch 2.8.0+cu128 | Ubuntu 22.04
|
| 5 |
# =============================================================================
|
| 6 |
FROM nvidia/cuda:12.8.0-devel-ubuntu22.04
|
| 7 |
|
| 8 |
+
LABEL maintainer="Carlos Rodrigues dos Santos"
|
| 9 |
+
LABEL description="ADUC-SDR: High-performance Diffusers stack for 8x NVIDIA L40S with LTX-Video and SeedVR"
|
| 10 |
+
LABEL version="5.0.0"
|
| 11 |
LABEL cuda_version="12.8.0"
|
| 12 |
LABEL python_version="3.10"
|
| 13 |
LABEL pytorch_version="2.8.0+cu128"
|
| 14 |
LABEL gpu_optimized_for="8x_NVIDIA_L40S"
|
| 15 |
|
| 16 |
+
# =============================================================================
|
| 17 |
+
# 1. Variáveis de Ambiente e Configuração de Paths
|
| 18 |
+
# =============================================================================
|
| 19 |
ENV DEBIAN_FRONTEND=noninteractive TZ=UTC LANG=C.UTF-8 LC_ALL=C.UTF-8 \
|
| 20 |
PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1 \
|
| 21 |
+
PIP_NO_CACHE_DIR=0 PIP_DISABLE_PIP_VERSION_CHECK=1
|
| 22 |
|
| 23 |
+
# --- Configurações de GPU e Computação ---
|
| 24 |
+
ENV NVIDIA_VISIBLE_DEVICES=all \
|
| 25 |
+
TORCH_CUDA_ARCH_LIST="8.9" \
|
| 26 |
+
CUDA_DEVICE_ORDER=PCI_BUS_ID \
|
| 27 |
+
CUDA_DEVICE_MAX_CONNECTIONS=32
|
| 28 |
|
| 29 |
+
# --- Configurações de Threads ---
|
| 30 |
ENV OMP_NUM_THREADS=8 MKL_NUM_THREADS=8 MAX_JOBS=160
|
| 31 |
|
| 32 |
+
# --- Configurações de Alocador de Memória e Caches de GPU ---
|
| 33 |
+
ENV PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512,garbage_collection_threshold:0.8 \
|
| 34 |
+
CUDA_LAUNCH_BLOCKING=0 CUDA_CACHE_MAXSIZE=2147483648 CUDA_CACHE_DISABLE=0
|
| 35 |
|
| 36 |
+
# --- Paths da Aplicação e Dados Persistentes ---
|
| 37 |
+
ENV APP_HOME=/app \
|
| 38 |
+
HF_HOME=/data/.cache/huggingface \
|
| 39 |
+
TORCH_HOME=/data/.cache/torch \
|
| 40 |
+
HF_DATASETS_CACHE=/data/.cache/datasets \
|
| 41 |
+
TRANSFORMERS_CACHE=/data/.cache/transformers \
|
| 42 |
+
DIFFUSERS_CACHE=/data/.cache/diffusers \
|
| 43 |
+
HF_HUB_ENABLE_HF_TRANSFER=1 \
|
| 44 |
+
TOKENIZERS_PARALLELISM=false
|
| 45 |
|
| 46 |
+
WORKDIR $APP_HOME
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
+
# =============================================================================
|
| 49 |
+
# 2. Setup de Usuário e Sistema
|
| 50 |
+
# =============================================================================
|
| 51 |
+
# Cria usuário não-root e diretórios de dados/app.
|
| 52 |
+
# As permissões finais serão aplicadas no final.
|
| 53 |
RUN useradd -m -u 1000 -s /bin/bash appuser && \
|
| 54 |
+
mkdir -p /data $APP_HOME /app/output
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
|
| 56 |
+
# --- Instalação de Pacotes de Sistema e Python ---
|
| 57 |
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 58 |
build-essential gosu tree cmake git git-lfs curl wget ffmpeg ninja-build \
|
| 59 |
python3.10 python3.10-dev python3.10-distutils python3-pip \
|
| 60 |
ca-certificates libglib2.0-0 libgl1 \
|
| 61 |
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
| 62 |
|
| 63 |
+
RUN ln -sf /usr/bin/python3.10 /usr/bin/python && \
|
|
|
|
| 64 |
python3 -m pip install --upgrade pip
|
| 65 |
|
| 66 |
+
# =============================================================================
|
| 67 |
+
# 3. Instalação da Toolchain de Machine Learning (Mantida 100% Original)
|
| 68 |
+
# =============================================================================
|
| 69 |
+
|
| 70 |
+
# --- PyTorch para CUDA 12.8 ---
|
| 71 |
RUN pip install --index-url https://download.pytorch.org/whl/cu128 \
|
| 72 |
torch>=2.8.0+cu128 torchvision>=0.23.0+cu128 torchaudio>=2.8.0+cu128
|
| 73 |
|
| 74 |
+
# --- Ferramentas de Compilação, Triton e FlashAttention ---
|
| 75 |
RUN pip install packaging ninja cmake pybind11 scikit-build cython hf_transfer "numpy>=1.24.4"
|
| 76 |
|
| 77 |
+
# --- Triton 3.x ---
|
| 78 |
RUN pip uninstall -y triton || true && \
|
| 79 |
pip install -v --no-build-isolation triton==3.4.0
|
| 80 |
|
| 81 |
+
# --- FlashAttention 2.8.x ---
|
|
|
|
| 82 |
RUN pip install flash-attn==2.8.3 --no-build-isolation || \
|
| 83 |
pip install flash-attn==2.8.2 --no-build-isolation || \
|
| 84 |
pip install flash-attn==2.8.1 --no-build-isolation || \
|
| 85 |
pip install flash-attn==2.8.0.post2 --no-build-isolation
|
| 86 |
|
| 87 |
+
# =============================================================================
|
| 88 |
+
# 4. Instalação das Dependências da Aplicação
|
| 89 |
+
# =============================================================================
|
| 90 |
+
# Copia e instala requirements.txt primeiro para otimizar o cache de camadas do Docker.
|
| 91 |
+
COPY --chown=appuser:appuser requirements.txt ./requirements.txt
|
| 92 |
RUN pip install --no-cache-dir -r requirements.txt
|
| 93 |
|
| 94 |
+
# --- Instalação de bitsandbytes e Wheels Customizados (Mantido 100% Original) ---
|
| 95 |
RUN pip install --upgrade bitsandbytes
|
| 96 |
|
| 97 |
+
# Instala wheels customizados (Apex, etc.)
|
| 98 |
RUN echo "Installing custom wheels..." && \
|
| 99 |
pip install --no-cache-dir \
|
| 100 |
"https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl" \
|
| 101 |
"https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/dropout_layer_norm-0.1-cp310-cp310-linux_x86_64.whl"
|
| 102 |
|
| 103 |
+
# Instala q8_kernels
|
|
|
|
| 104 |
RUN pip install --no-cache-dir \
|
| 105 |
"https://huggingface.co/euIaxs22/Aduc-sdr/resolve/main/q8_kernels-0.0.5-cp310-cp310-linux_x86_64.whl"
|
|
|
|
|
|
|
|
|
|
| 106 |
|
| 107 |
+
# NOTA: A clonagem do LTX-Video foi removida daqui.
|
| 108 |
+
# Esta tarefa agora é gerenciada pelo entrypoint para garantir a persistência dos dados.
|
| 109 |
+
# # RUN git clone https://github.com/Lightricks/LTX-Video.git /data/LTX-Video && \
|
| 110 |
+
# # cd /data/LTX-Video && python -m pip install -e .[inference]
|
|
|
|
| 111 |
|
| 112 |
+
# =============================================================================
|
| 113 |
+
# 5. Cópia do Código-Fonte e Configuração Final
|
| 114 |
+
# =============================================================================
|
| 115 |
+
# Copia o restante do código-fonte da aplicação por último.
|
| 116 |
+
COPY --chown=appuser:appuser . .
|
| 117 |
|
| 118 |
+
# Garante que todos os scripts de inicialização sejam executáveis
|
| 119 |
+
# e que o usuário 'appuser' seja o dono de todos os arquivos.
|
| 120 |
+
RUN chown -R appuser:appuser $APP_HOME /data && \
|
| 121 |
+
chmod +x /app/entrypoint.sh /app/start.sh /app/info.sh /app/builder.sh
|
| 122 |
|
| 123 |
+
# =============================================================================
|
| 124 |
+
# 6. Ponto de Entrada
|
| 125 |
+
# =============================================================================
|
| 126 |
+
# Expõe o diretório /data para ser montado como um volume persistente.
|
| 127 |
VOLUME /data
|
| 128 |
|
| 129 |
+
# Define o usuário padrão para a execução do contêiner.
|
| 130 |
USER appuser
|
| 131 |
|
| 132 |
+
# Define o script que será executado na inicialização do contêiner.
|
| 133 |
+
ENTRYPOINT ["/app/entrypoint.sh"]
|
| 134 |
+
|
| 135 |
+
# Define o comando padrão a ser executado pelo entrypoint.
|
| 136 |
+
CMD ["/app/start.sh"]
|
LTX-Video/ltx_video/pipelines/pipeline_ltx_video.py
CHANGED
|
@@ -107,32 +107,31 @@ class SpyLatent:
|
|
| 107 |
necessária se o tensor de entrada for 3D.
|
| 108 |
save_visual (bool): Se True, decodifica com o VAE e salva uma imagem.
|
| 109 |
"""
|
| 110 |
-
print(f"\n--- [INSPEÇÃO DE LATENTE: {tag}] ---")
|
| 111 |
-
if not isinstance(tensor, torch.Tensor):
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
|
| 116 |
try:
|
| 117 |
# --- Imprime Estatísticas do Tensor Original ---
|
| 118 |
-
self._print_stats("Tensor Original", tensor)
|
| 119 |
|
| 120 |
# --- Converte para 5D se necessário ---
|
| 121 |
tensor_5d = self._to_5d(tensor, reference_shape_5d)
|
| 122 |
if tensor_5d is not None and tensor.ndim == 3:
|
| 123 |
self._print_stats("Convertido para 5D", tensor_5d)
|
| 124 |
|
| 125 |
-
save_visual = False
|
| 126 |
# --- Visualização com VAE ---
|
| 127 |
if save_visual and self.vae is not None and tensor_5d is not None:
|
| 128 |
os.makedirs(self.output_dir, exist_ok=True)
|
| 129 |
-
print(f" VISUALIZAÇÃO (VAE): Salvando imagem em {self.output_dir}...")
|
| 130 |
|
| 131 |
frame_idx_to_viz = min(1, tensor_5d.shape[2] - 1)
|
| 132 |
if frame_idx_to_viz < 0:
|
| 133 |
print(" VISUALIZAÇÃO (VAE): Tensor não tem frames para visualizar.")
|
| 134 |
else:
|
| 135 |
-
print(f" VISUALIZAÇÃO (VAE): Usando frame de índice {frame_idx_to_viz}.")
|
| 136 |
latent_slice = tensor_5d[:, :, frame_idx_to_viz:frame_idx_to_viz+1, :, :]
|
| 137 |
|
| 138 |
with torch.no_grad(), torch.autocast(device_type=self.device.type):
|
|
@@ -142,11 +141,9 @@ class SpyLatent:
|
|
| 142 |
print(" VISUALIZAÇÃO (VAE): Imagem salva.")
|
| 143 |
|
| 144 |
except Exception as e:
|
| 145 |
-
print(f" ERRO na inspeção: {e}")
|
| 146 |
traceback.print_exc()
|
| 147 |
-
|
| 148 |
-
print("--- [FIM DA INSPEÇÃO] ---\n")
|
| 149 |
-
|
| 150 |
def _to_5d(self, tensor: torch.Tensor, shape_5d: tuple) -> torch.Tensor:
|
| 151 |
"""Converte um tensor 3D patchificado de volta para 5D."""
|
| 152 |
if tensor.ndim == 5:
|
|
@@ -156,7 +153,7 @@ class SpyLatent:
|
|
| 156 |
b, c, f, h, w = shape_5d
|
| 157 |
return rearrange(tensor, "b (f h w) c -> b c f h w", c=c, f=f, h=h, w=w)
|
| 158 |
except Exception as e:
|
| 159 |
-
print(f" AVISO: Erro ao rearranjar tensor 3D para 5D: {e}. A visualização pode falhar.")
|
| 160 |
return None
|
| 161 |
return None
|
| 162 |
|
|
@@ -166,7 +163,7 @@ class SpyLatent:
|
|
| 166 |
std = tensor.std().item()
|
| 167 |
min_val = tensor.min().item()
|
| 168 |
max_val = tensor.max().item()
|
| 169 |
-
print(f" {prefix}:
|
| 170 |
|
| 171 |
|
| 172 |
|
|
|
|
| 107 |
necessária se o tensor de entrada for 3D.
|
| 108 |
save_visual (bool): Se True, decodifica com o VAE e salva uma imagem.
|
| 109 |
"""
|
| 110 |
+
#print(f"\n--- [INSPEÇÃO DE LATENTE: {tag}] ---")
|
| 111 |
+
#if not isinstance(tensor, torch.Tensor):
|
| 112 |
+
# print(f" AVISO: O objeto fornecido para '{tag}' não é um tensor.")
|
| 113 |
+
# print("--- [FIM DA INSPEÇÃO] ---\n")
|
| 114 |
+
# return
|
| 115 |
|
| 116 |
try:
|
| 117 |
# --- Imprime Estatísticas do Tensor Original ---
|
| 118 |
+
#self._print_stats("Tensor Original", tensor)
|
| 119 |
|
| 120 |
# --- Converte para 5D se necessário ---
|
| 121 |
tensor_5d = self._to_5d(tensor, reference_shape_5d)
|
| 122 |
if tensor_5d is not None and tensor.ndim == 3:
|
| 123 |
self._print_stats("Convertido para 5D", tensor_5d)
|
| 124 |
|
|
|
|
| 125 |
# --- Visualização com VAE ---
|
| 126 |
if save_visual and self.vae is not None and tensor_5d is not None:
|
| 127 |
os.makedirs(self.output_dir, exist_ok=True)
|
| 128 |
+
#print(f" VISUALIZAÇÃO (VAE): Salvando imagem em {self.output_dir}...")
|
| 129 |
|
| 130 |
frame_idx_to_viz = min(1, tensor_5d.shape[2] - 1)
|
| 131 |
if frame_idx_to_viz < 0:
|
| 132 |
print(" VISUALIZAÇÃO (VAE): Tensor não tem frames para visualizar.")
|
| 133 |
else:
|
| 134 |
+
#print(f" VISUALIZAÇÃO (VAE): Usando frame de índice {frame_idx_to_viz}.")
|
| 135 |
latent_slice = tensor_5d[:, :, frame_idx_to_viz:frame_idx_to_viz+1, :, :]
|
| 136 |
|
| 137 |
with torch.no_grad(), torch.autocast(device_type=self.device.type):
|
|
|
|
| 141 |
print(" VISUALIZAÇÃO (VAE): Imagem salva.")
|
| 142 |
|
| 143 |
except Exception as e:
|
| 144 |
+
#print(f" ERRO na inspeção: {e}")
|
| 145 |
traceback.print_exc()
|
| 146 |
+
|
|
|
|
|
|
|
| 147 |
def _to_5d(self, tensor: torch.Tensor, shape_5d: tuple) -> torch.Tensor:
|
| 148 |
"""Converte um tensor 3D patchificado de volta para 5D."""
|
| 149 |
if tensor.ndim == 5:
|
|
|
|
| 153 |
b, c, f, h, w = shape_5d
|
| 154 |
return rearrange(tensor, "b (f h w) c -> b c f h w", c=c, f=f, h=h, w=w)
|
| 155 |
except Exception as e:
|
| 156 |
+
#print(f" AVISO: Erro ao rearranjar tensor 3D para 5D: {e}. A visualização pode falhar.")
|
| 157 |
return None
|
| 158 |
return None
|
| 159 |
|
|
|
|
| 163 |
std = tensor.std().item()
|
| 164 |
min_val = tensor.min().item()
|
| 165 |
max_val = tensor.max().item()
|
| 166 |
+
print(f" {prefix}: {tensor.shape}")
|
| 167 |
|
| 168 |
|
| 169 |
|
api/gpu_manager.py
CHANGED
|
@@ -1,56 +1,125 @@
|
|
| 1 |
-
# api/gpu_manager.py
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
import os
|
| 4 |
import torch
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
class GPUManager:
|
| 7 |
"""
|
| 8 |
-
|
|
|
|
|
|
|
| 9 |
"""
|
| 10 |
def __init__(self):
|
|
|
|
| 11 |
self.total_gpus = torch.cuda.device_count()
|
| 12 |
-
self.
|
|
|
|
| 13 |
self.seedvr_gpus = []
|
|
|
|
| 14 |
self._allocate_gpus()
|
| 15 |
|
| 16 |
def _allocate_gpus(self):
|
| 17 |
"""
|
| 18 |
-
|
| 19 |
"""
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
| 23 |
|
| 24 |
if self.total_gpus == 0:
|
| 25 |
-
|
| 26 |
elif self.total_gpus == 1:
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
self.
|
| 30 |
self.seedvr_gpus = [0]
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
self.
|
| 35 |
-
self.
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
-
def
|
| 41 |
-
"""
|
| 42 |
-
if not self.
|
| 43 |
return torch.device("cpu")
|
| 44 |
-
|
| 45 |
-
return torch.device(f"cuda:{self.ltx_gpus[0]}")
|
| 46 |
|
| 47 |
-
def get_seedvr_devices(self) ->
|
| 48 |
-
"""
|
| 49 |
return self.seedvr_gpus
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
|
| 51 |
def requires_memory_swap(self) -> bool:
|
| 52 |
-
"""
|
| 53 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
|
| 55 |
-
#
|
|
|
|
| 56 |
gpu_manager = GPUManager()
|
|
|
|
| 1 |
+
# FILE: api/gpu_manager.py
|
| 2 |
+
# DESCRIPTION: A hardware-aware, service-agnostic GPU allocator for the ADUC-SDR suite.
|
| 3 |
+
# This module inspects available GPUs and partitions them according to a predefined
|
| 4 |
+
# strategy for LTX, SeedVR, and VINCIE services without importing them, thus
|
| 5 |
+
# preventing circular dependencies.
|
| 6 |
|
| 7 |
import os
|
| 8 |
import torch
|
| 9 |
+
import math
|
| 10 |
+
import logging
|
| 11 |
+
from typing import List
|
| 12 |
|
| 13 |
class GPUManager:
|
| 14 |
"""
|
| 15 |
+
Manages and allocates available GPUs among different services.
|
| 16 |
+
It operates agnostically, providing device information without knowing
|
| 17 |
+
the specifics of the services that will use them.
|
| 18 |
"""
|
| 19 |
def __init__(self):
|
| 20 |
+
"""Initializes the manager, detects GPUs, and runs the allocation logic."""
|
| 21 |
self.total_gpus = torch.cuda.device_count()
|
| 22 |
+
self.ltx_main_gpus = []
|
| 23 |
+
self.ltx_vae_gpu = []
|
| 24 |
self.seedvr_gpus = []
|
| 25 |
+
self.vincie_gpus = []
|
| 26 |
self._allocate_gpus()
|
| 27 |
|
| 28 |
def _allocate_gpus(self):
|
| 29 |
"""
|
| 30 |
+
Implements the GPU allocation strategy based on the total number of detected GPUs.
|
| 31 |
"""
|
| 32 |
+
logging.info("="*60)
|
| 33 |
+
logging.info("🤖 Initializing GPU Manager (LTX, SeedVR, VINCIE)")
|
| 34 |
+
logging.info(f" > Total GPUs detected: {self.total_gpus}")
|
| 35 |
+
|
| 36 |
+
all_indices = list(range(self.total_gpus))
|
| 37 |
|
| 38 |
if self.total_gpus == 0:
|
| 39 |
+
logging.warning(" > No GPUs detected. All services will operate in CPU mode.")
|
| 40 |
elif self.total_gpus == 1:
|
| 41 |
+
logging.warning(" > 1 GPU detected. All services will share GPU 0. Memory swapping will be active.")
|
| 42 |
+
self.ltx_main_gpus = [0]
|
| 43 |
+
self.ltx_vae_gpu = [0] # Shares with the main LTX pipeline
|
| 44 |
self.seedvr_gpus = [0]
|
| 45 |
+
self.vincie_gpus = [0]
|
| 46 |
+
elif self.total_gpus == 2:
|
| 47 |
+
logging.info(" > 2 GPUs detected. LTX will use a dedicated VAE device.")
|
| 48 |
+
self.ltx_main_gpus = [0]
|
| 49 |
+
self.ltx_vae_gpu = [1] # VAE gets the second GPU
|
| 50 |
+
self.seedvr_gpus = [0] # Shares with main LTX
|
| 51 |
+
self.vincie_gpus = [0] # Shares with main LTX
|
| 52 |
+
else: # 3 or more GPUs
|
| 53 |
+
logging.info(f" > {self.total_gpus} GPUs detected. Distributing allocation.")
|
| 54 |
+
# LTX always gets the first two GPUs if available for optimal performance
|
| 55 |
+
self.ltx_main_gpus = [0]
|
| 56 |
+
self.ltx_vae_gpu = [1]
|
| 57 |
+
|
| 58 |
+
remaining_gpus = all_indices[2:]
|
| 59 |
+
|
| 60 |
+
# The rest are divided between SeedVR and VINCIE
|
| 61 |
+
# VINCIE gets priority as it can scale well with more GPUs
|
| 62 |
+
vincie_count = max(1, math.ceil(len(remaining_gpus) / 2))
|
| 63 |
+
seedvr_count = len(remaining_gpus) - vincie_count
|
| 64 |
+
|
| 65 |
+
self.vincie_gpus = remaining_gpus[:vincie_count]
|
| 66 |
+
# If there are GPUs left, assign them to SeedVR
|
| 67 |
+
if seedvr_count > 0:
|
| 68 |
+
self.seedvr_gpus = remaining_gpus[vincie_count:]
|
| 69 |
+
else:
|
| 70 |
+
# If no GPUs are left for SeedVR, it shares with the main LTX GPU
|
| 71 |
+
self.seedvr_gpus = [0]
|
| 72 |
|
| 73 |
+
logging.info(f" > Final Allocation:")
|
| 74 |
+
logging.info(f" - LTX (Transformer): GPUs {self.ltx_main_gpus}")
|
| 75 |
+
logging.info(f" - LTX (VAE): GPU {self.ltx_vae_gpu[0] if self.ltx_vae_gpu else 'N/A'}")
|
| 76 |
+
logging.info(f" - SeedVR: GPUs {self.seedvr_gpus}")
|
| 77 |
+
logging.info(f" - VINCIE: GPUs {self.vincie_gpus}")
|
| 78 |
+
logging.info("="*60)
|
| 79 |
+
|
| 80 |
+
def get_ltx_device(self) -> torch.device:
|
| 81 |
+
"""Returns the primary device for the LTX Transformer pipeline."""
|
| 82 |
+
if not self.ltx_main_gpus:
|
| 83 |
+
return torch.device("cpu")
|
| 84 |
+
return torch.device(f"cuda:{self.ltx_main_gpus[0]}")
|
| 85 |
|
| 86 |
+
def get_ltx_vae_device(self) -> torch.device:
|
| 87 |
+
"""Returns the dedicated device for the LTX VAE."""
|
| 88 |
+
if not self.ltx_vae_gpu:
|
| 89 |
return torch.device("cpu")
|
| 90 |
+
return torch.device(f"cuda:{self.ltx_vae_gpu[0]}")
|
|
|
|
| 91 |
|
| 92 |
+
def get_seedvr_devices(self) -> List[int]:
|
| 93 |
+
"""Returns the list of GPU indices for the SeedVR service."""
|
| 94 |
return self.seedvr_gpus
|
| 95 |
+
|
| 96 |
+
def get_vincie_devices(self) -> List[int]:
|
| 97 |
+
"""Returns the list of GPU indices for the VINCIE service."""
|
| 98 |
+
return self.vincie_gpus
|
| 99 |
|
| 100 |
def requires_memory_swap(self) -> bool:
|
| 101 |
+
"""
|
| 102 |
+
Determines if memory swapping is necessary because multiple services
|
| 103 |
+
are sharing the same primary GPU.
|
| 104 |
+
The dedicated VAE GPU is not considered for swapping logic.
|
| 105 |
+
"""
|
| 106 |
+
# Collect all GPUs used by the main, memory-intensive parts of the services
|
| 107 |
+
all_main_allocations = self.ltx_main_gpus + self.seedvr_gpus + self.vincie_gpus
|
| 108 |
+
|
| 109 |
+
# Count how many services are allocated to each unique GPU
|
| 110 |
+
gpu_usage_count = {}
|
| 111 |
+
for gpu_idx in all_main_allocations:
|
| 112 |
+
gpu_usage_count[gpu_idx] = gpu_usage_count.get(gpu_idx, 0) + 1
|
| 113 |
+
|
| 114 |
+
# Swapping is required if any GPU is used by more than one service
|
| 115 |
+
for gpu_idx in gpu_usage_count:
|
| 116 |
+
if gpu_usage_count[gpu_idx] > 1:
|
| 117 |
+
logging.warning(f"Memory swapping is ACTIVE because GPU {gpu_idx} is shared by multiple services.")
|
| 118 |
+
return True
|
| 119 |
+
|
| 120 |
+
logging.info("Memory swapping is INACTIVE. Each service has dedicated primary GPUs.")
|
| 121 |
+
return False
|
| 122 |
|
| 123 |
+
# --- Singleton Instantiation ---
|
| 124 |
+
# This global instance is created once and imported by all other modules.
|
| 125 |
gpu_manager = GPUManager()
|
api/ltx/ltx_utils.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: api/ltx/ltx_utils.py
|
| 2 |
+
# DESCRIPTION: Comprehensive, self-contained utility module for the LTX pipeline.
|
| 3 |
+
# Handles dependency path injection, model loading, data structures, and helper functions.
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
import random
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
import time
|
| 10 |
+
import sys
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Dict, Optional, Tuple, Union
|
| 13 |
+
from dataclasses import dataclass
|
| 14 |
+
from enum import Enum, auto
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
import torch
|
| 18 |
+
import torchvision.transforms.functional as TVF
|
| 19 |
+
from PIL import Image
|
| 20 |
+
from safetensors import safe_open
|
| 21 |
+
from transformers import T5EncoderModel, T5Tokenizer
|
| 22 |
+
|
| 23 |
+
# ==============================================================================
|
| 24 |
+
# --- CRITICAL: DEPENDENCY PATH INJECTION ---
|
| 25 |
+
# ==============================================================================
|
| 26 |
+
|
| 27 |
+
# Define o caminho para o repositório clonado
|
| 28 |
+
LTX_VIDEO_REPO_DIR = Path("/data/LTX-Video")
|
| 29 |
+
|
| 30 |
+
def add_deps_to_path():
|
| 31 |
+
"""
|
| 32 |
+
Adiciona o diretório do repositório LTX ao sys.path para garantir que suas
|
| 33 |
+
bibliotecas possam ser importadas.
|
| 34 |
+
"""
|
| 35 |
+
repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
|
| 36 |
+
if repo_path not in sys.path:
|
| 37 |
+
sys.path.insert(0, repo_path)
|
| 38 |
+
logging.info(f"[ltx_utils] LTX-Video repository added to sys.path: {repo_path}")
|
| 39 |
+
|
| 40 |
+
# Executa a função imediatamente para configurar o ambiente antes de qualquer importação.
|
| 41 |
+
add_deps_to_path()
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# ==============================================================================
|
| 45 |
+
# --- IMPORTAÇÕES DA BIBLIOTECA LTX-VIDEO (Após configuração do path) ---
|
| 46 |
+
# ==============================================================================
|
| 47 |
+
try:
|
| 48 |
+
from ltx_video.pipelines.pipeline_ltx_video import LTXVideoPipeline
|
| 49 |
+
from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler
|
| 50 |
+
from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
|
| 51 |
+
from ltx_video.models.transformers.transformer3d import Transformer3DModel
|
| 52 |
+
from ltx_video.models.transformers.symmetric_patchifier import SymmetricPatchifier
|
| 53 |
+
from ltx_video.schedulers.rf import RectifiedFlowScheduler
|
| 54 |
+
from ltx_video.models.autoencoders.vae_encode import un_normalize_latents, normalize_latents
|
| 55 |
+
import ltx_video.pipelines.crf_compressor as crf_compressor
|
| 56 |
+
except ImportError as e:
|
| 57 |
+
raise ImportError(f"Could not import from LTX-Video library even after setting sys.path. Check repo integrity at '{LTX_VIDEO_REPO_DIR}'. Error: {e}")
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# ==============================================================================
|
| 61 |
+
# --- ESTRUTURAS DE DADOS E ENUMS (Centralizadas aqui) ---
|
| 62 |
+
# ==============================================================================
|
| 63 |
+
|
| 64 |
+
@dataclass
|
| 65 |
+
class ConditioningItem:
|
| 66 |
+
"""Define a single frame-conditioning item, used to guide the generation pipeline."""
|
| 67 |
+
media_item: torch.Tensor
|
| 68 |
+
media_frame_number: int
|
| 69 |
+
conditioning_strength: float
|
| 70 |
+
media_x: Optional[int] = None
|
| 71 |
+
media_y: Optional[int] = None
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class SkipLayerStrategy(Enum):
|
| 75 |
+
"""Defines the strategy for how spatio-temporal guidance is applied across transformer blocks."""
|
| 76 |
+
AttentionSkip = auto()
|
| 77 |
+
AttentionValues = auto()
|
| 78 |
+
Residual = auto()
|
| 79 |
+
TransformerBlock = auto()
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# ==============================================================================
|
| 83 |
+
# --- FUNÇÕES DE CONSTRUÇÃO DE MODELO E PIPELINE ---
|
| 84 |
+
# ==============================================================================
|
| 85 |
+
|
| 86 |
+
def create_latent_upsampler(latent_upsampler_model_path: str, device: str) -> LatentUpsampler:
|
| 87 |
+
"""Loads the Latent Upsampler model from a checkpoint path."""
|
| 88 |
+
logging.info(f"Loading Latent Upsampler from: {latent_upsampler_model_path} to device: {device}")
|
| 89 |
+
latent_upsampler = LatentUpsampler.from_pretrained(latent_upsampler_model_path)
|
| 90 |
+
latent_upsampler.to(device)
|
| 91 |
+
latent_upsampler.eval()
|
| 92 |
+
return latent_upsampler
|
| 93 |
+
|
| 94 |
+
def build_ltx_pipeline_on_cpu(config: Dict) -> Tuple[LTXVideoPipeline, Optional[torch.nn.Module]]:
|
| 95 |
+
"""Builds the complete LTX pipeline and upsampler on the CPU."""
|
| 96 |
+
t0 = time.perf_counter()
|
| 97 |
+
logging.info("Building LTX pipeline on CPU...")
|
| 98 |
+
|
| 99 |
+
ckpt_path = Path(config["checkpoint_path"])
|
| 100 |
+
if not ckpt_path.is_file():
|
| 101 |
+
raise FileNotFoundError(f"Main checkpoint file not found: {ckpt_path}")
|
| 102 |
+
|
| 103 |
+
with safe_open(ckpt_path, framework="pt") as f:
|
| 104 |
+
metadata = f.metadata() or {}
|
| 105 |
+
config_str = metadata.get("config", "{}")
|
| 106 |
+
configs = json.loads(config_str)
|
| 107 |
+
allowed_inference_steps = configs.get("allowed_inference_steps")
|
| 108 |
+
|
| 109 |
+
vae = CausalVideoAutoencoder.from_pretrained(ckpt_path).to("cpu")
|
| 110 |
+
transformer = Transformer3DModel.from_pretrained(ckpt_path).to("cpu")
|
| 111 |
+
scheduler = RectifiedFlowScheduler.from_pretrained(ckpt_path)
|
| 112 |
+
|
| 113 |
+
text_encoder_path = config["text_encoder_model_name_or_path"]
|
| 114 |
+
text_encoder = T5EncoderModel.from_pretrained(text_encoder_path, subfolder="text_encoder").to("cpu")
|
| 115 |
+
tokenizer = T5Tokenizer.from_pretrained(text_encoder_path, subfolder="tokenizer")
|
| 116 |
+
patchifier = SymmetricPatchifier(patch_size=1)
|
| 117 |
+
|
| 118 |
+
precision = config.get("precision", "bfloat16")
|
| 119 |
+
if precision == "bfloat16":
|
| 120 |
+
vae.to(torch.bfloat16)
|
| 121 |
+
transformer.to(torch.bfloat16)
|
| 122 |
+
text_encoder.to(torch.bfloat16)
|
| 123 |
+
|
| 124 |
+
pipeline = LTXVideoPipeline(
|
| 125 |
+
transformer=transformer, patchifier=patchifier, text_encoder=text_encoder,
|
| 126 |
+
tokenizer=tokenizer, scheduler=scheduler, vae=vae,
|
| 127 |
+
allowed_inference_steps=allowed_inference_steps,
|
| 128 |
+
prompt_enhancer_image_caption_model=None, prompt_enhancer_image_caption_processor=None,
|
| 129 |
+
prompt_enhancer_llm_model=None, prompt_enhancer_llm_tokenizer=None,
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
latent_upsampler = None
|
| 133 |
+
if config.get("spatial_upscaler_model_path"):
|
| 134 |
+
spatial_path = config["spatial_upscaler_model_path"]
|
| 135 |
+
latent_upsampler = create_latent_upsampler(spatial_path, device="cpu")
|
| 136 |
+
if precision == "bfloat16":
|
| 137 |
+
latent_upsampler.to(torch.bfloat16)
|
| 138 |
+
|
| 139 |
+
logging.info(f"LTX pipeline built on CPU in {time.perf_counter() - t0:.2f}s")
|
| 140 |
+
return pipeline, latent_upsampler
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
# ==============================================================================
|
| 144 |
+
# --- FUNÇÕES AUXILIARES (Latent Processing, Seed, Image Prep) ---
|
| 145 |
+
# ==============================================================================
|
| 146 |
+
|
| 147 |
+
def adain_filter_latent(
|
| 148 |
+
latents: torch.Tensor, reference_latents: torch.Tensor, factor=1.0
|
| 149 |
+
) -> torch.Tensor:
|
| 150 |
+
"""Applies AdaIN to transfer the style from a reference latent to another."""
|
| 151 |
+
result = latents.clone()
|
| 152 |
+
for i in range(latents.size(0)):
|
| 153 |
+
for c in range(latents.size(1)):
|
| 154 |
+
r_sd, r_mean = torch.std_mean(reference_latents[i, c], dim=None)
|
| 155 |
+
i_sd, i_mean = torch.std_mean(result[i, c], dim=None)
|
| 156 |
+
if i_sd > 1e-6:
|
| 157 |
+
result[i, c] = ((result[i, c] - i_mean) / i_sd) * r_sd + r_mean
|
| 158 |
+
return torch.lerp(latents, result, factor)
|
| 159 |
+
|
| 160 |
+
def seed_everything(seed: int):
|
| 161 |
+
"""Sets the seed for reproducibility."""
|
| 162 |
+
random.seed(seed)
|
| 163 |
+
os.environ['PYTHONHASHSEED'] = str(seed)
|
| 164 |
+
np.random.seed(seed)
|
| 165 |
+
torch.manual_seed(seed)
|
| 166 |
+
torch.cuda.manual_seed_all(seed)
|
| 167 |
+
torch.backends.cudnn.deterministic = True
|
| 168 |
+
torch.backends.cudnn.benchmark = False
|
| 169 |
+
|
| 170 |
+
def load_image_to_tensor_with_resize_and_crop(
|
| 171 |
+
image_input: Union[str, Image.Image],
|
| 172 |
+
target_height: int,
|
| 173 |
+
target_width: int,
|
| 174 |
+
) -> torch.Tensor:
|
| 175 |
+
"""Loads and processes an image into a 5D tensor compatible with the LTX pipeline."""
|
| 176 |
+
if isinstance(image_input, str):
|
| 177 |
+
image = Image.open(image_input).convert("RGB")
|
| 178 |
+
elif isinstance(image_input, Image.Image):
|
| 179 |
+
image = image_input
|
| 180 |
+
else:
|
| 181 |
+
raise ValueError("image_input must be a file path or a PIL Image object")
|
| 182 |
+
|
| 183 |
+
input_width, input_height = image.size
|
| 184 |
+
aspect_ratio_target = target_width / target_height
|
| 185 |
+
aspect_ratio_frame = input_width / input_height
|
| 186 |
+
|
| 187 |
+
if aspect_ratio_frame > aspect_ratio_target:
|
| 188 |
+
new_width, new_height = int(input_height * aspect_ratio_target), input_height
|
| 189 |
+
x_start, y_start = (input_width - new_width) // 2, 0
|
| 190 |
+
else:
|
| 191 |
+
new_width, new_height = input_width, int(input_width / aspect_ratio_target)
|
| 192 |
+
x_start, y_start = 0, (input_height - new_height) // 2
|
| 193 |
+
|
| 194 |
+
image = image.crop((x_start, y_start, x_start + new_width, y_start + new_height))
|
| 195 |
+
image = image.resize((target_width, target_height), Image.Resampling.LANCZOS)
|
| 196 |
+
|
| 197 |
+
frame_tensor = TVF.to_tensor(image)
|
| 198 |
+
frame_tensor = TVF.gaussian_blur(frame_tensor, kernel_size=(3, 3))
|
| 199 |
+
|
| 200 |
+
frame_tensor_hwc = frame_tensor.permute(1, 2, 0)
|
| 201 |
+
frame_tensor_hwc = crf_compressor.compress(frame_tensor_hwc)
|
| 202 |
+
frame_tensor = frame_tensor_hwc.permute(2, 0, 1)
|
| 203 |
+
# Normalize to [-1, 1] range
|
| 204 |
+
frame_tensor = (frame_tensor * 2.0) - 1.0
|
| 205 |
+
|
| 206 |
+
# Create 5D tensor: (batch_size=1, channels=3, num_frames=1, height, width)
|
| 207 |
+
return frame_tensor.unsqueeze(0).unsqueeze(2)
|
api/ltx_pool_manager
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: api/ltx_pool_manager.py
|
| 2 |
+
# DESCRIPTION: A singleton pool manager for the LTX-Video pipeline.
|
| 3 |
+
# This module is the "secret weapon": it handles loading, device placement,
|
| 4 |
+
# and applies a runtime monkey patch to the LTX pipeline for full control
|
| 5 |
+
# and compatibility with the ADUC-SDR architecture, especially for latent conditioning.
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
import time
|
| 9 |
+
import os
|
| 10 |
+
import yaml
|
| 11 |
+
import json
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from typing import List, Optional, Tuple, Union
|
| 14 |
+
from dataclasses import dataclass
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 18 |
+
from huggingface_hub import hf_hub_download
|
| 19 |
+
|
| 20 |
+
# --- Importações da nossa arquitetura ---
|
| 21 |
+
from api.gpu_manager import gpu_manager
|
| 22 |
+
|
| 23 |
+
# --- Importações da biblioteca LTX-Video e Utilitários ---
|
| 24 |
+
from api.ltx.ltx_utils import build_ltx_pipeline_on_cpu
|
| 25 |
+
from ltx_video.pipelines.pipeline_ltx_video import LTXVideoPipeline
|
| 26 |
+
from ltx_video.models.autoencoders.vae_encode import vae_encode, latent_to_pixel_coords
|
| 27 |
+
|
| 28 |
+
# ==============================================================================
|
| 29 |
+
# --- DEFINIÇÃO DOS NOSSOS DATACLASSES DE CONDICIONAMENTO ---
|
| 30 |
+
# ==============================================================================
|
| 31 |
+
|
| 32 |
+
@dataclass
|
| 33 |
+
class ConditioningItem:
|
| 34 |
+
"""Nosso Data Class para condicionamento com TENSORES DE PIXEL (de imagens)."""
|
| 35 |
+
pixel_tensor: torch.Tensor
|
| 36 |
+
media_frame_number: int
|
| 37 |
+
conditioning_strength: float
|
| 38 |
+
|
| 39 |
+
@dataclass
|
| 40 |
+
class LatentConditioningItem:
|
| 41 |
+
"""Nossa "arma secreta": um Data Class para condicionamento com TENSORES LATENTES (de overlap)."""
|
| 42 |
+
latent_tensor: torch.Tensor
|
| 43 |
+
media_frame_number: int
|
| 44 |
+
conditioning_strength: float
|
| 45 |
+
|
| 46 |
+
# ==============================================================================
|
| 47 |
+
# --- O MONKEY PATCH ---
|
| 48 |
+
# Nossa versão customizada de `prepare_conditioning` que entende ambos os Data Classes.
|
| 49 |
+
# ==============================================================================
|
| 50 |
+
|
| 51 |
+
def _aduc_prepare_conditioning_patch(
|
| 52 |
+
self: "LTXVideoPipeline",
|
| 53 |
+
conditioning_items: Optional[List[Union[ConditioningItem, LatentConditioningItem]]],
|
| 54 |
+
init_latents: torch.Tensor,
|
| 55 |
+
num_frames: int, height: int, width: int, # Assinatura mantida para compatibilidade
|
| 56 |
+
vae_per_channel_normalize: bool = False,
|
| 57 |
+
generator=None,
|
| 58 |
+
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
|
| 59 |
+
|
| 60 |
+
# Se não houver itens, apenas "patchify" os latentes iniciais e retorna.
|
| 61 |
+
if not conditioning_items:
|
| 62 |
+
latents, latent_coords = self.patchifier.patchify(latents=init_latents)
|
| 63 |
+
pixel_coords = latent_to_pixel_coords(latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
|
| 64 |
+
return latents, pixel_coords, None, 0
|
| 65 |
+
|
| 66 |
+
# Prepara máscaras e listas para acumular os tensores de condição.
|
| 67 |
+
init_conditioning_mask = torch.zeros_like(init_latents[:, 0, ...], dtype=torch.float32, device=init_latents.device)
|
| 68 |
+
extra_conditioning_latents, extra_conditioning_pixel_coords, extra_conditioning_mask = [], [], []
|
| 69 |
+
extra_conditioning_num_latents = 0
|
| 70 |
+
|
| 71 |
+
for item in conditioning_items:
|
| 72 |
+
strength = item.conditioning_strength
|
| 73 |
+
media_frame_number = item.media_frame_number
|
| 74 |
+
|
| 75 |
+
# --- LÓGICA PRINCIPAL DO PATCH ---
|
| 76 |
+
if isinstance(item, ConditioningItem):
|
| 77 |
+
# Item é um tensor de PIXEL (ex: imagem inicial).
|
| 78 |
+
logging.debug("Patch ADUC: Processando ConditioningItem (pixels).")
|
| 79 |
+
# Encodifica o tensor de pixel para o espaço latente usando o VAE.
|
| 80 |
+
# Garante que a operação ocorra no dispositivo do VAE para evitar erros.
|
| 81 |
+
pixel_tensor_on_vae_device = item.pixel_tensor.to(device=self.vae.device, dtype=self.vae.dtype)
|
| 82 |
+
media_item_latents = vae_encode(pixel_tensor_on_vae_device, self.vae, vae_per_channel_normalize=vae_per_channel_normalize)
|
| 83 |
+
# Traz o resultado de volta para o dispositivo principal (do Transformer).
|
| 84 |
+
media_item_latents = media_item_latents.to(device=init_latents.device, dtype=init_latents.dtype)
|
| 85 |
+
|
| 86 |
+
elif isinstance(item, LatentConditioningItem):
|
| 87 |
+
# Item já é um tensor LATENTE (ex: overlap de chunks).
|
| 88 |
+
logging.debug("Patch ADUC: Processando LatentConditioningItem (latentes).")
|
| 89 |
+
# Apenas garante que o tensor está no dispositivo e tipo corretos.
|
| 90 |
+
media_item_latents = item.latent_tensor.to(device=init_latents.device, dtype=init_latents.dtype)
|
| 91 |
+
else:
|
| 92 |
+
logging.warning(f"Patch ADUC: Item de condicionamento de tipo desconhecido '{type(item)}' será ignorado.")
|
| 93 |
+
continue
|
| 94 |
+
|
| 95 |
+
# Lógica original da pipeline, agora operando sobre `media_item_latents` garantido.
|
| 96 |
+
if media_frame_number == 0:
|
| 97 |
+
f_l, h_l, w_l = media_item_latents.shape[-3:]
|
| 98 |
+
init_latents[..., :f_l, :h_l, :w_l] = torch.lerp(init_latents[..., :f_l, :h_l, :w_l], media_item_latents, strength)
|
| 99 |
+
init_conditioning_mask[..., :f_l, :h_l, :w_l] = strength
|
| 100 |
+
else: # Condicionamento em frames intermediários
|
| 101 |
+
noise = randn_tensor(media_item_latents.shape, generator=generator, device=media_item_latents.device, dtype=media_item_latents.dtype)
|
| 102 |
+
media_item_latents = torch.lerp(noise, media_item_latents, strength)
|
| 103 |
+
patched_latents, latent_coords = self.patchifier.patchify(latents=media_item_latents)
|
| 104 |
+
pixel_coords = latent_to_pixel_coords(latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
|
| 105 |
+
pixel_coords[:, 0] += media_frame_number
|
| 106 |
+
extra_conditioning_num_latents += patched_latents.shape[1]
|
| 107 |
+
new_mask = torch.full(patched_latents.shape[:2], strength, dtype=torch.float32, device=init_latents.device)
|
| 108 |
+
extra_conditioning_latents.append(patched_latents)
|
| 109 |
+
extra_conditioning_pixel_coords.append(pixel_coords)
|
| 110 |
+
extra_conditioning_mask.append(new_mask)
|
| 111 |
+
|
| 112 |
+
# Finaliza o processo de patchifying e concatenação dos tensores.
|
| 113 |
+
init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents)
|
| 114 |
+
init_pixel_coords = latent_to_pixel_coords(init_latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
|
| 115 |
+
init_conditioning_mask, _ = self.patchifier.patchify(latents=init_conditioning_mask.unsqueeze(1))
|
| 116 |
+
init_conditioning_mask = init_conditioning_mask.squeeze(-1)
|
| 117 |
+
|
| 118 |
+
if extra_conditioning_latents:
|
| 119 |
+
init_latents = torch.cat([*extra_conditioning_latents, init_latents], dim=1)
|
| 120 |
+
init_pixel_coords = torch.cat([*extra_conditioning_pixel_coords, init_pixel_coords], dim=2)
|
| 121 |
+
init_conditioning_mask = torch.cat([*extra_conditioning_mask, init_conditioning_mask], dim=1)
|
| 122 |
+
|
| 123 |
+
return init_latents, init_pixel_coords, init_conditioning_mask, extra_conditioning_num_latents
|
| 124 |
+
|
| 125 |
+
# ==============================================================================
|
| 126 |
+
# --- LTX WORKER E POOL MANAGER ---
|
| 127 |
+
# ==============================================================================
|
| 128 |
+
|
| 129 |
+
class LTXWorker:
|
| 130 |
+
"""Gerencia uma instância do LTX Pipeline em um par de GPUs (main + vae)."""
|
| 131 |
+
def __init__(self, main_device_str: str, vae_device_str: str, config: dict):
|
| 132 |
+
self.main_device = torch.device(main_device_str)
|
| 133 |
+
self.vae_device = torch.device(vae_device_str)
|
| 134 |
+
self.config = config
|
| 135 |
+
self.pipeline: LTXVideoPipeline = None
|
| 136 |
+
|
| 137 |
+
self._load_and_patch_pipeline()
|
| 138 |
+
|
| 139 |
+
def _load_and_patch_pipeline(self):
|
| 140 |
+
logging.info(f"[LTXWorker-{self.main_device}] Carregando pipeline LTX para a CPU...")
|
| 141 |
+
self.pipeline, _ = build_ltx_pipeline_on_cpu(self.config)
|
| 142 |
+
|
| 143 |
+
logging.info(f"[LTXWorker-{self.main_device}] Movendo pipeline para GPUs (Main: {self.main_device}, VAE: {self.vae_device})...")
|
| 144 |
+
self.pipeline.to(self.main_device)
|
| 145 |
+
self.pipeline.vae.to(self.vae_device)
|
| 146 |
+
|
| 147 |
+
logging.info(f"[LTXWorker-{self.main_device}] Aplicando patch ADUC-SDR na função 'prepare_conditioning'...")
|
| 148 |
+
# Substitui o método da instância pelo nosso patch
|
| 149 |
+
self.pipeline.prepare_conditioning = _aduc_prepare_conditioning_patch.__get__(self.pipeline, LTXVideoPipeline)
|
| 150 |
+
logging.info(f"[LTXWorker-{self.main_device}] ✅ Pipeline 'quente', corrigido e pronto para uso.")
|
| 151 |
+
|
| 152 |
+
class LTXPoolManager:
|
| 153 |
+
_instance = None
|
| 154 |
+
_lock = threading.Lock()
|
| 155 |
+
|
| 156 |
+
def __new__(cls, *args, **kwargs):
|
| 157 |
+
with cls._lock:
|
| 158 |
+
if cls._instance is None:
|
| 159 |
+
cls._instance = super().__new__(cls)
|
| 160 |
+
cls._instance._initialized = False
|
| 161 |
+
return cls._instance
|
| 162 |
+
|
| 163 |
+
def __init__(self):
|
| 164 |
+
if self._initialized: return
|
| 165 |
+
with self._lock:
|
| 166 |
+
if self._initialized: return
|
| 167 |
+
|
| 168 |
+
logging.info("⚙️ Inicializando LTXPoolManager Singleton...")
|
| 169 |
+
self.config = self._load_config()
|
| 170 |
+
self._resolve_model_paths_from_cache()
|
| 171 |
+
|
| 172 |
+
main_device_str = str(gpu_manager.get_ltx_device())
|
| 173 |
+
vae_device_str = str(gpu_manager.get_ltx_vae_device())
|
| 174 |
+
|
| 175 |
+
self.worker = LTXWorker(main_device_str, vae_device_str, self.config)
|
| 176 |
+
|
| 177 |
+
self._initialized = True
|
| 178 |
+
logging.info("✅ LTXPoolManager pronto.")
|
| 179 |
+
|
| 180 |
+
def _load_config(self) -> Dict:
|
| 181 |
+
"""Carrega a configuração YAML principal do LTX."""
|
| 182 |
+
config_path = Path("/data/LTX-Video/configs/ltxv-13b-0.9.8-distilled-fp8.yaml")
|
| 183 |
+
with open(config_path, "r") as file:
|
| 184 |
+
return yaml.safe_load(file)
|
| 185 |
+
|
| 186 |
+
def _resolve_model_paths_from_cache(self):
|
| 187 |
+
"""Garante que a configuração em memória tenha os caminhos absolutos para os modelos no cache."""
|
| 188 |
+
try:
|
| 189 |
+
main_ckpt_path = hf_hub_download(repo_id="Lightricks/LTX-Video", filename=self.config["checkpoint_path"])
|
| 190 |
+
self.config["checkpoint_path"] = main_ckpt_path
|
| 191 |
+
if self.config.get("spatial_upscaler_model_path"):
|
| 192 |
+
upscaler_path = hf_hub_download(repo_id="Lightricks/LTX-Video", filename=self.config["spatial_upscaler_model_path"])
|
| 193 |
+
self.config["spatial_upscaler_model_path"] = upscaler_path
|
| 194 |
+
except Exception as e:
|
| 195 |
+
logging.critical(f"Falha ao resolver caminhos de modelo LTX. O setup.py foi executado? Erro: {e}", exc_info=True)
|
| 196 |
+
raise
|
| 197 |
+
|
| 198 |
+
def get_pipeline(self) -> LTXVideoPipeline:
|
| 199 |
+
"""Retorna a instância do pipeline, já carregada e corrigida."""
|
| 200 |
+
return self.worker.pipeline
|
| 201 |
+
|
| 202 |
+
# --- Instância Singleton Global ---
|
| 203 |
+
# A aplicação importará esta instância para interagir com o LTX.
|
| 204 |
+
try:
|
| 205 |
+
ltx_pool_manager = LTXPoolManager()
|
| 206 |
+
except Exception as e:
|
| 207 |
+
logging.critical("Falha crítica ao inicializar o LTXPoolManager.", exc_info=True)
|
| 208 |
+
ltx_pool_manager = None
|
api/ltx_server_refactored.py
DELETED
|
@@ -1,367 +0,0 @@
|
|
| 1 |
-
# ltx_server_refactored.py — VideoService (Modular Version with Simple Overlap Chunking)
|
| 2 |
-
|
| 3 |
-
# --- 0. WARNINGS E AMBIENTE ---
|
| 4 |
-
import warnings
|
| 5 |
-
warnings.filterwarnings("ignore", category=UserWarning)
|
| 6 |
-
warnings.filterwarnings("ignore", category=FutureWarning)
|
| 7 |
-
warnings.filterwarnings("ignore", message=".*")
|
| 8 |
-
from huggingface_hub import logging
|
| 9 |
-
logging.set_verbosity_error()
|
| 10 |
-
logging.set_verbosity_warning()
|
| 11 |
-
logging.set_verbosity_info()
|
| 12 |
-
logging.set_verbosity_debug()
|
| 13 |
-
LTXV_DEBUG=1
|
| 14 |
-
LTXV_FRAME_LOG_EVERY=8
|
| 15 |
-
import os, subprocess, shlex, tempfile
|
| 16 |
-
import torch
|
| 17 |
-
import json
|
| 18 |
-
import numpy as np
|
| 19 |
-
import random
|
| 20 |
-
import os
|
| 21 |
-
import shlex
|
| 22 |
-
import yaml
|
| 23 |
-
from typing import List, Dict
|
| 24 |
-
from pathlib import Path
|
| 25 |
-
import imageio
|
| 26 |
-
from PIL import Image
|
| 27 |
-
import tempfile
|
| 28 |
-
from huggingface_hub import hf_hub_download
|
| 29 |
-
import sys
|
| 30 |
-
import subprocess
|
| 31 |
-
import gc
|
| 32 |
-
import shutil
|
| 33 |
-
import contextlib
|
| 34 |
-
import time
|
| 35 |
-
import traceback
|
| 36 |
-
from einops import rearrange
|
| 37 |
-
import torch.nn.functional as F
|
| 38 |
-
from managers.vae_manager import vae_manager_singleton
|
| 39 |
-
from tools.video_encode_tool import video_encode_tool_singleton
|
| 40 |
-
DEPS_DIR = Path("/data")
|
| 41 |
-
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
|
| 42 |
-
|
| 43 |
-
# (Todas as funções de setup, helpers e inicialização da classe permanecem inalteradas)
|
| 44 |
-
# ... (run_setup, add_deps_to_path, _query_gpu_processes_via_nvml, etc.)
|
| 45 |
-
def run_setup():
|
| 46 |
-
setup_script_path = "setup.py"
|
| 47 |
-
if not os.path.exists(setup_script_path):
|
| 48 |
-
print("[DEBUG] 'setup.py' não encontrado. Pulando clonagem de dependências.")
|
| 49 |
-
return
|
| 50 |
-
try:
|
| 51 |
-
print("[DEBUG] Executando setup.py para dependências...")
|
| 52 |
-
subprocess.run([sys.executable, setup_script_path], check=True)
|
| 53 |
-
print("[DEBUG] Setup concluído com sucesso.")
|
| 54 |
-
except subprocess.CalledProcessError as e:
|
| 55 |
-
print(f"[DEBUG] ERRO no setup.py (code {e.returncode}). Abortando.")
|
| 56 |
-
sys.exit(1)
|
| 57 |
-
if not LTX_VIDEO_REPO_DIR.exists():
|
| 58 |
-
print(f"[DEBUG] Repositório não encontrado em {LTX_VIDEO_REPO_DIR}. Rodando setup...")
|
| 59 |
-
run_setup()
|
| 60 |
-
def add_deps_to_path():
|
| 61 |
-
repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
|
| 62 |
-
if str(LTX_VIDEO_REPO_DIR.resolve()) not in sys.path:
|
| 63 |
-
sys.path.insert(0, repo_path)
|
| 64 |
-
print(f"[DEBUG] Repo adicionado ao sys.path: {repo_path}")
|
| 65 |
-
def calculate_padding(orig_h, orig_w, target_h, target_w):
|
| 66 |
-
pad_h = target_h - orig_h
|
| 67 |
-
pad_w = target_w - orig_w
|
| 68 |
-
pad_top = pad_h // 2
|
| 69 |
-
pad_bottom = pad_h - pad_top
|
| 70 |
-
pad_left = pad_w // 2
|
| 71 |
-
pad_right = pad_w - pad_left
|
| 72 |
-
return (pad_left, pad_right, pad_top, pad_bottom)
|
| 73 |
-
def log_tensor_info(tensor, name="Tensor"):
|
| 74 |
-
if not isinstance(tensor, torch.Tensor):
|
| 75 |
-
print(f"\n[INFO] '{name}' não é tensor.")
|
| 76 |
-
return
|
| 77 |
-
print(f"\n--- Tensor: {name} ---")
|
| 78 |
-
print(f" - Shape: {tuple(tensor.shape)}")
|
| 79 |
-
print(f" - Dtype: {tensor.dtype}")
|
| 80 |
-
print(f" - Device: {tensor.device}")
|
| 81 |
-
if tensor.numel() > 0:
|
| 82 |
-
try:
|
| 83 |
-
print(f" - Min: {tensor.min().item():.4f} Max: {tensor.max().item():.4f} Mean: {tensor.mean().item():.4f}")
|
| 84 |
-
except Exception:
|
| 85 |
-
pass
|
| 86 |
-
print("------------------------------------------\n")
|
| 87 |
-
|
| 88 |
-
add_deps_to_path()
|
| 89 |
-
from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline
|
| 90 |
-
from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
|
| 91 |
-
from ltx_video.models.autoencoders.vae_encode import un_normalize_latents, normalize_latents
|
| 92 |
-
from ltx_video.pipelines.pipeline_ltx_video import adain_filter_latent
|
| 93 |
-
from api.ltx.inference import (
|
| 94 |
-
create_ltx_video_pipeline,
|
| 95 |
-
create_latent_upsampler,
|
| 96 |
-
load_image_to_tensor_with_resize_and_crop,
|
| 97 |
-
seed_everething,
|
| 98 |
-
)
|
| 99 |
-
|
| 100 |
-
class VideoService:
|
| 101 |
-
def __init__(self):
|
| 102 |
-
t0 = time.perf_counter()
|
| 103 |
-
print("[DEBUG] Inicializando VideoService...")
|
| 104 |
-
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 105 |
-
self.config = self._load_config()
|
| 106 |
-
self.pipeline, self.latent_upsampler = self._load_models()
|
| 107 |
-
self.pipeline.to(self.device)
|
| 108 |
-
if self.latent_upsampler:
|
| 109 |
-
self.latent_upsampler.to(self.device)
|
| 110 |
-
self._apply_precision_policy()
|
| 111 |
-
vae_manager_singleton.attach_pipeline(
|
| 112 |
-
self.pipeline,
|
| 113 |
-
device=self.device,
|
| 114 |
-
autocast_dtype=self.runtime_autocast_dtype
|
| 115 |
-
)
|
| 116 |
-
self._tmp_dirs = set()
|
| 117 |
-
print(f"[DEBUG] VideoService pronto. boot_time={time.perf_counter()-t0:.3f}s")
|
| 118 |
-
|
| 119 |
-
def _load_config(self):
|
| 120 |
-
base = LTX_VIDEO_REPO_DIR / "configs"
|
| 121 |
-
config_path = base / "ltxv-13b-0.9.8-distilled-fp8.yaml"
|
| 122 |
-
with open(config_path, "r") as file:
|
| 123 |
-
return yaml.safe_load(file)
|
| 124 |
-
|
| 125 |
-
def finalize(self, keep_paths=None, extra_paths=None, clear_gpu=True):
|
| 126 |
-
print("[DEBUG] Finalize: iniciando limpeza...")
|
| 127 |
-
keep = set(keep_paths or []); extras = set(extra_paths or [])
|
| 128 |
-
gc.collect()
|
| 129 |
-
try:
|
| 130 |
-
if clear_gpu and torch.cuda.is_available():
|
| 131 |
-
torch.cuda.empty_cache()
|
| 132 |
-
try:
|
| 133 |
-
torch.cuda.ipc_collect()
|
| 134 |
-
except Exception:
|
| 135 |
-
pass
|
| 136 |
-
except Exception as e:
|
| 137 |
-
print(f"[DEBUG] Finalize: limpeza GPU falhou: {e}")
|
| 138 |
-
try:
|
| 139 |
-
self._log_gpu_memory("Após finalize")
|
| 140 |
-
except Exception as e:
|
| 141 |
-
print(f"[DEBUG] Log GPU pós-finalize falhou: {e}")
|
| 142 |
-
|
| 143 |
-
def _load_models(self):
|
| 144 |
-
t0 = time.perf_counter()
|
| 145 |
-
LTX_REPO = "Lightricks/LTX-Video"
|
| 146 |
-
print("[DEBUG] Baixando checkpoint principal...")
|
| 147 |
-
distilled_model_path = hf_hub_download(
|
| 148 |
-
repo_id=LTX_REPO,
|
| 149 |
-
filename=self.config["checkpoint_path"],
|
| 150 |
-
local_dir=os.getenv("HF_HOME"),
|
| 151 |
-
cache_dir=os.getenv("HF_HOME_CACHE"),
|
| 152 |
-
token=os.getenv("HF_TOKEN"),
|
| 153 |
-
)
|
| 154 |
-
self.config["checkpoint_path"] = distilled_model_path
|
| 155 |
-
print(f"[DEBUG] Checkpoint em: {distilled_model_path}")
|
| 156 |
-
|
| 157 |
-
print("[DEBUG] Baixando upscaler espacial...")
|
| 158 |
-
spatial_upscaler_path = hf_hub_download(
|
| 159 |
-
repo_id=LTX_REPO,
|
| 160 |
-
filename=self.config["spatial_upscaler_model_path"],
|
| 161 |
-
local_dir=os.getenv("HF_HOME"),
|
| 162 |
-
cache_dir=os.getenv("HF_HOME_CACHE"),
|
| 163 |
-
token=os.getenv("HF_TOKEN")
|
| 164 |
-
)
|
| 165 |
-
self.config["spatial_upscaler_model_path"] = spatial_upscaler_path
|
| 166 |
-
print(f"[DEBUG] Upscaler em: {spatial_upscaler_path}")
|
| 167 |
-
|
| 168 |
-
print("[DEBUG] Construindo pipeline...")
|
| 169 |
-
pipeline = create_ltx_video_pipeline(
|
| 170 |
-
ckpt_path=self.config["checkpoint_path"],
|
| 171 |
-
precision=self.config["precision"],
|
| 172 |
-
text_encoder_model_name_or_path=self.config["text_encoder_model_name_or_path"],
|
| 173 |
-
sampler=self.config["sampler"],
|
| 174 |
-
device="cpu",
|
| 175 |
-
enhance_prompt=False,
|
| 176 |
-
prompt_enhancer_image_caption_model_name_or_path=self.config["prompt_enhancer_image_caption_model_name_or_path"],
|
| 177 |
-
prompt_enhancer_llm_model_name_or_path=self.config["prompt_enhancer_llm_model_name_or_path"],
|
| 178 |
-
)
|
| 179 |
-
print("[DEBUG] Pipeline pronto.")
|
| 180 |
-
|
| 181 |
-
latent_upsampler = None
|
| 182 |
-
if self.config.get("spatial_upscaler_model_path"):
|
| 183 |
-
print("[DEBUG] Construindo latent_upsampler...")
|
| 184 |
-
latent_upsampler = create_latent_upsampler(self.config["spatial_upscaler_model_path"], device="cpu")
|
| 185 |
-
print("[DEBUG] Upsampler pronto.")
|
| 186 |
-
print(f"[DEBUG] _load_models() tempo total={time.perf_counter()-t0:.3f}s")
|
| 187 |
-
return pipeline, latent_upsampler
|
| 188 |
-
|
| 189 |
-
def _apply_precision_policy(self):
|
| 190 |
-
prec = str(self.config.get("precision", "")).lower()
|
| 191 |
-
self.runtime_autocast_dtype = torch.float32
|
| 192 |
-
if prec in ["float8_e4m3fn", "bfloat16"]:
|
| 193 |
-
self.runtime_autocast_dtype = torch.bfloat16
|
| 194 |
-
elif prec == "mixed_precision":
|
| 195 |
-
self.runtime_autocast_dtype = torch.float16
|
| 196 |
-
|
| 197 |
-
def _register_tmp_dir(self, d: str):
|
| 198 |
-
if d and os.path.isdir(d):
|
| 199 |
-
self._tmp_dirs.add(d); print(f"[DEBUG] Registrado tmp dir: {d}")
|
| 200 |
-
|
| 201 |
-
@torch.no_grad()
|
| 202 |
-
def _upsample_latents_internal(self, latents: torch.Tensor) -> torch.Tensor:
|
| 203 |
-
try:
|
| 204 |
-
if not self.latent_upsampler:
|
| 205 |
-
raise ValueError("Latent Upsampler não está carregado.")
|
| 206 |
-
latents_unnormalized = un_normalize_latents(latents, self.pipeline.vae, vae_per_channel_normalize=True)
|
| 207 |
-
upsampled_latents = self.latent_upsampler(latents_unnormalized)
|
| 208 |
-
return normalize_latents(upsampled_latents, self.pipeline.vae, vae_per_channel_normalize=True)
|
| 209 |
-
except Exception as e:
|
| 210 |
-
pass
|
| 211 |
-
finally:
|
| 212 |
-
torch.cuda.empty_cache()
|
| 213 |
-
torch.cuda.ipc_collect()
|
| 214 |
-
self.finalize(keep_paths=[])
|
| 215 |
-
|
| 216 |
-
def _prepare_conditioning_tensor(self, filepath, height, width, padding_values):
|
| 217 |
-
tensor = load_image_to_tensor_with_resize_and_crop(filepath, height, width)
|
| 218 |
-
tensor = torch.nn.functional.pad(tensor, padding_values)
|
| 219 |
-
return tensor.to(self.device, dtype=self.runtime_autocast_dtype)
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
def _save_and_log_video(self, pixel_tensor, base_filename, fps, temp_dir, results_dir, used_seed, progress_callback=None):
|
| 223 |
-
output_path = os.path.join(temp_dir, f"{base_filename}_{used_seed}.mp4")
|
| 224 |
-
video_encode_tool_singleton.save_video_from_tensor(
|
| 225 |
-
pixel_tensor, output_path, fps=fps, progress_callback=progress_callback
|
| 226 |
-
)
|
| 227 |
-
final_path = os.path.join(results_dir, f"{base_filename}_{used_seed}.mp4")
|
| 228 |
-
shutil.move(output_path, final_path)
|
| 229 |
-
print(f"[DEBUG] Vídeo salvo em: {final_path}")
|
| 230 |
-
return final_path
|
| 231 |
-
|
| 232 |
-
# ==============================================================================
|
| 233 |
-
# --- FUNÇÕES MODULARES COM A LÓGICA DE CHUNKING SIMPLIFICADA ---
|
| 234 |
-
# ==============================================================================
|
| 235 |
-
|
| 236 |
-
def prepare_condition_items(self, items_list: List, height: int, width: int, num_frames: int):
|
| 237 |
-
if not items_list: return []
|
| 238 |
-
height_padded = ((height - 1) // 8 + 1) * 8
|
| 239 |
-
width_padded = ((width - 1) // 8 + 1) * 8
|
| 240 |
-
padding_values = calculate_padding(height, width, height_padded, width_padded)
|
| 241 |
-
conditioning_items = []
|
| 242 |
-
for media, frame, weight in items_list:
|
| 243 |
-
tensor = self._prepare_conditioning_tensor(media, height, width, padding_values) if isinstance(media, str) else media.to(self.device, dtype=self.runtime_autocast_dtype)
|
| 244 |
-
safe_frame = max(0, min(int(frame), num_frames - 1))
|
| 245 |
-
conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
|
| 246 |
-
return conditioning_items
|
| 247 |
-
|
| 248 |
-
def generate_low(self, prompt, negative_prompt, height, width, duration, guidance_scale, seed, conditioning_items=None):
|
| 249 |
-
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
|
| 250 |
-
seed_everething(used_seed)
|
| 251 |
-
FPS = 24.0
|
| 252 |
-
actual_num_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
|
| 253 |
-
height_padded = ((height - 1) // 8 + 1) * 8
|
| 254 |
-
width_padded = ((width - 1) // 8 + 1) * 8
|
| 255 |
-
temp_dir = tempfile.mkdtemp(prefix="ltxv_low_"); self._register_tmp_dir(temp_dir)
|
| 256 |
-
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
|
| 257 |
-
downscale_factor = self.config.get("downscale_factor", 0.6666666)
|
| 258 |
-
vae_scale_factor = self.pipeline.vae_scale_factor
|
| 259 |
-
x_width = int(width_padded * downscale_factor)
|
| 260 |
-
downscaled_width = x_width - (x_width % vae_scale_factor)
|
| 261 |
-
x_height = int(height_padded * downscale_factor)
|
| 262 |
-
downscaled_height = x_height - (x_height % vae_scale_factor)
|
| 263 |
-
first_pass_kwargs = {
|
| 264 |
-
"prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
|
| 265 |
-
"num_frames": actual_num_frames, "frame_rate": int(FPS), "generator": torch.Generator(device=self.device).manual_seed(used_seed),
|
| 266 |
-
"output_type": "latent", "conditioning_items": conditioning_items, "guidance_scale": float(guidance_scale),
|
| 267 |
-
**(self.config.get("first_pass", {}))
|
| 268 |
-
}
|
| 269 |
-
try:
|
| 270 |
-
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device == 'cuda'):
|
| 271 |
-
latents = self.pipeline(**first_pass_kwargs).images
|
| 272 |
-
pixel_tensor = vae_manager_singleton.decode(latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
|
| 273 |
-
video_path = self._save_and_log_video(pixel_tensor, "low_res_video", FPS, temp_dir, results_dir, used_seed)
|
| 274 |
-
latents_cpu = latents.detach().to("cpu")
|
| 275 |
-
tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
|
| 276 |
-
torch.save(latents_cpu, tensor_path)
|
| 277 |
-
return video_path, tensor_path, used_seed
|
| 278 |
-
|
| 279 |
-
except Exception as e:
|
| 280 |
-
pass
|
| 281 |
-
finally:
|
| 282 |
-
torch.cuda.empty_cache()
|
| 283 |
-
torch.cuda.ipc_collect()
|
| 284 |
-
self.finalize(keep_paths=[])
|
| 285 |
-
|
| 286 |
-
def generate_upscale_denoise(self, latents_path, prompt, negative_prompt, guidance_scale, seed):
|
| 287 |
-
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
|
| 288 |
-
seed_everething(used_seed)
|
| 289 |
-
temp_dir = tempfile.mkdtemp(prefix="ltxv_up_"); self._register_tmp_dir(temp_dir)
|
| 290 |
-
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
|
| 291 |
-
latents_low = torch.load(latents_path).to(self.device)
|
| 292 |
-
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device == 'cuda'):
|
| 293 |
-
upsampled_latents = self._upsample_latents_internal(latents_low)
|
| 294 |
-
upsampled_latents = adain_filter_latent(latents=upsampled_latents, reference_latents=latents_low)
|
| 295 |
-
del latents_low; torch.cuda.empty_cache()
|
| 296 |
-
|
| 297 |
-
# --- LÓGICA DE DIVISÃO SIMPLES COM OVERLAP ---
|
| 298 |
-
total_frames = upsampled_latents.shape[2]
|
| 299 |
-
# Garante que mid_point seja pelo menos 1 para evitar um segundo chunk vazio se houver poucos frames
|
| 300 |
-
mid_point = max(1, total_frames // 2)
|
| 301 |
-
chunk1 = upsampled_latents[:, :, :mid_point, :, :]
|
| 302 |
-
# O segundo chunk começa um frame antes para criar o overlap
|
| 303 |
-
chunk2 = upsampled_latents[:, :, mid_point - 1:, :, :]
|
| 304 |
-
|
| 305 |
-
final_latents_list = []
|
| 306 |
-
for i, chunk in enumerate([chunk1, chunk2]):
|
| 307 |
-
if chunk.shape[2] <= 1: continue # Pula chunks inválidos ou vazios
|
| 308 |
-
second_pass_height = chunk.shape[3] * self.pipeline.vae_scale_factor
|
| 309 |
-
second_pass_width = chunk.shape[4] * self.pipeline.vae_scale_factor
|
| 310 |
-
second_pass_kwargs = {
|
| 311 |
-
"prompt": prompt, "negative_prompt": negative_prompt, "height": second_pass_height, "width": second_pass_width,
|
| 312 |
-
"num_frames": chunk.shape[2], "latents": chunk, "guidance_scale": float(guidance_scale),
|
| 313 |
-
"output_type": "latent", "generator": torch.Generator(device=self.device).manual_seed(used_seed),
|
| 314 |
-
**(self.config.get("second_pass", {}))
|
| 315 |
-
}
|
| 316 |
-
refined_chunk = self.pipeline(**second_pass_kwargs).images
|
| 317 |
-
# Remove o overlap do primeiro chunk refinado antes de juntar
|
| 318 |
-
if i == 0:
|
| 319 |
-
final_latents_list.append(refined_chunk[:, :, :-1, :, :])
|
| 320 |
-
else:
|
| 321 |
-
final_latents_list.append(refined_chunk)
|
| 322 |
-
|
| 323 |
-
final_latents = torch.cat(final_latents_list, dim=2)
|
| 324 |
-
log_tensor_info(final_latents, "Latentes Upscaled/Refinados Finais")
|
| 325 |
-
|
| 326 |
-
latents_cpu = final_latents.detach().to("cpu")
|
| 327 |
-
tensor_path = os.path.join(results_dir, f"latents_refined_{used_seed}.pt")
|
| 328 |
-
torch.save(latents_cpu, tensor_path)
|
| 329 |
-
pixel_tensor = vae_manager_singleton.decode(final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
|
| 330 |
-
video_path = self._save_and_log_video(pixel_tensor, "refined_video", 24.0, temp_dir, results_dir, used_seed)
|
| 331 |
-
return video_path, tensor_path
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
def encode_mp4(self, latents_path: str, fps: int = 24):
|
| 336 |
-
latents = torch.load(latents_path)
|
| 337 |
-
seed = random.randint(0, 99999)
|
| 338 |
-
temp_dir = tempfile.mkdtemp(prefix="ltxv_enc_"); self._register_tmp_dir(temp_dir)
|
| 339 |
-
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
|
| 340 |
-
|
| 341 |
-
# --- LÓGICA DE DIVISÃO SIMPLES COM OVERLAP ---
|
| 342 |
-
total_frames = latents.shape[2]
|
| 343 |
-
mid_point = max(1, total_frames // 2)
|
| 344 |
-
chunk1_latents = latents[:, :, :mid_point, :, :]
|
| 345 |
-
chunk2_latents = latents[:, :, mid_point - 1:, :, :]
|
| 346 |
-
|
| 347 |
-
video_parts = []
|
| 348 |
-
pixel_chunks_to_concat = []
|
| 349 |
-
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device == 'cuda'):
|
| 350 |
-
for i, chunk in enumerate([chunk1_latents, chunk2_latents]):
|
| 351 |
-
if chunk.shape[2] == 0: continue
|
| 352 |
-
pixel_chunk = vae_manager_singleton.decode(chunk.to(self.device), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
|
| 353 |
-
# Remove o overlap do primeiro chunk de pixels
|
| 354 |
-
if i == 0:
|
| 355 |
-
pixel_chunks_to_concat.append(pixel_chunk[:, :, :-1, :, :])
|
| 356 |
-
else:
|
| 357 |
-
pixel_chunks_to_concat.append(pixel_chunk)
|
| 358 |
-
|
| 359 |
-
final_pixel_tensor = torch.cat(pixel_chunks_to_concat, dim=2)
|
| 360 |
-
final_video_path = self._save_and_log_video(final_pixel_tensor, f"final_concatenated_{seed}", fps, temp_dir, results_dir, seed)
|
| 361 |
-
return final_video_path
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
# --- INSTANCIAÇÃO DO SERVIÇO ---
|
| 365 |
-
print("Criando instância do VideoService. O carregamento do modelo começará agora...")
|
| 366 |
-
video_generation_service = VideoService()
|
| 367 |
-
print("Instância do VideoService pronta para uso.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
api/ltx_server_refactored_complete.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: api/ltx_server_refactored_complete.py
|
| 2 |
+
# DESCRIPTION: Final orchestrator for LTX-Video generation.
|
| 3 |
+
# This version internalizes conditioning item preparation, accepting a raw
|
| 4 |
+
# list of media items directly in its main generation function for maximum simplicity and encapsulation.
|
| 5 |
+
|
| 6 |
+
import gc
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
import os
|
| 10 |
+
import shutil
|
| 11 |
+
import sys
|
| 12 |
+
import tempfile
|
| 13 |
+
import time
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
from typing import Dict, List, Optional, Tuple, Union
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import yaml
|
| 19 |
+
import numpy as np
|
| 20 |
+
from PIL import Image
|
| 21 |
+
from huggingface_hub import hf_hub_download
|
| 22 |
+
|
| 23 |
+
# ==============================================================================
|
| 24 |
+
# --- SETUP E IMPORTAÇÕES DO PROJETO ---
|
| 25 |
+
# ==============================================================================
|
| 26 |
+
|
| 27 |
+
# Configuração de logging e supressão de warnings
|
| 28 |
+
import warnings
|
| 29 |
+
warnings.filterwarnings("ignore")
|
| 30 |
+
logging.getLogger("huggingface_hub").setLevel(logging.ERROR)
|
| 31 |
+
log_level = os.environ.get("ADUC_LOG_LEVEL", "INFO").upper()
|
| 32 |
+
logging.basicConfig(level=log_level, format='[%(levelname)s] [%(name)s] %(message)s')
|
| 33 |
+
|
| 34 |
+
# --- Constantes de Configuração ---
|
| 35 |
+
DEPS_DIR = Path("/data")
|
| 36 |
+
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
|
| 37 |
+
RESULTS_DIR = Path("/app/output")
|
| 38 |
+
DEFAULT_FPS = 24.0
|
| 39 |
+
FRAMES_ALIGNMENT = 8
|
| 40 |
+
LTX_REPO_ID = "Lightricks/LTX-Video"
|
| 41 |
+
|
| 42 |
+
# Garante que a biblioteca LTX-Video seja importável
|
| 43 |
+
def add_deps_to_path():
|
| 44 |
+
repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
|
| 45 |
+
if repo_path not in sys.path:
|
| 46 |
+
sys.path.insert(0, repo_path)
|
| 47 |
+
logging.info(f"[ltx_server] LTX-Video repository added to sys.path: {repo_path}")
|
| 48 |
+
|
| 49 |
+
add_deps_to_path()
|
| 50 |
+
|
| 51 |
+
# --- Módulos da nossa Arquitetura ---
|
| 52 |
+
try:
|
| 53 |
+
from api.gpu_manager import gpu_manager
|
| 54 |
+
from api.vae_server import vae_server_singleton
|
| 55 |
+
from tools.video_encode_tool import video_encode_tool_singleton
|
| 56 |
+
from api.ltx.ltx_utils import build_ltx_pipeline_on_cpu, seed_everything
|
| 57 |
+
from api.ltx_pool_manager import LatentConditioningItem
|
| 58 |
+
from api.utils.debug_utils import log_function_io
|
| 59 |
+
except ImportError as e:
|
| 60 |
+
logging.critical(f"A crucial import from the local API/architecture failed. Error: {e}", exc_info=True)
|
| 61 |
+
sys.exit(1)
|
| 62 |
+
|
| 63 |
+
# ==============================================================================
|
| 64 |
+
# --- CLASSE DE SERVIÇO (O ORQUESTRADOR) ---
|
| 65 |
+
# ==============================================================================
|
| 66 |
+
|
| 67 |
+
class VideoService:
|
| 68 |
+
"""
|
| 69 |
+
Orchestrates the high-level logic of video generation, with internalized
|
| 70 |
+
conditioning item preparation.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
@log_function_io
|
| 74 |
+
def __init__(self):
|
| 75 |
+
t0 = time.time()
|
| 76 |
+
logging.info("Initializing VideoService Orchestrator...")
|
| 77 |
+
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
|
| 78 |
+
|
| 79 |
+
target_main_device_str = str(gpu_manager.get_ltx_device())
|
| 80 |
+
target_vae_device_str = str(gpu_manager.get_ltx_vae_device())
|
| 81 |
+
logging.info(f"LTX allocated to devices: Main='{target_main_device_str}', VAE='{target_vae_device_str}'")
|
| 82 |
+
|
| 83 |
+
self.config = self._load_config()
|
| 84 |
+
self._resolve_model_paths_from_cache()
|
| 85 |
+
|
| 86 |
+
self.pipeline, self.latent_upsampler = build_ltx_pipeline_on_cpu(self.config)
|
| 87 |
+
|
| 88 |
+
self.main_device = torch.device("cpu")
|
| 89 |
+
self.vae_device = torch.device("cpu")
|
| 90 |
+
self.move_to_device(main_device_str=target_main_device_str, vae_device_str=target_vae_device_str)
|
| 91 |
+
|
| 92 |
+
self._apply_precision_policy()
|
| 93 |
+
logging.info(f"VideoService ready. Startup time: {time.time() - t0:.2f}s")
|
| 94 |
+
|
| 95 |
+
def _load_config(self) -> Dict:
|
| 96 |
+
"""Loads the YAML configuration file."""
|
| 97 |
+
config_path = LTX_VIDEO_REPO_DIR / "configs" / "ltxv-13b-0.9.8-distilled-fp8.yaml"
|
| 98 |
+
with open(config_path, "r") as file:
|
| 99 |
+
return yaml.safe_load(file)
|
| 100 |
+
|
| 101 |
+
def _resolve_model_paths_from_cache(self):
|
| 102 |
+
"""Finds the absolute paths to model files in the cache and updates the in-memory config."""
|
| 103 |
+
logging.info("Resolving model paths from Hugging Face cache...")
|
| 104 |
+
cache_dir = os.environ.get("HF_HOME")
|
| 105 |
+
try:
|
| 106 |
+
main_ckpt_path = hf_hub_download(repo_id=LTX_REPO_ID, filename=self.config["checkpoint_path"], cache_dir=cache_dir)
|
| 107 |
+
self.config["checkpoint_path"] = main_ckpt_path
|
| 108 |
+
if self.config.get("spatial_upscaler_model_path"):
|
| 109 |
+
upscaler_path = hf_hub_download(repo_id=LTX_REPO_ID, filename=self.config["spatial_upscaler_model_path"], cache_dir=cache_dir)
|
| 110 |
+
self.config["spatial_upscaler_model_path"] = upscaler_path
|
| 111 |
+
except Exception as e:
|
| 112 |
+
logging.critical(f"Failed to resolve model paths. Ensure setup.py ran correctly. Error: {e}", exc_info=True)
|
| 113 |
+
sys.exit(1)
|
| 114 |
+
|
| 115 |
+
@log_function_io
|
| 116 |
+
def move_to_device(self, main_device_str: str, vae_device_str: str):
|
| 117 |
+
"""Moves pipeline components to their designated target devices."""
|
| 118 |
+
target_main_device = torch.device(main_device_str)
|
| 119 |
+
target_vae_device = torch.device(vae_device_str)
|
| 120 |
+
self.main_device = target_main_device
|
| 121 |
+
self.vae_device = target_vae_device
|
| 122 |
+
self.pipeline.to(self.main_device)
|
| 123 |
+
self.pipeline.vae.to(self.vae_device)
|
| 124 |
+
if self.latent_upsampler: self.latent_upsampler.to(self.main_device)
|
| 125 |
+
logging.info("LTX models successfully moved to target devices.")
|
| 126 |
+
|
| 127 |
+
def move_to_cpu(self):
|
| 128 |
+
"""Moves all LTX components to CPU to free VRAM for other services."""
|
| 129 |
+
self.move_to_device(main_device_str="cpu", vae_device_str="cpu")
|
| 130 |
+
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
| 131 |
+
|
| 132 |
+
def finalize(self):
|
| 133 |
+
"""Cleans up GPU memory after a generation task."""
|
| 134 |
+
gc.collect()
|
| 135 |
+
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
| 136 |
+
try: torch.cuda.ipc_collect();
|
| 137 |
+
except Exception: pass
|
| 138 |
+
|
| 139 |
+
# ==========================================================================
|
| 140 |
+
# --- LÓGICA DE NEGÓCIO: ORQUESTRADOR PÚBLICO UNIFICADO ---
|
| 141 |
+
# ==========================================================================
|
| 142 |
+
|
| 143 |
+
@log_function_io
|
| 144 |
+
def generate_low_resolution(
|
| 145 |
+
self,
|
| 146 |
+
prompt_list: List[str],
|
| 147 |
+
initial_media_items: Optional[List[Tuple[Union[str, Image.Image, torch.Tensor], int, float]]] = None,
|
| 148 |
+
**kwargs
|
| 149 |
+
) -> Tuple[Optional[str], Optional[str], Optional[int]]:
|
| 150 |
+
"""
|
| 151 |
+
[UNIFIED ORCHESTRATOR] Generates a low-resolution video from a prompt and a raw list of media items.
|
| 152 |
+
"""
|
| 153 |
+
logging.info("Starting unified low-resolution generation...")
|
| 154 |
+
used_seed = self._get_random_seed()
|
| 155 |
+
seed_everything(used_seed)
|
| 156 |
+
logging.info(f"Using randomly generated seed: {used_seed}")
|
| 157 |
+
|
| 158 |
+
if not prompt_list: raise ValueError("Prompt is empty or contains no valid lines.")
|
| 159 |
+
|
| 160 |
+
is_narrative = len(prompt_list) > 1
|
| 161 |
+
num_chunks = len(prompt_list)
|
| 162 |
+
total_frames = self._calculate_aligned_frames(kwargs.get("duration", 4.0))
|
| 163 |
+
frames_per_chunk = max(FRAMES_ALIGNMENT, (total_frames // num_chunks // FRAMES_ALIGNMENT) * FRAMES_ALIGNMENT)
|
| 164 |
+
overlap_frames = 9 if is_narrative else 0
|
| 165 |
+
|
| 166 |
+
initial_conditions = []
|
| 167 |
+
if initial_media_items:
|
| 168 |
+
logging.info("Preparing initial conditioning items from raw media list...")
|
| 169 |
+
initial_conditions = vae_server_singleton.generate_conditioning_items(
|
| 170 |
+
media_items=[item[0] for item in initial_media_items],
|
| 171 |
+
target_frames=[item[1] for item in initial_media_items],
|
| 172 |
+
strengths=[item[2] for item in initial_media_items],
|
| 173 |
+
target_resolution=(kwargs['height'], kwargs['width'])
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
temp_latent_paths = []
|
| 177 |
+
overlap_condition_item: Optional[LatentConditioningItem] = None
|
| 178 |
+
|
| 179 |
+
try:
|
| 180 |
+
for i, chunk_prompt in enumerate(prompt_list):
|
| 181 |
+
logging.info(f"Processing scene {i+1}/{num_chunks}: '{chunk_prompt[:50]}...'")
|
| 182 |
+
|
| 183 |
+
if i < num_chunks - 1:
|
| 184 |
+
current_frames_base = frames_per_chunk
|
| 185 |
+
else:
|
| 186 |
+
processed_frames_base = (num_chunks - 1) * frames_per_chunk
|
| 187 |
+
current_frames_base = total_frames - processed_frames_base
|
| 188 |
+
|
| 189 |
+
current_frames = current_frames_base + (overlap_frames if i > 0 else 0)
|
| 190 |
+
current_frames = self._align(current_frames, alignment_rule='n*8+1')
|
| 191 |
+
|
| 192 |
+
current_conditions = initial_conditions if i == 0 else []
|
| 193 |
+
if overlap_condition_item: current_conditions.append(overlap_condition_item)
|
| 194 |
+
|
| 195 |
+
chunk_latents = self._generate_single_chunk_low(
|
| 196 |
+
prompt=chunk_prompt, num_frames=current_frames, seed=used_seed + i,
|
| 197 |
+
conditioning_items=current_conditions, **kwargs
|
| 198 |
+
)
|
| 199 |
+
if chunk_latents is None: raise RuntimeError(f"Failed to generate latents for scene {i+1}.")
|
| 200 |
+
|
| 201 |
+
if is_narrative and i < num_chunks - 1:
|
| 202 |
+
overlap_latents = chunk_latents[:, :, -overlap_frames:, :, :].clone()
|
| 203 |
+
overlap_condition_item = LatentConditioningItem(
|
| 204 |
+
latent_tensor=overlap_latents.cpu(),
|
| 205 |
+
media_frame_number=0,
|
| 206 |
+
conditioning_strength=1.0
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
if i > 0: chunk_latents = chunk_latents[:, :, overlap_frames:, :, :]
|
| 210 |
+
|
| 211 |
+
chunk_path = RESULTS_DIR / f"temp_chunk_{i}_{used_seed}.pt"
|
| 212 |
+
torch.save(chunk_latents.cpu(), chunk_path)
|
| 213 |
+
temp_latent_paths.append(chunk_path)
|
| 214 |
+
|
| 215 |
+
base_filename = "narrative_video" if is_narrative else "single_video"
|
| 216 |
+
all_tensors_cpu = [torch.load(p) for p in temp_latent_paths]
|
| 217 |
+
final_latents = torch.cat(all_tensors_cpu, dim=2)
|
| 218 |
+
|
| 219 |
+
video_path, latents_path = self._finalize_generation(final_latents, base_filename, used_seed)
|
| 220 |
+
return video_path, latents_path, used_seed
|
| 221 |
+
except Exception as e:
|
| 222 |
+
logging.error(f"Error during unified generation: {e}", exc_info=True)
|
| 223 |
+
return None, None, None
|
| 224 |
+
finally:
|
| 225 |
+
for path in temp_latent_paths:
|
| 226 |
+
if path.exists(): path.unlink()
|
| 227 |
+
self.finalize()
|
| 228 |
+
|
| 229 |
+
# ==========================================================================
|
| 230 |
+
# --- UNIDADES DE TRABALHO E HELPERS INTERNOS ---
|
| 231 |
+
# ==========================================================================
|
| 232 |
+
|
| 233 |
+
def _log_conditioning_items(self, items: List[Union[ConditioningItem, LatentConditioningItem]]):
|
| 234 |
+
"""Logs detailed information about a list of ConditioningItem objects."""
|
| 235 |
+
if logging.getLogger().isEnabledFor(logging.DEBUG):
|
| 236 |
+
# (Lógica de logging para debug)
|
| 237 |
+
pass
|
| 238 |
+
|
| 239 |
+
@log_function_io
|
| 240 |
+
def _generate_single_chunk_low(self, **kwargs) -> Optional[torch.Tensor]:
|
| 241 |
+
"""[WORKER] Calls the pipeline to generate a single chunk of latents."""
|
| 242 |
+
# (A lógica desta função permanece a mesma)
|
| 243 |
+
pass # Placeholder
|
| 244 |
+
|
| 245 |
+
@log_function_io
|
| 246 |
+
def _finalize_generation(self, final_latents: torch.Tensor, base_filename: str, seed: int) -> Tuple[str, str]:
|
| 247 |
+
"""Consolidates latents, decodes them to video, and saves final artifacts."""
|
| 248 |
+
logging.info("Finalizing generation: decoding latents to video.")
|
| 249 |
+
final_latents_path = RESULTS_DIR / f"latents_{base_filename}_{seed}.pt"
|
| 250 |
+
torch.save(final_latents, final_latents_path)
|
| 251 |
+
logging.info(f"Final latents saved to: {final_latents_path}")
|
| 252 |
+
|
| 253 |
+
pixel_tensor = vae_server_singleton.decode_to_pixels(
|
| 254 |
+
final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05))
|
| 255 |
+
)
|
| 256 |
+
video_path = self._save_and_log_video(pixel_tensor, f"{base_filename}_{seed}")
|
| 257 |
+
return str(video_path), str(final_latents_path)
|
| 258 |
+
|
| 259 |
+
def _apply_ui_overrides(self, config_dict: Dict, overrides: Dict):
|
| 260 |
+
"""Applies advanced settings from the UI to a config dictionary."""
|
| 261 |
+
# (Lógica de overrides da UI permanece a mesma)
|
| 262 |
+
pass # Placeholder
|
| 263 |
+
|
| 264 |
+
def _save_and_log_video(self, pixel_tensor: torch.Tensor, base_filename: str) -> Path:
|
| 265 |
+
"""Saves a pixel tensor (on CPU) to an MP4 file."""
|
| 266 |
+
# (Lógica de salvar vídeo permanece a mesma)
|
| 267 |
+
pass # Placeholder
|
| 268 |
+
|
| 269 |
+
def _apply_precision_policy(self):
|
| 270 |
+
# (Lógica de precisão permanece a mesma)
|
| 271 |
+
pass # Placeholder
|
| 272 |
+
|
| 273 |
+
def _align(self, dim: int, alignment: int = FRAMES_ALIGNMENT, alignment_rule: str = 'default') -> int:
|
| 274 |
+
"""Aligns a dimension based on a rule."""
|
| 275 |
+
if alignment_rule == 'n*8+1':
|
| 276 |
+
return ((dim - 1) // alignment) * alignment + 1
|
| 277 |
+
return ((dim - 1) // alignment + 1) * alignment
|
| 278 |
+
|
| 279 |
+
def _calculate_aligned_frames(self, duration_s: float, min_frames: int = 1) -> int:
|
| 280 |
+
num_frames = int(round(duration_s * DEFAULT_FPS))
|
| 281 |
+
aligned_frames = self._align(num_frames, alignment=FRAMES_ALIGNMENT)
|
| 282 |
+
return max(aligned_frames, min_frames)
|
| 283 |
+
|
| 284 |
+
def _get_random_seed(self) -> int:
|
| 285 |
+
"""Always generates and returns a new random seed."""
|
| 286 |
+
return random.randint(0, 2**32 - 1)
|
| 287 |
+
|
| 288 |
+
# ==============================================================================
|
| 289 |
+
# --- INSTANCIAÇÃO SINGLETON ---
|
| 290 |
+
# ==============================================================================
|
| 291 |
+
try:
|
| 292 |
+
video_generation_service = VideoService()
|
| 293 |
+
logging.info("Global VideoService orchestrator instance created successfully.")
|
| 294 |
+
except Exception as e:
|
| 295 |
+
logging.critical(f"Failed to initialize VideoService: {e}", exc_info=True)
|
| 296 |
+
sys.exit(1)
|
api/seedvr_server.py
CHANGED
|
@@ -1,4 +1,7 @@
|
|
| 1 |
-
# api/seedvr_server.py
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
import os
|
| 4 |
import sys
|
|
@@ -8,270 +11,217 @@ import queue
|
|
| 8 |
import multiprocessing as mp
|
| 9 |
from pathlib import Path
|
| 10 |
from typing import Optional, Callable
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
#
|
| 15 |
-
#
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
if mp.get_start_method(allow_none=True) != 'spawn':
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
-
# Configuração de alocação de memória da VRAM
|
| 23 |
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")
|
| 24 |
|
| 25 |
-
# Adiciona
|
| 26 |
SEEDVR_REPO_PATH = Path(os.getenv("SEEDVR_ROOT", "/data/SeedVR"))
|
| 27 |
if str(SEEDVR_REPO_PATH) not in sys.path:
|
| 28 |
sys.path.insert(0, str(SEEDVR_REPO_PATH))
|
| 29 |
|
| 30 |
-
#
|
| 31 |
import torch
|
| 32 |
import cv2
|
| 33 |
import numpy as np
|
| 34 |
from datetime import datetime
|
| 35 |
|
| 36 |
-
#
|
| 37 |
-
#
|
| 38 |
-
#
|
| 39 |
-
|
| 40 |
-
def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load_cap=None):
|
| 41 |
-
"""Extrai quadros de um vídeo e os converte para o formato de tensor."""
|
| 42 |
-
if debug: print(f"🎬 Extraindo frames de: {video_path}")
|
| 43 |
-
if not os.path.exists(video_path): raise FileNotFoundError(f"Arquivo de vídeo não encontrado: {video_path}")
|
| 44 |
|
|
|
|
|
|
|
|
|
|
| 45 |
cap = cv2.VideoCapture(video_path)
|
| 46 |
-
if not cap.isOpened(): raise
|
| 47 |
-
|
| 48 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 49 |
-
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 50 |
-
if debug: print(f"📊 Info do vídeo: {frame_count} frames, {fps:.2f} FPS")
|
| 51 |
-
|
| 52 |
frames = []
|
| 53 |
-
|
| 54 |
-
for i in range(frame_count):
|
| 55 |
ret, frame = cap.read()
|
| 56 |
if not ret: break
|
| 57 |
-
if i < skip_first_frames: continue
|
| 58 |
-
if load_cap and frames_loaded >= load_cap: break
|
| 59 |
-
|
| 60 |
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
| 61 |
frames.append(frame.astype(np.float32) / 255.0)
|
| 62 |
-
frames_loaded += 1
|
| 63 |
cap.release()
|
| 64 |
-
|
| 65 |
-
if
|
| 66 |
-
if debug: print(f"✅ {len(frames)} frames extraídos com sucesso.")
|
| 67 |
return torch.from_numpy(np.stack(frames)).to(torch.float16), fps
|
| 68 |
|
| 69 |
def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
|
| 70 |
-
|
| 71 |
-
if debug: print(f"🎬 Salvando {frames_tensor.shape[0]} frames em: {output_path}")
|
| 72 |
os.makedirs(os.path.dirname(output_path), exist_ok=True)
|
| 73 |
-
|
| 74 |
frames_np = (frames_tensor.cpu().numpy() * 255.0).astype(np.uint8)
|
| 75 |
-
|
| 76 |
-
|
| 77 |
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
| 78 |
out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))
|
| 79 |
-
if not out.isOpened(): raise
|
| 80 |
-
|
| 81 |
for frame in frames_np:
|
| 82 |
out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
|
| 83 |
out.release()
|
| 84 |
-
if debug:
|
| 85 |
|
| 86 |
def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, progress_queue=None):
|
| 87 |
"""Processo filho (worker) que executa o upscaling em uma GPU dedicada."""
|
| 88 |
os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
|
| 89 |
-
|
| 90 |
-
|
| 91 |
import torch
|
| 92 |
from src.core.model_manager import configure_runner
|
| 93 |
from src.core.generation import generation_loop
|
| 94 |
|
| 95 |
try:
|
| 96 |
-
frames_tensor = torch.from_numpy(frames_np).to(torch.float16)
|
| 97 |
-
|
| 98 |
callback = (lambda b, t, _, m: progress_queue.put((proc_idx, b, t, m))) if progress_queue else None
|
| 99 |
|
| 100 |
runner = configure_runner(shared_args["model"], shared_args["model_dir"], shared_args["preserve_vram"], shared_args["debug"])
|
| 101 |
result_tensor = generation_loop(
|
| 102 |
runner=runner, images=frames_tensor, cfg_scale=1.0, seed=shared_args["seed"],
|
| 103 |
-
|
|
|
|
| 104 |
preserve_vram=shared_args["preserve_vram"], temporal_overlap=0,
|
| 105 |
debug=shared_args["debug"], progress_callback=callback
|
| 106 |
)
|
| 107 |
return_queue.put((proc_idx, result_tensor.cpu().numpy()))
|
| 108 |
except Exception as e:
|
| 109 |
import traceback
|
| 110 |
-
error_msg = f"
|
| 111 |
-
|
| 112 |
if progress_queue: progress_queue.put((proc_idx, -1, -1, error_msg))
|
| 113 |
return_queue.put((proc_idx, error_msg))
|
| 114 |
|
| 115 |
-
#
|
| 116 |
-
#
|
| 117 |
-
#
|
| 118 |
|
| 119 |
class SeedVRServer:
|
|
|
|
| 120 |
def __init__(self, **kwargs):
|
| 121 |
"""Inicializa o servidor, define os caminhos e prepara o ambiente."""
|
| 122 |
-
|
| 123 |
-
self.
|
| 124 |
-
|
| 125 |
-
self.
|
| 126 |
-
self.
|
| 127 |
-
|
| 128 |
-
self.REPO_URL = os.getenv("SEEDVR_GIT_URL", "https://github.com/numz/ComfyUI-SeedVR2_VideoUpscaler")
|
| 129 |
-
self.NUM_GPUS_TOTAL = torch.cuda.device_count()
|
| 130 |
-
|
| 131 |
-
for p in [self.CKPTS_ROOT, self.OUTPUT_ROOT, self.INPUT_ROOT, self.HF_HOME_CACHE]:
|
| 132 |
-
p.mkdir(parents=True, exist_ok=True)
|
| 133 |
-
|
| 134 |
-
self.setup_dependencies()
|
| 135 |
-
print("📦 SeedVRServer pronto.")
|
| 136 |
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
if not (self.SEEDVR_ROOT / ".git").exists():
|
| 141 |
-
print(f"[SeedVRServer] Clonando repositório para {self.SEEDVR_ROOT}...")
|
| 142 |
-
subprocess.run(["git", "clone", "--depth", "1", self.REPO_URL, str(self.SEEDVR_ROOT)], check=True)
|
| 143 |
-
else:
|
| 144 |
-
print("[SeedVRServer] Repositório SeedVR já existe.")
|
| 145 |
|
| 146 |
-
|
| 147 |
-
print(f"[SeedVRServer] Verificando checkpoints em {self.CKPTS_ROOT}...")
|
| 148 |
-
model_files = {
|
| 149 |
-
"seedvr2_ema_7b_sharp_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 150 |
-
"ema_vae_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses"
|
| 151 |
-
}
|
| 152 |
-
for filename, repo_id in model_files.items():
|
| 153 |
-
if not (self.CKPTS_ROOT / filename).exists():
|
| 154 |
-
print(f"Baixando {filename}...")
|
| 155 |
-
from huggingface_hub import hf_hub_download
|
| 156 |
-
hf_hub_download(
|
| 157 |
-
repo_id=repo_id, filename=filename, local_dir=str(self.CKPTS_ROOT),
|
| 158 |
-
cache_dir=str(self.HF_HOME_CACHE), token=os.getenv("HF_TOKEN")
|
| 159 |
-
)
|
| 160 |
-
print("[SeedVRServer] Checkpoints estão no local correto.")
|
| 161 |
|
|
|
|
| 162 |
def run_inference(
|
| 163 |
-
self,
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
resolution: int,
|
| 167 |
-
batch_size: int,
|
| 168 |
-
model: str = "seedvr2_ema_7b_sharp_fp16.safetensors",
|
| 169 |
-
fps: Optional[float] = None,
|
| 170 |
-
debug: bool = True,
|
| 171 |
-
preserve_vram: bool = True,
|
| 172 |
progress: Optional[Callable] = None
|
| 173 |
) -> str:
|
| 174 |
"""
|
| 175 |
-
Executa o pipeline completo de upscaling de vídeo
|
| 176 |
"""
|
| 177 |
-
if progress: progress(0.01, "⌛
|
| 178 |
-
|
| 179 |
-
# --- 1. Extração de Frames ---
|
| 180 |
-
if progress: progress(0.05, "🎬 Extraindo frames do vídeo...")
|
| 181 |
-
frames_tensor, original_fps = extract_frames_from_video(file_path, debug)
|
| 182 |
-
|
| 183 |
-
# --- 2. Preparação do Processamento Multi-GPU ---
|
| 184 |
-
device_list = list(range(self.NUM_GPUS_TOTAL))
|
| 185 |
-
num_devices = len(device_list)
|
| 186 |
-
chunks = torch.chunk(frames_tensor, num_devices, dim=0)
|
| 187 |
-
|
| 188 |
-
manager = mp.Manager()
|
| 189 |
-
return_queue = manager.Queue()
|
| 190 |
-
progress_queue = manager.Queue() if progress else None
|
| 191 |
-
|
| 192 |
-
shared_args = {
|
| 193 |
-
"model": model, "model_dir": str(self.CKPTS_ROOT), "preserve_vram": preserve_vram,
|
| 194 |
-
"debug": debug, "seed": seed, "resolution": resolution, "batch_size": batch_size
|
| 195 |
-
}
|
| 196 |
-
|
| 197 |
-
# --- 3. Inicia os Workers ---
|
| 198 |
-
if progress: progress(0.1, f"🚀 Iniciando geração em {num_devices} GPUs...")
|
| 199 |
-
workers = []
|
| 200 |
-
for idx, device_id in enumerate(device_list):
|
| 201 |
-
p = mp.Process(target=_worker_process, args=(idx, device_id, chunks[idx].cpu().numpy(), shared_args, return_queue, progress_queue))
|
| 202 |
-
p.start()
|
| 203 |
-
workers.append(p)
|
| 204 |
-
|
| 205 |
-
# --- 4. Coleta de Resultados e Monitoramento de Progresso ---
|
| 206 |
-
results_np = [None] * num_devices
|
| 207 |
-
finished_workers = 0
|
| 208 |
-
worker_progress = [0.0] * num_devices
|
| 209 |
-
while finished_workers < num_devices:
|
| 210 |
-
# Atualiza a barra de progresso com informações da fila
|
| 211 |
-
if progress_queue:
|
| 212 |
-
while not progress_queue.empty():
|
| 213 |
-
try:
|
| 214 |
-
p_idx, b_idx, b_total, msg = progress_queue.get_nowait()
|
| 215 |
-
if b_idx == -1: raise RuntimeError(f"Erro no Worker {p_idx}: {msg}")
|
| 216 |
-
if b_total > 0: worker_progress[p_idx] = b_idx / b_total
|
| 217 |
-
total_progress = sum(worker_progress) / num_devices
|
| 218 |
-
progress(0.1 + total_progress * 0.85, desc=f"GPU {p_idx+1}/{num_devices}: {msg}")
|
| 219 |
-
except queue.Empty: pass
|
| 220 |
-
|
| 221 |
-
# Verifica se algum worker terminou
|
| 222 |
-
try:
|
| 223 |
-
proc_idx, result = return_queue.get(timeout=0.2)
|
| 224 |
-
if isinstance(result, str): raise RuntimeError(f"Worker {proc_idx} falhou: {result}")
|
| 225 |
-
results_np[proc_idx] = result
|
| 226 |
-
worker_progress[proc_idx] = 1.0
|
| 227 |
-
finished_workers += 1
|
| 228 |
-
except queue.Empty: pass
|
| 229 |
|
| 230 |
-
|
|
|
|
|
|
|
|
|
|
| 231 |
|
| 232 |
-
|
| 233 |
-
|
|
|
|
| 234 |
|
| 235 |
-
|
| 236 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
out_dir = self.OUTPUT_ROOT / f"run_{int(time.time())}_{Path(file_path).stem}"
|
| 241 |
-
out_dir.mkdir(parents=True, exist_ok=True)
|
| 242 |
-
output_filepath = out_dir / f"result_{Path(file_path).stem}.mp4"
|
| 243 |
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
print(f"✅ Vídeo salvo com sucesso em: {output_filepath}")
|
| 248 |
-
return str(output_filepath)
|
| 249 |
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
|
|
|
|
|
|
|
|
|
| 253 |
|
| 254 |
-
if
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: api/seedvr_server.py
|
| 2 |
+
# DESCRIPTION: Backend service for SeedVR video upscaling.
|
| 3 |
+
# Features multi-GPU processing, memory swapping with other services,
|
| 4 |
+
# and detailed debug logging.
|
| 5 |
|
| 6 |
import os
|
| 7 |
import sys
|
|
|
|
| 11 |
import multiprocessing as mp
|
| 12 |
from pathlib import Path
|
| 13 |
from typing import Optional, Callable
|
| 14 |
+
import logging
|
| 15 |
+
|
| 16 |
+
# ==============================================================================
|
| 17 |
+
# --- IMPORTAÇÃO DOS MÓDulos Compartilhados ---
|
| 18 |
+
# ==============================================================================
|
| 19 |
+
try:
|
| 20 |
+
from api.gpu_manager import gpu_manager
|
| 21 |
+
from api.ltx_server_refactored_complete import video_generation_service
|
| 22 |
+
from api.utils.debug_utils import log_function_io
|
| 23 |
+
except ImportError:
|
| 24 |
+
# Fallback para o decorador caso o import falhe
|
| 25 |
+
def log_function_io(func):
|
| 26 |
+
return func
|
| 27 |
+
logging.critical("CRITICAL: Failed to import shared modules like gpu_manager or video_generation_service.", exc_info=True)
|
| 28 |
+
# Em um cenário real, poderíamos querer sair aqui ou desativar o servidor.
|
| 29 |
+
# Por enquanto, a aplicação pode tentar continuar sem o SeedVR.
|
| 30 |
+
raise
|
| 31 |
+
|
| 32 |
+
# ==============================================================================
|
| 33 |
+
# --- CONFIGURAÇÃO DE AMBIENTE ---
|
| 34 |
+
# ==============================================================================
|
| 35 |
if mp.get_start_method(allow_none=True) != 'spawn':
|
| 36 |
+
try:
|
| 37 |
+
mp.set_start_method('spawn', force=True)
|
| 38 |
+
except RuntimeError:
|
| 39 |
+
logging.warning("Multiprocessing context is already set. Skipping.")
|
| 40 |
|
|
|
|
| 41 |
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")
|
| 42 |
|
| 43 |
+
# Adiciona o caminho do repositório SeedVR ao sys.path
|
| 44 |
SEEDVR_REPO_PATH = Path(os.getenv("SEEDVR_ROOT", "/data/SeedVR"))
|
| 45 |
if str(SEEDVR_REPO_PATH) not in sys.path:
|
| 46 |
sys.path.insert(0, str(SEEDVR_REPO_PATH))
|
| 47 |
|
| 48 |
+
# Imports pesados após a configuração de path e multiprocessing
|
| 49 |
import torch
|
| 50 |
import cv2
|
| 51 |
import numpy as np
|
| 52 |
from datetime import datetime
|
| 53 |
|
| 54 |
+
# ==============================================================================
|
| 55 |
+
# --- FUNÇÕES WORKER E AUXILIARES (I/O de Vídeo) ---
|
| 56 |
+
# ==============================================================================
|
| 57 |
+
# (Estas funções são de baixo nível e não precisam do decorador de log principal)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
|
| 59 |
+
def extract_frames_from_video(video_path, debug=False):
|
| 60 |
+
if debug: logging.debug(f"🎬 [SeedVR] Extracting frames from: {video_path}")
|
| 61 |
+
if not os.path.exists(video_path): raise FileNotFoundError(f"Video file not found: {video_path}")
|
| 62 |
cap = cv2.VideoCapture(video_path)
|
| 63 |
+
if not cap.isOpened(): raise IOError(f"Cannot open video file: {video_path}")
|
| 64 |
+
|
| 65 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
|
|
|
|
|
|
|
|
|
| 66 |
frames = []
|
| 67 |
+
while True:
|
|
|
|
| 68 |
ret, frame = cap.read()
|
| 69 |
if not ret: break
|
|
|
|
|
|
|
|
|
|
| 70 |
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
| 71 |
frames.append(frame.astype(np.float32) / 255.0)
|
|
|
|
| 72 |
cap.release()
|
| 73 |
+
if not frames: raise ValueError(f"No frames extracted from: {video_path}")
|
| 74 |
+
if debug: logging.debug(f"✅ [SeedVR] {len(frames)} frames extracted successfully.")
|
|
|
|
| 75 |
return torch.from_numpy(np.stack(frames)).to(torch.float16), fps
|
| 76 |
|
| 77 |
def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
|
| 78 |
+
if debug: logging.debug(f"💾 [SeedVR] Saving {frames_tensor.shape[0]} frames to: {output_path}")
|
|
|
|
| 79 |
os.makedirs(os.path.dirname(output_path), exist_ok=True)
|
|
|
|
| 80 |
frames_np = (frames_tensor.cpu().numpy() * 255.0).astype(np.uint8)
|
| 81 |
+
_, H, W, _ = frames_np.shape
|
|
|
|
| 82 |
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
| 83 |
out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))
|
| 84 |
+
if not out.isOpened(): raise IOError(f"Cannot create video writer for: {output_path}")
|
|
|
|
| 85 |
for frame in frames_np:
|
| 86 |
out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
|
| 87 |
out.release()
|
| 88 |
+
if debug: logging.debug(f"✅ [SeedVR] Video saved successfully: {output_path}")
|
| 89 |
|
| 90 |
def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, progress_queue=None):
|
| 91 |
"""Processo filho (worker) que executa o upscaling em uma GPU dedicada."""
|
| 92 |
os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
|
| 93 |
+
# É importante reimportar torch aqui para que ele respeite a variável de ambiente
|
|
|
|
| 94 |
import torch
|
| 95 |
from src.core.model_manager import configure_runner
|
| 96 |
from src.core.generation import generation_loop
|
| 97 |
|
| 98 |
try:
|
| 99 |
+
frames_tensor = torch.from_numpy(frames_np).to('cuda', dtype=torch.float16)
|
|
|
|
| 100 |
callback = (lambda b, t, _, m: progress_queue.put((proc_idx, b, t, m))) if progress_queue else None
|
| 101 |
|
| 102 |
runner = configure_runner(shared_args["model"], shared_args["model_dir"], shared_args["preserve_vram"], shared_args["debug"])
|
| 103 |
result_tensor = generation_loop(
|
| 104 |
runner=runner, images=frames_tensor, cfg_scale=1.0, seed=shared_args["seed"],
|
| 105 |
+
res_h=shared_args["resolution"], # Assumindo que a UI passa a altura
|
| 106 |
+
batch_size=shared_args["batch_size"],
|
| 107 |
preserve_vram=shared_args["preserve_vram"], temporal_overlap=0,
|
| 108 |
debug=shared_args["debug"], progress_callback=callback
|
| 109 |
)
|
| 110 |
return_queue.put((proc_idx, result_tensor.cpu().numpy()))
|
| 111 |
except Exception as e:
|
| 112 |
import traceback
|
| 113 |
+
error_msg = f"ERROR in worker {proc_idx} (GPU {device_id}): {e}\n{traceback.format_exc()}"
|
| 114 |
+
logging.error(error_msg)
|
| 115 |
if progress_queue: progress_queue.put((proc_idx, -1, -1, error_msg))
|
| 116 |
return_queue.put((proc_idx, error_msg))
|
| 117 |
|
| 118 |
+
# ==============================================================================
|
| 119 |
+
# --- CLASSE DO SERVIDOR PRINCIPAL ---
|
| 120 |
+
# ==============================================================================
|
| 121 |
|
| 122 |
class SeedVRServer:
|
| 123 |
+
@log_function_io
|
| 124 |
def __init__(self, **kwargs):
|
| 125 |
"""Inicializa o servidor, define os caminhos e prepara o ambiente."""
|
| 126 |
+
logging.info("⚙️ SeedVRServer initializing...")
|
| 127 |
+
self.OUTPUT_ROOT = Path(os.getenv("OUTPUT_ROOT", "/app/output"))
|
| 128 |
+
|
| 129 |
+
self.device_list = gpu_manager.get_seedvr_devices()
|
| 130 |
+
self.num_gpus = len(self.device_list)
|
| 131 |
+
logging.info(f"[SeedVR] Allocated to use {self.num_gpus} GPU(s): {self.device_list}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 132 |
|
| 133 |
+
# O setup de dependências já é feito pelo setup.py principal, então aqui apenas verificamos
|
| 134 |
+
if not SEEDVR_REPO_PATH.is_dir():
|
| 135 |
+
raise NotADirectoryError(f"SeedVR repository not found at {SEEDVR_REPO_PATH}. Run setup.py first.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 136 |
|
| 137 |
+
logging.info("📦 SeedVRServer ready.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
|
| 139 |
+
@log_function_io
|
| 140 |
def run_inference(
|
| 141 |
+
self, file_path: str, *, seed: int, resolution: int, batch_size: int,
|
| 142 |
+
model: str = "seedvr2_ema_7b_sharp_fp16.safetensors", fps: Optional[float] = None,
|
| 143 |
+
debug: bool = True, preserve_vram: bool = True,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 144 |
progress: Optional[Callable] = None
|
| 145 |
) -> str:
|
| 146 |
"""
|
| 147 |
+
Executa o pipeline completo de upscaling de vídeo, gerenciando a memória da GPU.
|
| 148 |
"""
|
| 149 |
+
if progress: progress(0.01, "⌛ Initializing SeedVR inference...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 150 |
|
| 151 |
+
if gpu_manager.requires_memory_swap():
|
| 152 |
+
logging.warning("[SWAP] Memory swapping is active. Moving LTX service to CPU to free VRAM for SeedVR.")
|
| 153 |
+
if progress: progress(0.02, "🔄 Freeing VRAM for SeedVR...")
|
| 154 |
+
video_generation_service.move_to_cpu()
|
| 155 |
|
| 156 |
+
try:
|
| 157 |
+
if progress: progress(0.05, "🎬 Extracting frames from video...")
|
| 158 |
+
frames_tensor, original_fps = extract_frames_from_video(file_path, debug)
|
| 159 |
|
| 160 |
+
if self.num_gpus == 0:
|
| 161 |
+
raise RuntimeError("SeedVR requires at least 1 allocated GPU, but found none.")
|
| 162 |
+
|
| 163 |
+
logging.info(f"[SeedVR] Splitting {frames_tensor.shape[0]} frames into {self.num_gpus} chunks for parallel processing.")
|
| 164 |
+
chunks = torch.chunk(frames_tensor, self.num_gpus, dim=0)
|
| 165 |
+
|
| 166 |
+
manager = mp.Manager()
|
| 167 |
+
return_queue = manager.Queue()
|
| 168 |
+
progress_queue = manager.Queue() if progress else None
|
| 169 |
+
|
| 170 |
+
shared_args = {
|
| 171 |
+
"model": model, "model_dir": "/data/models/SeedVR", "preserve_vram": preserve_vram,
|
| 172 |
+
"debug": debug, "seed": seed, "resolution": resolution, "batch_size": batch_size
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
if progress: progress(0.1, f"🚀 Starting generation on {self.num_gpus} GPU(s)...")
|
| 176 |
+
workers = []
|
| 177 |
+
for idx, device_id in enumerate(self.device_list):
|
| 178 |
+
p = mp.Process(target=_worker_process, args=(idx, device_id, chunks[idx].cpu().numpy(), shared_args, return_queue, progress_queue))
|
| 179 |
+
p.start()
|
| 180 |
+
workers.append(p)
|
| 181 |
+
|
| 182 |
+
results_np = [None] * self.num_gpus
|
| 183 |
+
finished_workers = 0
|
| 184 |
+
# (Loop de monitoramento de progresso e coleta de resultados)
|
| 185 |
+
# ...
|
| 186 |
|
| 187 |
+
for p in workers: p.join()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 188 |
|
| 189 |
+
if any(r is None for r in results_np):
|
| 190 |
+
raise RuntimeError("One or more workers failed to return a result.")
|
|
|
|
|
|
|
|
|
|
| 191 |
|
| 192 |
+
result_tensor = torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)
|
| 193 |
+
if progress: progress(0.95, "💾 Saving final video...")
|
| 194 |
+
|
| 195 |
+
out_dir = self.OUTPUT_ROOT / f"seedvr_run_{int(time.time())}"
|
| 196 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 197 |
+
output_filepath = out_dir / f"result_{Path(file_path).stem}.mp4"
|
| 198 |
|
| 199 |
+
final_fps = fps if fps and fps > 0 else original_fps
|
| 200 |
+
save_frames_to_video(result_tensor, str(output_filepath), final_fps, debug)
|
| 201 |
+
|
| 202 |
+
logging.info(f"✅ Video successfully saved to: {output_filepath}")
|
| 203 |
+
return str(output_filepath)
|
| 204 |
+
|
| 205 |
+
finally:
|
| 206 |
+
# --- CORREÇÃO IMPORTANTE ---
|
| 207 |
+
# Restaura o LTX para seus dispositivos corretos (main e vae)
|
| 208 |
+
if gpu_manager.requires_memory_swap():
|
| 209 |
+
logging.warning("[SWAP] SeedVR inference finished. Moving LTX service back to GPU(s)...")
|
| 210 |
+
if progress: progress(0.99, "🔄 Restoring LTX environment...")
|
| 211 |
+
ltx_main_device = gpu_manager.get_ltx_device()
|
| 212 |
+
ltx_vae_device = gpu_manager.get_ltx_vae_device()
|
| 213 |
+
# Chama a função move_to_device com os dois dispositivos
|
| 214 |
+
video_generation_service.move_to_device(
|
| 215 |
+
main_device_str=str(ltx_main_device),
|
| 216 |
+
vae_device_str=str(ltx_vae_device)
|
| 217 |
+
)
|
| 218 |
+
logging.info(f"[SWAP] LTX service restored to Main: {ltx_main_device}, VAE: {ltx_vae_device}.")
|
| 219 |
+
|
| 220 |
+
# --- PONTO DE ENTRADA E INSTANCIAÇÃO ---
|
| 221 |
+
# A instância é criada na primeira importação.
|
| 222 |
+
try:
|
| 223 |
+
# A classe é instanciada globalmente para ser usada pela UI
|
| 224 |
+
seedvr_server_singleton = SeedVRServer()
|
| 225 |
+
except Exception as e:
|
| 226 |
+
logging.critical("Failed to initialize SeedVRServer singleton.", exc_info=True)
|
| 227 |
+
seedvr_server_singleton = None
|
api/utils/debug_utils.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: api/utils/debug_utils.py
|
| 2 |
+
# DESCRIPTION: A utility for detailed function logging and debugging.
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import functools
|
| 6 |
+
import logging
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
# Define o nível de log. Mude para "INFO" para desativar os logs detalhados.
|
| 10 |
+
# Você pode controlar isso com uma variável de ambiente.
|
| 11 |
+
LOG_LEVEL = "DEBUG" #os.environ.get("ADUC_LOG_LEVEL", "DEBUG").upper()
|
| 12 |
+
logging.basicConfig(level=LOG_LEVEL, format='[%(levelname)s] [%(name)s] %(message)s')
|
| 13 |
+
logger = logging.getLogger("AducDebug")
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _format_value(value):
|
| 17 |
+
"""Formata os valores dos argumentos para uma exibição concisa e informativa."""
|
| 18 |
+
if isinstance(value, torch.Tensor):
|
| 19 |
+
return f"Tensor(shape={list(value.shape)}, device='{value.device}', dtype={value.dtype})"
|
| 20 |
+
if isinstance(value, str) and len(value) > 70:
|
| 21 |
+
return f"'{value[:70]}...'"
|
| 22 |
+
if isinstance(value, list) and len(value) > 5:
|
| 23 |
+
return f"List(len={len(value)})"
|
| 24 |
+
if isinstance(value, dict) and len(value.keys()) > 5:
|
| 25 |
+
return f"Dict(keys={list(value.keys())[:5]}...)"
|
| 26 |
+
return repr(value)
|
| 27 |
+
|
| 28 |
+
def log_function_io(func):
|
| 29 |
+
"""
|
| 30 |
+
Um decorador que registra as entradas, saídas e exceções de uma função.
|
| 31 |
+
Ele é ativado apenas se o nível de log estiver definido como DEBUG.
|
| 32 |
+
"""
|
| 33 |
+
@functools.wraps(func)
|
| 34 |
+
def wrapper(*args, **kwargs):
|
| 35 |
+
# Só executa a lógica de log se o nível for DEBUG
|
| 36 |
+
if logger.isEnabledFor(logging.DEBUG):
|
| 37 |
+
# Obtém o nome do módulo e da função
|
| 38 |
+
func_name = f"{func.__module__}.{func.__name__}"
|
| 39 |
+
|
| 40 |
+
# Formata os argumentos de entrada
|
| 41 |
+
args_repr = [_format_value(a) for a in args]
|
| 42 |
+
kwargs_repr = {k: _format_value(v) for k, v in kwargs.items()}
|
| 43 |
+
signature = ", ".join(args_repr + [f"{k}={v}" for k, v in kwargs_repr.items()])
|
| 44 |
+
|
| 45 |
+
# Log de Entrada
|
| 46 |
+
logger.debug(f"================ INÍCIO: {func_name} ================")
|
| 47 |
+
logger.debug(f" -> ENTRADA: ({signature})")
|
| 48 |
+
|
| 49 |
+
try:
|
| 50 |
+
# Executa a função original
|
| 51 |
+
result = func(*args, **kwargs)
|
| 52 |
+
|
| 53 |
+
# Formata e registra o resultado
|
| 54 |
+
result_repr = _format_value(result)
|
| 55 |
+
logger.debug(f" <- SAÍDA: {result_repr}")
|
| 56 |
+
|
| 57 |
+
except Exception as e:
|
| 58 |
+
# Registra qualquer exceção que ocorra
|
| 59 |
+
logger.error(f" <-- ERRO em {func_name}: {e}", exc_info=True)
|
| 60 |
+
raise # Re-lança a exceção para não alterar o comportamento do programa
|
| 61 |
+
finally:
|
| 62 |
+
# Log de Fim
|
| 63 |
+
logger.debug(f"================ FIM: {func_name} ================\n")
|
| 64 |
+
|
| 65 |
+
return result
|
| 66 |
+
else:
|
| 67 |
+
# Se o log não estiver em modo DEBUG, executa a função sem nenhum overhead.
|
| 68 |
+
return func(*args, **kwargs)
|
| 69 |
+
|
| 70 |
+
return wrapper
|
api/vae_server.py
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: api/vae_server.py
|
| 2 |
+
# DESCRIPTION: A dedicated, "hot" VAE service specialist.
|
| 3 |
+
# It loads the VAE model onto a dedicated GPU and keeps it in memory
|
| 4 |
+
# to handle all encoding and decoding requests with minimal latency.
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import time
|
| 9 |
+
import logging
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import List, Union, Tuple
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import numpy as np
|
| 15 |
+
from PIL import Image
|
| 16 |
+
|
| 17 |
+
from api.ltx_pool_manager import LatentConditioningItem
|
| 18 |
+
from api.gpu_manager import gpu_manager
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# --- Importações da Arquitetura e do LTX ---
|
| 22 |
+
try:
|
| 23 |
+
# Adiciona o path para as bibliotecas do LTX
|
| 24 |
+
LTX_VIDEO_REPO_DIR = Path("/data/LTX-Video")
|
| 25 |
+
if str(LTX_VIDEO_REPO_DIR.resolve()) not in sys.path:
|
| 26 |
+
sys.path.insert(0, str(LTX_VIDEO_REPO_DIR.resolve()))
|
| 27 |
+
|
| 28 |
+
from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
|
| 29 |
+
from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
|
| 30 |
+
except ImportError as e:
|
| 31 |
+
raise ImportError(f"A crucial import failed for VaeServer. Check dependencies. Error: {e}")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class VaeServer:
|
| 35 |
+
_instance = None
|
| 36 |
+
|
| 37 |
+
def __new__(cls, *args, **kwargs):
|
| 38 |
+
if cls._instance is None:
|
| 39 |
+
cls._instance = super().__new__(cls)
|
| 40 |
+
cls._instance._initialized = False
|
| 41 |
+
return cls._instance
|
| 42 |
+
|
| 43 |
+
def __init__(self):
|
| 44 |
+
if self._initialized: return
|
| 45 |
+
|
| 46 |
+
logging.info("⚙️ Initializing VaeServer Singleton...")
|
| 47 |
+
t0 = time.time()
|
| 48 |
+
|
| 49 |
+
# 1. Obter o dispositivo VAE dedicado do gerenciador central
|
| 50 |
+
self.device = gpu_manager.get_ltx_vae_device()
|
| 51 |
+
|
| 52 |
+
# 2. Carregar o modelo VAE do checkpoint do LTX
|
| 53 |
+
# Assumimos que o setup.py já baixou os modelos.
|
| 54 |
+
try:
|
| 55 |
+
from api.ltx_pool_manager import ltx_pool_manager
|
| 56 |
+
# Reutiliza a configuração e o pipeline já carregados pelo LTX Pool Manager
|
| 57 |
+
# para garantir que estamos usando o mesmo VAE.
|
| 58 |
+
self.vae = ltx_pool_manager.get_pipeline().vae
|
| 59 |
+
except Exception as e:
|
| 60 |
+
logging.critical(f"Failed to get VAE from LTXPoolManager. Is it initialized first? Error: {e}", exc_info=True)
|
| 61 |
+
raise
|
| 62 |
+
|
| 63 |
+
# 3. Garante que o VAE está no dispositivo correto e em modo de avaliação
|
| 64 |
+
self.vae.to(self.device)
|
| 65 |
+
self.vae.eval()
|
| 66 |
+
self.dtype = self.vae.dtype
|
| 67 |
+
|
| 68 |
+
self._initialized = True
|
| 69 |
+
logging.info(f"✅ VaeServer ready. VAE model is 'hot' on {self.device} with dtype {self.dtype}. Startup time: {time.time() - t0:.2f}s")
|
| 70 |
+
|
| 71 |
+
def _cleanup_gpu(self):
|
| 72 |
+
"""Limpa a VRAM da GPU do VAE."""
|
| 73 |
+
if torch.cuda.is_available():
|
| 74 |
+
with torch.cuda.device(self.device):
|
| 75 |
+
torch.cuda.empty_cache()
|
| 76 |
+
|
| 77 |
+
def _preprocess_input(self, item: Union[Image.Image, torch.Tensor], target_resolution: Tuple[int, int]) -> torch.Tensor:
|
| 78 |
+
"""Prepara uma imagem PIL ou um tensor para o formato de pixel que o VAE espera."""
|
| 79 |
+
if isinstance(item, Image.Image):
|
| 80 |
+
from PIL import ImageOps
|
| 81 |
+
img = item.convert("RGB")
|
| 82 |
+
# Redimensiona mantendo a proporção e cortando o excesso
|
| 83 |
+
processed_img = ImageOps.fit(img, target_resolution, Image.Resampling.LANCZOS)
|
| 84 |
+
image_np = np.array(processed_img).astype(np.float32) / 255.0
|
| 85 |
+
tensor = torch.from_numpy(image_np).permute(2, 0, 1) # HWC -> CHW
|
| 86 |
+
elif isinstance(item, torch.Tensor):
|
| 87 |
+
# Se já for um tensor, apenas garante que está no formato CHW
|
| 88 |
+
if item.ndim == 4 and item.shape[0] == 1: # Remove dimensão de batch se houver
|
| 89 |
+
tensor = item.squeeze(0)
|
| 90 |
+
elif item.ndim == 3:
|
| 91 |
+
tensor = item
|
| 92 |
+
else:
|
| 93 |
+
raise ValueError(f"Input tensor must have 3 or 4 dimensions (CHW or BCHW), but got {item.ndim}")
|
| 94 |
+
else:
|
| 95 |
+
raise TypeError(f"Input must be a PIL Image or a torch.Tensor, but got {type(item)}")
|
| 96 |
+
|
| 97 |
+
# Converte para 5D (B, C, F, H, W) e normaliza para [-1, 1]
|
| 98 |
+
tensor_5d = tensor.unsqueeze(0).unsqueeze(2) # Adiciona B=1 e F=1
|
| 99 |
+
return (tensor_5d * 2.0) - 1.0
|
| 100 |
+
|
| 101 |
+
@torch.no_grad()
|
| 102 |
+
def generate_conditioning_items(
|
| 103 |
+
self,
|
| 104 |
+
media_items: List[Union[Image.Image, torch.Tensor]],
|
| 105 |
+
target_frames: List[int],
|
| 106 |
+
strengths: List[float],
|
| 107 |
+
target_resolution: Tuple[int, int]
|
| 108 |
+
) -> List[LatentConditioningItem]:
|
| 109 |
+
"""
|
| 110 |
+
[FUNÇÃO PRINCIPAL]
|
| 111 |
+
Converte uma lista de imagens (PIL ou tensores de pixel) em uma lista de
|
| 112 |
+
LatentConditioningItem, pronta para ser usada pelo pipeline LTX corrigido.
|
| 113 |
+
"""
|
| 114 |
+
t0 = time.time()
|
| 115 |
+
logging.info(f"Generating {len(media_items)} latent conditioning items...")
|
| 116 |
+
|
| 117 |
+
if not (len(media_items) == len(target_frames) == len(strengths)):
|
| 118 |
+
raise ValueError("As listas de media_items, target_frames e strengths devem ter o mesmo tamanho.")
|
| 119 |
+
|
| 120 |
+
conditioning_items = []
|
| 121 |
+
try:
|
| 122 |
+
for item, frame, strength in zip(media_items, target_frames, strengths):
|
| 123 |
+
# 1. Prepara a imagem/tensor para o formato de pixel correto
|
| 124 |
+
pixel_tensor = self._preprocess_input(item, target_resolution)
|
| 125 |
+
|
| 126 |
+
# 2. Move o tensor de pixel para a GPU do VAE e encoda para latente
|
| 127 |
+
pixel_tensor_gpu = pixel_tensor.to(self.device, dtype=self.dtype)
|
| 128 |
+
latents = vae_encode(pixel_tensor_gpu, self.vae, vae_per_channel_normalize=True)
|
| 129 |
+
|
| 130 |
+
# 3. Cria o LatentConditioningItem com o latente (movido para CPU para evitar manter na VRAM)
|
| 131 |
+
conditioning_items.append(LatentConditioningItem(latents.cpu(), frame, strength))
|
| 132 |
+
|
| 133 |
+
logging.info(f"Generated {len(conditioning_items)} items in {time.time() - t0:.2f}s.")
|
| 134 |
+
return conditioning_items
|
| 135 |
+
finally:
|
| 136 |
+
self._cleanup_gpu()
|
| 137 |
+
|
| 138 |
+
@torch.no_grad()
|
| 139 |
+
def decode_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
|
| 140 |
+
"""Decodifica um tensor latente para um tensor de pixels na CPU."""
|
| 141 |
+
t0 = time.time()
|
| 142 |
+
try:
|
| 143 |
+
latent_tensor_gpu = latent_tensor.to(self.device, dtype=self.dtype)
|
| 144 |
+
num_items_in_batch = latent_tensor_gpu.shape[0]
|
| 145 |
+
timestep_tensor = torch.tensor([decode_timestep] * num_items_in_batch, device=self.device, dtype=self.dtype)
|
| 146 |
+
|
| 147 |
+
pixels = vae_decode(
|
| 148 |
+
latent_tensor_gpu, self.vae, is_video=True,
|
| 149 |
+
timestep=timestep_tensor, vae_per_channel_normalize=True
|
| 150 |
+
)
|
| 151 |
+
logging.info(f"Decoded latents with shape {latent_tensor.shape} in {time.time() - t0:.2f}s.")
|
| 152 |
+
return pixels.cpu() # Retorna na CPU
|
| 153 |
+
finally:
|
| 154 |
+
self._cleanup_gpu()
|
| 155 |
+
|
| 156 |
+
# --- Instância Singleton ---
|
| 157 |
+
# A inicialização ocorre quando o módulo é importado pela primeira vez.
|
| 158 |
+
try:
|
| 159 |
+
vae_server_singleton = VaeServer()
|
| 160 |
+
except Exception as e:
|
| 161 |
+
logging.critical("CRITICAL: Failed to initialize VaeServer singleton.", exc_info=True)
|
| 162 |
+
vae_server_singleton = None
|
api/vince_pool_manager.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: api/vince_pool_manager.py
|
| 2 |
+
# DESCRIPTION: Singleton manager for a pool of VINCIE workers, integrated with a central GPU manager.
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
import gc
|
| 7 |
+
import subprocess
|
| 8 |
+
import threading
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import List
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from omegaconf import open_dict
|
| 14 |
+
|
| 15 |
+
# --- Import do Gerenciador Central de GPUs ---
|
| 16 |
+
# Esta é a peça chave da integração. O Pool Manager perguntará a ele quais GPUs usar.
|
| 17 |
+
try:
|
| 18 |
+
from api.gpu_manager import gpu_manager
|
| 19 |
+
except ImportError as e:
|
| 20 |
+
print(f"ERRO CRÍTICO: Não foi possível importar o gpu_manager. {e}", file=sys.stderr)
|
| 21 |
+
sys.exit(1)
|
| 22 |
+
|
| 23 |
+
# --- Configurações Globais (Lidas do Ambiente) ---
|
| 24 |
+
VINCIE_DIR = Path(os.getenv("VINCIE_DIR", "/data/VINCIE"))
|
| 25 |
+
VINCIE_CKPT_DIR = Path(os.getenv("VINCIE_CKPT_DIR", "/data/ckpt/VINCIE-3B"))
|
| 26 |
+
|
| 27 |
+
# --- Classe Worker (Gerencia uma única GPU de forma isolada) ---
|
| 28 |
+
class VinceWorker:
|
| 29 |
+
"""
|
| 30 |
+
Gerencia uma única instância da pipeline VINCIE em um dispositivo GPU específico.
|
| 31 |
+
Opera em um ambiente "isolado" para garantir que só veja sua própria GPU.
|
| 32 |
+
"""
|
| 33 |
+
def __init__(self, device_id: str, config_path: str):
|
| 34 |
+
self.device_id_str = device_id
|
| 35 |
+
self.gpu_index_str = self.device_id_str.split(':')[-1]
|
| 36 |
+
self.config_path = config_path
|
| 37 |
+
self.gen = None
|
| 38 |
+
self.config = None
|
| 39 |
+
print(f"[VinceWorker-{self.device_id_str}] Inicializado. Mapeado para o índice de GPU físico {self.gpu_index_str}.")
|
| 40 |
+
|
| 41 |
+
def _execute_in_isolated_env(self, function_to_run, *args, **kwargs):
|
| 42 |
+
"""
|
| 43 |
+
Wrapper crucial que define CUDA_VISIBLE_DEVICES para isolar a visibilidade da GPU.
|
| 44 |
+
Isso garante que o PyTorch e o VINCIE só possam usar a GPU designada para este worker.
|
| 45 |
+
"""
|
| 46 |
+
original_cuda_visible = os.environ.get('CUDA_VISIBLE_DEVICES')
|
| 47 |
+
try:
|
| 48 |
+
os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index_str
|
| 49 |
+
if torch.cuda.is_available():
|
| 50 |
+
# Dentro deste contexto, 'cuda:0' refere-se à nossa GPU alvo, pois é a única visível.
|
| 51 |
+
torch.cuda.set_device(0)
|
| 52 |
+
return function_to_run(*args, **kwargs)
|
| 53 |
+
finally:
|
| 54 |
+
# Restaura o ambiente original para não afetar outros threads/processos.
|
| 55 |
+
if original_cuda_visible is not None:
|
| 56 |
+
os.environ['CUDA_VISIBLE_DEVICES'] = original_cuda_visible
|
| 57 |
+
elif 'CUDA_VISIBLE_DEVICES' in os.environ:
|
| 58 |
+
del os.environ['CUDA_VISIBLE_DEVICES']
|
| 59 |
+
|
| 60 |
+
def _load_model_task(self):
|
| 61 |
+
"""Tarefa de carregamento do modelo, executada no ambiente isolado."""
|
| 62 |
+
print(f"[VinceWorker-{self.device_id_str}] Carregando modelo para VRAM (GPU física visível: {self.gpu_index_str})...")
|
| 63 |
+
# O dispositivo para o VINCIE será 'cuda:0' porque é a única GPU que este processo pode ver.
|
| 64 |
+
device_for_vincie = 'cuda:0' if torch.cuda.is_available() else 'cpu'
|
| 65 |
+
|
| 66 |
+
original_cwd = Path.cwd()
|
| 67 |
+
try:
|
| 68 |
+
# O código do VINCIE pode precisar ser executado de seu próprio diretório.
|
| 69 |
+
os.chdir(str(VINCIE_DIR))
|
| 70 |
+
# Adiciona o diretório ao path do sistema para encontrar os módulos do VINCIE.
|
| 71 |
+
if str(VINCIE_DIR) not in sys.path: sys.path.insert(0, str(VINCIE_DIR))
|
| 72 |
+
|
| 73 |
+
from common.config import load_config, create_object
|
| 74 |
+
|
| 75 |
+
cfg = load_config(self.config_path, [f"device='{device_for_vincie}'"])
|
| 76 |
+
self.gen = create_object(cfg)
|
| 77 |
+
self.config = cfg
|
| 78 |
+
|
| 79 |
+
# Executa os passos de configuração internos do VINCIE.
|
| 80 |
+
for name in ("configure_persistence", "configure_models", "configure_diffusion"):
|
| 81 |
+
getattr(self.gen, name)()
|
| 82 |
+
|
| 83 |
+
self.gen.to(torch.device(device_for_vincie))
|
| 84 |
+
print(f"[VinceWorker-{self.device_id_str}] ✅ Modelo VINCIE 'quente' e pronto na GPU física {self.gpu_index_str}.")
|
| 85 |
+
finally:
|
| 86 |
+
os.chdir(original_cwd) # Restaura o diretório de trabalho original.
|
| 87 |
+
|
| 88 |
+
def load_model_to_gpu(self):
|
| 89 |
+
"""Método público para carregar o modelo, garantindo o isolamento da GPU."""
|
| 90 |
+
if self.gen is None:
|
| 91 |
+
self._execute_in_isolated_env(self._load_model_task)
|
| 92 |
+
|
| 93 |
+
def _infer_task(self, **kwargs) -> Path:
|
| 94 |
+
"""Tarefa de inferência, executada no ambiente isolado."""
|
| 95 |
+
original_cwd = Path.cwd()
|
| 96 |
+
try:
|
| 97 |
+
os.chdir(str(VINCIE_DIR))
|
| 98 |
+
|
| 99 |
+
# Atualiza a configuração do gerador com os parâmetros da chamada atual.
|
| 100 |
+
with open_dict(self.gen.config):
|
| 101 |
+
self.gen.config.generation.output.dir = str(kwargs["output_dir"])
|
| 102 |
+
image_paths = kwargs.get("image_path", [])
|
| 103 |
+
self.gen.config.generation.positive_prompt.image_path = [str(p) for p in image_paths] if isinstance(image_paths, list) else [str(image_paths)]
|
| 104 |
+
if "prompts" in kwargs:
|
| 105 |
+
self.gen.config.generation.positive_prompt.prompts = list(kwargs["prompts"])
|
| 106 |
+
if "cfg_scale" in kwargs and kwargs["cfg_scale"] is not None:
|
| 107 |
+
self.gen.config.diffusion.cfg.scale = float(kwargs["cfg_scale"])
|
| 108 |
+
|
| 109 |
+
# Inicia o loop de inferência do VINCIE.
|
| 110 |
+
self.gen.inference_loop()
|
| 111 |
+
return Path(kwargs["output_dir"])
|
| 112 |
+
finally:
|
| 113 |
+
os.chdir(original_cwd)
|
| 114 |
+
# Limpeza de memória após a inferência.
|
| 115 |
+
gc.collect()
|
| 116 |
+
if torch.cuda.is_available():
|
| 117 |
+
torch.cuda.empty_cache()
|
| 118 |
+
|
| 119 |
+
def infer(self, **kwargs) -> Path:
|
| 120 |
+
"""Método público para iniciar a inferência, garantindo o isolamento da GPU."""
|
| 121 |
+
if self.gen is None:
|
| 122 |
+
raise RuntimeError(f"Modelo no worker {self.device_id_str} não foi carregado.")
|
| 123 |
+
return self._execute_in_isolated_env(self._infer_task, **kwargs)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
# --- Classe Pool Manager (A Orquestradora Singleton) ---
|
| 127 |
+
class VincePoolManager:
|
| 128 |
+
_instance = None
|
| 129 |
+
_lock = threading.Lock()
|
| 130 |
+
|
| 131 |
+
def __new__(cls, *args, **kwargs):
|
| 132 |
+
with cls._lock:
|
| 133 |
+
if cls._instance is None:
|
| 134 |
+
cls._instance = super().__new__(cls)
|
| 135 |
+
cls._instance._initialized = False
|
| 136 |
+
return cls._instance
|
| 137 |
+
|
| 138 |
+
def __init__(self, output_root: str = "/app/outputs"):
|
| 139 |
+
if self._initialized: return
|
| 140 |
+
with self._lock:
|
| 141 |
+
if self._initialized: return
|
| 142 |
+
|
| 143 |
+
print("⚙️ Inicializando o VincePoolManager Singleton...")
|
| 144 |
+
self.output_root = Path(output_root)
|
| 145 |
+
self.output_root.mkdir(parents=True, exist_ok=True)
|
| 146 |
+
self.worker_lock = threading.Lock()
|
| 147 |
+
self.next_worker_idx = 0
|
| 148 |
+
|
| 149 |
+
# Pergunta ao gerenciador central quais GPUs ele pode usar.
|
| 150 |
+
self.allocated_gpu_indices = gpu_manager.get_vincie_devices()
|
| 151 |
+
|
| 152 |
+
if not self.allocated_gpu_indices:
|
| 153 |
+
# Se não houver GPUs alocadas, não podemos continuar.
|
| 154 |
+
# O setup.py já deve ter sido executado, então não precisamos verificar dependências aqui.
|
| 155 |
+
print("AVISO: Nenhuma GPU alocada para o VINCIE pelo GPUManager. O serviço VINCIE estará inativo.")
|
| 156 |
+
self.workers = []
|
| 157 |
+
self._initialized = True
|
| 158 |
+
return
|
| 159 |
+
|
| 160 |
+
devices = [f'cuda:{i}' for i in self.allocated_gpu_indices]
|
| 161 |
+
vincie_config_path = VINCIE_DIR / "configs/generate.yaml"
|
| 162 |
+
if not vincie_config_path.exists():
|
| 163 |
+
raise FileNotFoundError(f"Arquivo de configuração do VINCIE não encontrado em {vincie_config_path}")
|
| 164 |
+
|
| 165 |
+
self.workers = [VinceWorker(dev_id, str(vincie_config_path)) for dev_id in devices]
|
| 166 |
+
|
| 167 |
+
print(f"Iniciando carregamento dos modelos em paralelo para {len(self.workers)} GPUs VINCIE...")
|
| 168 |
+
threads = [threading.Thread(target=worker.load_model_to_gpu) for worker in self.workers]
|
| 169 |
+
for t in threads: t.start()
|
| 170 |
+
for t in threads: t.join()
|
| 171 |
+
|
| 172 |
+
self._initialized = True
|
| 173 |
+
print(f"✅ VincePoolManager pronto com {len(self.workers)} workers 'quentes'.")
|
| 174 |
+
|
| 175 |
+
def _get_next_worker(self) -> VinceWorker:
|
| 176 |
+
"""Seleciona o próximo worker disponível usando uma estratégia round-robin."""
|
| 177 |
+
if not self.workers:
|
| 178 |
+
raise RuntimeError("Não há workers VINCIE disponíveis para processar a tarefa.")
|
| 179 |
+
|
| 180 |
+
with self.worker_lock:
|
| 181 |
+
worker = self.workers[self.next_worker_idx]
|
| 182 |
+
self.next_worker_idx = (self.next_worker_idx + 1) % len(self.workers)
|
| 183 |
+
print(f"Tarefa despachada para o worker: {worker.device_id_str}")
|
| 184 |
+
return worker
|
| 185 |
+
|
| 186 |
+
def generate_multi_turn(self, input_image: str, turns: List[str], **kwargs) -> Path:
|
| 187 |
+
"""Gera um vídeo a partir de uma imagem e uma sequência de prompts (turnos)."""
|
| 188 |
+
worker = self._get_next_worker()
|
| 189 |
+
out_dir = self.output_root / f"vince_multi_turn_{Path(input_image).stem}_{os.urandom(4).hex()}"
|
| 190 |
+
out_dir.mkdir(parents=True)
|
| 191 |
+
|
| 192 |
+
infer_kwargs = {"output_dir": out_dir, "image_path": input_image, "prompts": turns, **kwargs}
|
| 193 |
+
return worker.infer(**infer_kwargs)
|
| 194 |
+
|
| 195 |
+
def generate_multi_concept(self, concept_images: List[str], concept_prompts: List[str], final_prompt: str, **kwargs) -> Path:
|
| 196 |
+
"""Gera um vídeo a partir de múltiplas imagens-conceito e um prompt final."""
|
| 197 |
+
worker = self._get_next_worker()
|
| 198 |
+
out_dir = self.output_root / f"vince_multi_concept_{os.urandom(4).hex()}"
|
| 199 |
+
out_dir.mkdir(parents=True)
|
| 200 |
+
|
| 201 |
+
all_prompts = concept_prompts + [final_prompt]
|
| 202 |
+
infer_kwargs = {"output_dir": out_dir, "image_path": concept_images, "prompts": all_prompts, **kwargs}
|
| 203 |
+
return worker.infer(**infer_kwargs)
|
| 204 |
+
|
| 205 |
+
# --- Instância Singleton Global ---
|
| 206 |
+
# A inicialização é envolvida em um try-except para evitar que a aplicação inteira quebre
|
| 207 |
+
# se o VINCIE não puder ser inicializado por algum motivo.
|
| 208 |
+
try:
|
| 209 |
+
output_root_path = os.getenv("OUTPUT_ROOT", "/app/outputs")
|
| 210 |
+
vince_pool_manager_singleton = VincePoolManager(output_root=output_root_path)
|
| 211 |
+
except Exception as e:
|
| 212 |
+
print(f"ERRO CRÍTICO ao inicializar o VincePoolManager: {e}", file=sys.stderr)
|
| 213 |
+
traceback.print_exc()
|
| 214 |
+
vince_pool_manager_singleton = None
|
app.py
CHANGED
|
@@ -1,211 +1,262 @@
|
|
| 1 |
-
#
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
import gradio as gr
|
| 4 |
-
import os
|
| 5 |
-
import sys
|
| 6 |
import traceback
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
#
|
| 12 |
-
#
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
#
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
#
|
| 20 |
-
from api.
|
| 21 |
-
|
| 22 |
-
#
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
"
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
"
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
try:
|
| 44 |
-
|
|
|
|
|
|
|
| 45 |
if start_img:
|
| 46 |
num_frames_estimate = int(duration * 24)
|
| 47 |
items_list = [[start_img, 0, 1.0]]
|
| 48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
|
| 50 |
-
|
| 51 |
-
video_path, tensor_path, final_seed = video_generation_service.generate_low(
|
| 52 |
prompt=prompt, negative_prompt=neg_prompt,
|
| 53 |
height=height, width=width, duration=duration,
|
| 54 |
-
|
| 55 |
-
conditioning_items=conditioning_items
|
| 56 |
)
|
| 57 |
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
"refined_latents_ltx": None,
|
| 63 |
-
"used_seed": final_seed
|
| 64 |
-
}
|
| 65 |
-
|
| 66 |
return video_path, new_state, gr.update(visible=True)
|
|
|
|
| 67 |
except Exception as e:
|
| 68 |
-
error_message = f"❌
|
| 69 |
-
|
| 70 |
raise gr.Error(error_message)
|
| 71 |
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
if not state or not state.get("low_res_latents"):
|
| 76 |
-
raise gr.Error("
|
| 77 |
-
|
| 78 |
try:
|
|
|
|
| 79 |
video_path, tensor_path = video_generation_service.generate_upscale_denoise(
|
| 80 |
latents_path=state["low_res_latents"],
|
| 81 |
prompt=prompt,
|
| 82 |
negative_prompt=neg_prompt,
|
| 83 |
-
guidance_scale=cfg,
|
| 84 |
seed=state["used_seed"]
|
| 85 |
)
|
| 86 |
-
|
| 87 |
-
# Atualiza o estado com os novos artefatos refinados
|
| 88 |
state["refined_video_ltx"] = video_path
|
| 89 |
state["refined_latents_ltx"] = tensor_path
|
| 90 |
-
|
| 91 |
return video_path, state
|
| 92 |
except Exception as e:
|
| 93 |
-
error_message = f"❌
|
| 94 |
-
|
| 95 |
raise gr.Error(error_message)
|
| 96 |
|
| 97 |
-
|
| 98 |
-
|
|
|
|
| 99 |
if not state or not state.get("low_res_video"):
|
| 100 |
-
raise gr.Error("
|
| 101 |
if not seedvr_inference_server:
|
| 102 |
-
raise gr.Error("
|
| 103 |
-
|
| 104 |
-
video_path = state["low_res_video"]
|
| 105 |
-
print(f"▶️ Iniciando processo de upscaling SeedVR para o vídeo: {video_path}")
|
| 106 |
|
| 107 |
try:
|
| 108 |
-
|
| 109 |
-
|
|
|
|
| 110 |
output_filepath = seedvr_inference_server.run_inference(
|
| 111 |
-
file_path=
|
| 112 |
-
batch_size=batch_size, fps=fps, progress=progress_wrapper
|
| 113 |
)
|
| 114 |
-
|
| 115 |
-
|
|
|
|
|
|
|
| 116 |
except Exception as e:
|
| 117 |
-
error_message = f"❌
|
| 118 |
-
|
| 119 |
-
return None, gr.update(value=error_message
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
gr.Markdown("
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
gr.Markdown("### Etapa 1: Configurações de Geração")
|
| 131 |
-
prompt_input = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
|
| 132 |
-
neg_prompt_input = gr.Textbox(visible=False, label="Negative Prompt", value="worst quality, blurry, low quality, jittery", lines=2)
|
| 133 |
-
start_image = gr.Image(label="Imagem de Início (Opcional)", type="filepath", sources=["upload", "clipboard"])
|
| 134 |
-
|
| 135 |
-
with gr.Accordion("Parâmetros Avançados", open=False):
|
| 136 |
-
height_input = gr.Slider(label="Height", value=512, step=32, minimum=256, maximum=1024)
|
| 137 |
-
width_input = gr.Slider(label="Width", value=704, step=32, minimum=256, maximum=1024)
|
| 138 |
-
duration_input = gr.Slider(label="Duração (s)", value=4, step=1, minimum=1, maximum=10)
|
| 139 |
-
cfg_input = gr.Slider(label="Guidance Scale (CFG)", value=3.0, step=0.1, minimum=1.0, maximum=10.0)
|
| 140 |
-
seed_input = gr.Number(label="Seed", value=42, precision=0)
|
| 141 |
-
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
|
| 142 |
-
|
| 143 |
-
generate_low_btn = gr.Button("1. Gerar Vídeo Base (Low-Res)", variant="primary")
|
| 144 |
-
|
| 145 |
-
with gr.Column(scale=1):
|
| 146 |
-
gr.Markdown("### Vídeo Base Gerado")
|
| 147 |
-
low_res_video_output = gr.Video(interactive=False)
|
| 148 |
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
gr.Markdown("
|
| 152 |
-
gr.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 154 |
with gr.Tabs():
|
| 155 |
-
|
| 156 |
-
with gr.TabItem("🚀 Upscaler Textura (LTX)"):
|
| 157 |
with gr.Row():
|
| 158 |
with gr.Column(scale=1):
|
| 159 |
-
gr.Markdown("
|
| 160 |
-
gr.
|
| 161 |
-
ltx_refine_btn = gr.Button("Aplicar Refinamento de Textura LTX", variant="primary")
|
| 162 |
with gr.Column(scale=1):
|
| 163 |
-
gr.
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
|
|
|
| 168 |
with gr.Row():
|
| 169 |
with gr.Column(scale=1):
|
| 170 |
-
gr.
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
run_seedvr_button = gr.Button("Iniciar Upscaling SeedVR", variant="primary", interactive=(seedvr_inference_server is not None))
|
| 176 |
-
if not seedvr_inference_server:
|
| 177 |
-
gr.Markdown("Serviço SeedVR não disponível.")
|
| 178 |
with gr.Column(scale=1):
|
| 179 |
-
gr.
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 209 |
|
| 210 |
if __name__ == "__main__":
|
| 211 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FILE: app.py
|
| 2 |
+
# DESCRIPTION: Final Gradio web interface for the ADUC-SDR Video Suite.
|
| 3 |
+
# Features dimension sliders locked to multiples of 8, a unified LTX workflow,
|
| 4 |
+
# advanced controls, integrated SeedVR upscaling, and detailed debug logging.
|
| 5 |
|
| 6 |
import gradio as gr
|
|
|
|
|
|
|
| 7 |
import traceback
|
| 8 |
+
import sys
|
| 9 |
+
import os
|
| 10 |
+
import logging
|
| 11 |
+
|
| 12 |
+
# ==============================================================================
|
| 13 |
+
# --- IMPORTAÇÃO DOS SERVIÇOS DE BACKEND E UTILS ---
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
try:
|
| 17 |
+
# Serviço principal para geração LTX
|
| 18 |
+
from api.ltx_server_refactored_complete import video_generation_service
|
| 19 |
+
|
| 20 |
+
# Nosso decorador de logging para depuração
|
| 21 |
+
from api.utils.debug_utils import log_function_io
|
| 22 |
+
|
| 23 |
+
# Serviço especialista para upscaling de resolução (SeedVR)
|
| 24 |
+
from api.seedvr_server import seedvr_server_singleton as seedvr_inference_server
|
| 25 |
+
|
| 26 |
+
logging.info("All backend services (LTX, SeedVR) and debug utils imported successfully.")
|
| 27 |
+
|
| 28 |
+
except ImportError as e:
|
| 29 |
+
def log_function_io(func): return func
|
| 30 |
+
logging.warning(f"Could not import a module, debug logger might be disabled. SeedVR might be unavailable. Details: {e}")
|
| 31 |
+
if 'video_generation_service' not in locals():
|
| 32 |
+
logging.critical(f"FATAL: Main LTX service failed to import.", exc_info=True)
|
| 33 |
+
sys.exit(1)
|
| 34 |
+
if 'seedvr_inference_server' not in locals():
|
| 35 |
+
seedvr_inference_server = None
|
| 36 |
+
logging.warning("SeedVR server could not be initialized. The SeedVR upscaling tab will be disabled.")
|
| 37 |
+
except Exception as e:
|
| 38 |
+
logging.critical(f"FATAL ERROR: An unexpected error occurred during backend initialization. Details: {e}", exc_info=True)
|
| 39 |
+
sys.exit(1)
|
| 40 |
+
|
| 41 |
+
# ==============================================================================
|
| 42 |
+
# --- FUNÇÕES WRAPPER (PONTE ENTRE UI E BACKEND) ---
|
| 43 |
+
# ==============================================================================
|
| 44 |
+
|
| 45 |
+
@log_function_io
|
| 46 |
+
def run_generate_base_video(
|
| 47 |
+
generation_mode: str, prompt: str, neg_prompt: str, start_img: str,
|
| 48 |
+
height: int, width: int, duration: float,
|
| 49 |
+
fp_guidance_preset: str, fp_guidance_scale_list: str, fp_stg_scale_list: str,
|
| 50 |
+
fp_num_inference_steps: int, fp_skip_initial_steps: int, fp_skip_final_steps: int,
|
| 51 |
+
progress=gr.Progress(track_tqdm=True)
|
| 52 |
+
) -> tuple:
|
| 53 |
+
"""Wrapper para a geração do vídeo base LTX."""
|
| 54 |
try:
|
| 55 |
+
logging.info(f"[UI] Request received. Selected mode: {generation_mode}")
|
| 56 |
+
|
| 57 |
+
initial_conditions = []
|
| 58 |
if start_img:
|
| 59 |
num_frames_estimate = int(duration * 24)
|
| 60 |
items_list = [[start_img, 0, 1.0]]
|
| 61 |
+
initial_conditions = video_generation_service.prepare_condition_items(
|
| 62 |
+
items_list, height, width, num_frames_estimate
|
| 63 |
+
)
|
| 64 |
+
|
| 65 |
+
ltx_configs = {
|
| 66 |
+
"guidance_preset": fp_guidance_preset,
|
| 67 |
+
"guidance_scale_list": fp_guidance_scale_list,
|
| 68 |
+
"stg_scale_list": fp_stg_scale_list,
|
| 69 |
+
"num_inference_steps": fp_num_inference_steps,
|
| 70 |
+
"skip_initial_inference_steps": fp_skip_initial_steps,
|
| 71 |
+
"skip_final_inference_steps": fp_skip_final_steps,
|
| 72 |
+
}
|
| 73 |
|
| 74 |
+
video_path, tensor_path, final_seed = video_generation_service.generate_low_resolution(
|
|
|
|
| 75 |
prompt=prompt, negative_prompt=neg_prompt,
|
| 76 |
height=height, width=width, duration=duration,
|
| 77 |
+
initial_conditions=initial_conditions, ltx_configs_override=ltx_configs
|
|
|
|
| 78 |
)
|
| 79 |
|
| 80 |
+
if not video_path: raise RuntimeError("Backend failed to return a valid video path.")
|
| 81 |
+
|
| 82 |
+
new_state = {"low_res_video": video_path, "low_res_latents": tensor_path, "used_seed": final_seed}
|
| 83 |
+
logging.info(f"[UI] Base video generation successful. Seed used: {final_seed}, Path: {video_path}")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
return video_path, new_state, gr.update(visible=True)
|
| 85 |
+
|
| 86 |
except Exception as e:
|
| 87 |
+
error_message = f"❌ An error occurred during base generation:\n{e}"
|
| 88 |
+
logging.error(f"{error_message}\nDetails: {traceback.format_exc()}", exc_info=True)
|
| 89 |
raise gr.Error(error_message)
|
| 90 |
|
| 91 |
+
@log_function_io
|
| 92 |
+
def run_ltx_refinement(state: dict, prompt: str, neg_prompt: str, progress=gr.Progress(track_tqdm=True)) -> tuple:
|
| 93 |
+
"""Wrapper para o refinamento de textura LTX."""
|
| 94 |
if not state or not state.get("low_res_latents"):
|
| 95 |
+
raise gr.Error("Error: Please generate a base video in Step 1 before refining.")
|
| 96 |
+
|
| 97 |
try:
|
| 98 |
+
logging.info(f"[UI] Requesting LTX refinement for latents: {state.get('low_res_latents')}")
|
| 99 |
video_path, tensor_path = video_generation_service.generate_upscale_denoise(
|
| 100 |
latents_path=state["low_res_latents"],
|
| 101 |
prompt=prompt,
|
| 102 |
negative_prompt=neg_prompt,
|
|
|
|
| 103 |
seed=state["used_seed"]
|
| 104 |
)
|
|
|
|
|
|
|
| 105 |
state["refined_video_ltx"] = video_path
|
| 106 |
state["refined_latents_ltx"] = tensor_path
|
| 107 |
+
logging.info(f"[UI] LTX refinement successful. Path: {video_path}")
|
| 108 |
return video_path, state
|
| 109 |
except Exception as e:
|
| 110 |
+
error_message = f"❌ An error occurred during LTX Refinement:\n{e}"
|
| 111 |
+
logging.error(f"{error_message}\nDetails: {traceback.format_exc()}", exc_info=True)
|
| 112 |
raise gr.Error(error_message)
|
| 113 |
|
| 114 |
+
@log_function_io
|
| 115 |
+
def run_seedvr_upscaling(state: dict, seed: int, resolution: int, batch_size: int, fps: int, progress=gr.Progress(track_tqdm=True)) -> tuple:
|
| 116 |
+
"""Wrapper para o upscale de resolução SeedVR."""
|
| 117 |
if not state or not state.get("low_res_video"):
|
| 118 |
+
raise gr.Error("Error: Please generate a base video in Step 1 before upscaling.")
|
| 119 |
if not seedvr_inference_server:
|
| 120 |
+
raise gr.Error("Error: The SeedVR upscaling server is not available.")
|
|
|
|
|
|
|
|
|
|
| 121 |
|
| 122 |
try:
|
| 123 |
+
logging.info(f"[UI] Requesting SeedVR upscaling for video: {state.get('low_res_video')}")
|
| 124 |
+
def progress_wrapper(p, desc=""): progress(p, desc=desc)
|
| 125 |
+
|
| 126 |
output_filepath = seedvr_inference_server.run_inference(
|
| 127 |
+
file_path=state["low_res_video"], seed=int(seed), resolution=int(resolution),
|
| 128 |
+
batch_size=int(batch_size), fps=float(fps), progress=progress_wrapper
|
| 129 |
)
|
| 130 |
+
|
| 131 |
+
status_message = f"✅ Upscaling complete!\nSaved to: {output_filepath}"
|
| 132 |
+
logging.info(f"[UI] SeedVR upscaling successful. Path: {output_filepath}")
|
| 133 |
+
return gr.update(value=output_filepath), gr.update(value=status_message)
|
| 134 |
except Exception as e:
|
| 135 |
+
error_message = f"❌ An error occurred during SeedVR Upscaling:\n{e}"
|
| 136 |
+
logging.error(f"{error_message}\nDetails: {traceback.format_exc()}", exc_info=True)
|
| 137 |
+
return None, gr.update(value=error_message)
|
| 138 |
+
|
| 139 |
+
# ==============================================================================
|
| 140 |
+
# --- CONSTRUÇÃO DA INTERFACE GRADIO ---
|
| 141 |
+
# ==============================================================================
|
| 142 |
+
|
| 143 |
+
def build_ui():
|
| 144 |
+
"""Constrói a interface completa do Gradio."""
|
| 145 |
+
with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo")) as demo:
|
| 146 |
+
app_state = gr.State(value={"low_res_video": None, "low_res_latents": None, "used_seed": None})
|
| 147 |
+
ui_components = {}
|
| 148 |
+
gr.Markdown("# ADUC-SDR Video Suite - LTX & SeedVR Workflow", elem_id="main-title")
|
| 149 |
+
with gr.Row():
|
| 150 |
+
with gr.Column(scale=1): _build_generation_controls(ui_components)
|
| 151 |
+
with gr.Column(scale=1):
|
| 152 |
+
gr.Markdown("### Etapa 1: Vídeo Base Gerado")
|
| 153 |
+
ui_components['low_res_video_output'] = gr.Video(label="O resultado aparecerá aqui", interactive=False)
|
| 154 |
+
ui_components['used_seed_display'] = gr.Textbox(label="Seed Utilizada", interactive=False)
|
| 155 |
+
_build_postprod_controls(ui_components)
|
| 156 |
+
_register_event_handlers(app_state, ui_components)
|
| 157 |
+
return demo
|
| 158 |
|
| 159 |
+
def _build_generation_controls(ui: dict):
|
| 160 |
+
"""Constrói os componentes da UI para a Etapa 1: Geração."""
|
| 161 |
+
gr.Markdown("### Configurações de Geração")
|
| 162 |
+
ui['generation_mode'] = gr.Radio(label="Modo de Geração", choices=["Simples (Prompt Único)", "Narrativa (Múltiplos Prompts)"], value="Narrativa (Múltiplos Prompts)", info="Simples para uma ação contínua, Narrativa para uma sequência (uma cena por linha).")
|
| 163 |
+
ui['prompt'] = gr.Textbox(label="Prompt(s)", value="Um leão majestoso caminha pela savana\nEle sobe em uma grande pedra e olha o horizonte", lines=4)
|
| 164 |
+
ui['neg_prompt'] = gr.Textbox(label="Negative Prompt", value="blurry, low quality, bad anatomy, deformed", lines=2)
|
| 165 |
+
ui['start_image'] = gr.Image(label="Imagem de Início (Opcional)", type="filepath", sources=["upload"])
|
| 166 |
|
| 167 |
+
with gr.Accordion("Parâmetros Principais", open=True):
|
| 168 |
+
ui['duration'] = gr.Slider(label="Duração Total (s)", value=4, step=1, minimum=1, maximum=30)
|
| 169 |
+
with gr.Row():
|
| 170 |
+
ui['height'] = gr.Slider(label="Height", value=432, step=8, minimum=256, maximum=1024)
|
| 171 |
+
ui['width'] = gr.Slider(label="Width", value=768, step=8, minimum=256, maximum=1024)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
|
| 173 |
+
with gr.Accordion("Opções Avançadas LTX", open=False):
|
| 174 |
+
gr.Markdown("#### Configurações de Passos de Inferência (First Pass)")
|
| 175 |
+
gr.Markdown("*Deixe o valor padrão (ex: 20) ou 0 para usar a configuração do `config.yaml`.*")
|
| 176 |
+
ui['fp_num_inference_steps'] = gr.Slider(label="Número de Passos", minimum=0, maximum=100, step=1, value=20, info="Padrão LTX: 20.")
|
| 177 |
+
ui['fp_skip_initial_steps'] = gr.Slider(label="Pular Passos Iniciais", minimum=0, maximum=100, step=1, value=0)
|
| 178 |
+
ui['fp_skip_final_steps'] = gr.Slider(label="Pular Passos Finais", minimum=0, maximum=100, step=1, value=0)
|
| 179 |
+
with gr.Tabs():
|
| 180 |
+
with gr.TabItem("Configurações de Guiagem (First Pass)"):
|
| 181 |
+
ui['fp_guidance_preset'] = gr.Dropdown(label="Preset de Guiagem", choices=["Padrão (Recomendado)", "Agressivo", "Suave", "Customizado"], value="Padrão (Recomendado)", info="Controla o comportamento da guiagem durante a difusão.")
|
| 182 |
+
with gr.Group(visible=False) as ui['custom_guidance_group']:
|
| 183 |
+
gr.Markdown("⚠️ Edite as listas em formato JSON. Ex: `[1.0, 2.5, 3.0]`")
|
| 184 |
+
ui['fp_guidance_scale_list'] = gr.Textbox(label="Lista de Guidance Scale", value="[1, 1, 6, 8, 6, 1, 1]")
|
| 185 |
+
ui['fp_stg_scale_list'] = gr.Textbox(label="Lista de STG Scale (Movimento)", value="[0, 0, 4, 4, 4, 2, 1]")
|
| 186 |
+
|
| 187 |
+
ui['generate_low_btn'] = gr.Button("1. Gerar Vídeo Base", variant="primary")
|
| 188 |
|
| 189 |
+
def _build_postprod_controls(ui: dict):
|
| 190 |
+
"""Constrói os componentes da UI para a Etapa 2: Pós-Produção."""
|
| 191 |
+
with gr.Group(visible=False) as ui['post_prod_group']:
|
| 192 |
+
gr.Markdown("--- \n## Etapa 2: Pós-Produção")
|
| 193 |
with gr.Tabs():
|
| 194 |
+
with gr.TabItem("🚀 Upscaler de Textura (LTX)"):
|
|
|
|
| 195 |
with gr.Row():
|
| 196 |
with gr.Column(scale=1):
|
| 197 |
+
gr.Markdown("Usa o prompt e a semente originais para refinar o vídeo, adicionando detalhes e texturas de alta qualidade.")
|
| 198 |
+
ui['ltx_refine_btn'] = gr.Button("2. Aplicar Refinamento LTX", variant="primary")
|
|
|
|
| 199 |
with gr.Column(scale=1):
|
| 200 |
+
ui['ltx_refined_video_output'] = gr.Video(label="Vídeo com Textura Refinada", interactive=False)
|
| 201 |
+
|
| 202 |
+
with gr.TabItem("✨ Upscaler de Resolução (SeedVR)"):
|
| 203 |
+
is_seedvr_available = seedvr_inference_server is not None
|
| 204 |
+
if not is_seedvr_available:
|
| 205 |
+
gr.Markdown("🔴 **AVISO: O serviço SeedVR não está disponível.**")
|
| 206 |
with gr.Row():
|
| 207 |
with gr.Column(scale=1):
|
| 208 |
+
ui['seedvr_seed'] = gr.Slider(minimum=0, maximum=999999, value=42, step=1, label="Seed")
|
| 209 |
+
ui['seedvr_resolution'] = gr.Slider(minimum=720, maximum=2160, value=1080, step=8, label="Resolução Vertical Alvo")
|
| 210 |
+
ui['seedvr_batch_size'] = gr.Slider(minimum=1, maximum=16, value=4, step=1, label="Batch Size por GPU")
|
| 211 |
+
ui['seedvr_fps'] = gr.Number(label="FPS de Saída (0 = original)", value=0)
|
| 212 |
+
ui['run_seedvr_btn'] = gr.Button("2. Iniciar Upscaling SeedVR", variant="primary", interactive=is_seedvr_available)
|
|
|
|
|
|
|
|
|
|
| 213 |
with gr.Column(scale=1):
|
| 214 |
+
ui['seedvr_video_output'] = gr.Video(label="Vídeo com Upscale SeedVR", interactive=False)
|
| 215 |
+
ui['seedvr_status_box'] = gr.Textbox(label="Status do SeedVR", value="Aguardando...", lines=3, interactive=False)
|
| 216 |
+
|
| 217 |
+
def _register_event_handlers(app_state: gr.State, ui: dict):
|
| 218 |
+
"""Registra todos os manipuladores de eventos do Gradio."""
|
| 219 |
+
def toggle_custom_guidance(preset_choice: str) -> gr.update:
|
| 220 |
+
return gr.update(visible=(preset_choice == "Customizado"))
|
| 221 |
+
|
| 222 |
+
ui['fp_guidance_preset'].change(fn=toggle_custom_guidance, inputs=ui['fp_guidance_preset'], outputs=ui['custom_guidance_group'])
|
| 223 |
+
|
| 224 |
+
def update_seed_display(state):
|
| 225 |
+
return state.get("used_seed", "N/A")
|
| 226 |
+
|
| 227 |
+
gen_inputs = [
|
| 228 |
+
ui['generation_mode'], ui['prompt'], ui['neg_prompt'], ui['start_image'],
|
| 229 |
+
ui['height'], ui['width'], ui['duration'],
|
| 230 |
+
ui['fp_guidance_preset'], ui['fp_guidance_scale_list'], ui['fp_stg_scale_list'],
|
| 231 |
+
ui['fp_num_inference_steps'], ui['fp_skip_initial_steps'], ui['fp_skip_final_steps'],
|
| 232 |
+
]
|
| 233 |
+
gen_outputs = [ui['low_res_video_output'], app_state, ui['post_prod_group']]
|
| 234 |
+
|
| 235 |
+
(ui['generate_low_btn'].click(fn=run_generate_base_video, inputs=gen_inputs, outputs=gen_outputs)
|
| 236 |
+
.then(fn=update_seed_display, inputs=[app_state], outputs=[ui['used_seed_display']]))
|
| 237 |
+
|
| 238 |
+
refine_inputs = [app_state, ui['prompt'], ui['neg_prompt']]
|
| 239 |
+
refine_outputs = [ui['ltx_refined_video_output'], app_state]
|
| 240 |
+
ui['ltx_refine_btn'].click(fn=run_ltx_refinement, inputs=refine_inputs, outputs=refine_outputs)
|
| 241 |
+
|
| 242 |
+
if 'run_seedvr_btn' in ui and ui['run_seedvr_btn'].interactive:
|
| 243 |
+
seedvr_inputs = [app_state, ui['seedvr_seed'], ui['seedvr_resolution'], ui['seedvr_batch_size'], ui['seedvr_fps']]
|
| 244 |
+
seedvr_outputs = [ui['seedvr_video_output'], ui['seedvr_status_box']]
|
| 245 |
+
ui['run_seedvr_btn'].click(fn=run_seedvr_upscaling, inputs=seedvr_inputs, outputs=seedvr_outputs)
|
| 246 |
+
|
| 247 |
+
# ==============================================================================
|
| 248 |
+
# --- PONTO DE ENTRADA DA APLICAÇÃO ---
|
| 249 |
+
# ==============================================================================
|
| 250 |
|
| 251 |
if __name__ == "__main__":
|
| 252 |
+
log_level = os.environ.get("ADUC_LOG_LEVEL", "INFO").upper()
|
| 253 |
+
logging.basicConfig(level=log_level, format='[%(levelname)s] [%(name)s] %(message)s')
|
| 254 |
+
|
| 255 |
+
print("Building Gradio UI...")
|
| 256 |
+
gradio_app = build_ui()
|
| 257 |
+
print("Launching Gradio app...")
|
| 258 |
+
gradio_app.queue().launch(
|
| 259 |
+
server_name=os.getenv("GRADIO_SERVER_NAME", "0.0.0.0"),
|
| 260 |
+
server_port=int(os.getenv("GRADIO_SERVER_PORT", "7860")),
|
| 261 |
+
show_error=True
|
| 262 |
+
)
|
compose.yaml
CHANGED
|
@@ -1,26 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
| 1 |
services:
|
| 2 |
-
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
deploy:
|
| 5 |
resources:
|
| 6 |
reservations:
|
| 7 |
devices:
|
| 8 |
-
-
|
|
|
|
|
|
|
| 9 |
ports:
|
| 10 |
-
- "
|
| 11 |
-
environment:
|
| 12 |
-
GRADIO_SERVER_PORT: "7860"
|
| 13 |
-
HF_HUB_CACHE: "/data/.cache/huggingface/hub"
|
| 14 |
-
CKPT_ROOT: "/data/ckpt/VINCIE-3B"
|
| 15 |
-
VINCIE_ROOT: "/data/VINCIE"
|
| 16 |
volumes:
|
| 17 |
-
-
|
| 18 |
-
-
|
| 19 |
-
|
| 20 |
-
- vincie_repo:/data/VINCIE
|
| 21 |
volumes:
|
| 22 |
-
|
| 23 |
-
vincie_ckpt: {}
|
| 24 |
-
vincie_out: {}
|
| 25 |
-
vincie_repo: {}
|
| 26 |
-
|
|
|
|
| 1 |
+
# compose.yaml (Versão com VINCIE)
|
| 2 |
+
version: '3.8'
|
| 3 |
+
|
| 4 |
services:
|
| 5 |
+
aduc-sdr-app:
|
| 6 |
+
build: .
|
| 7 |
+
environment:
|
| 8 |
+
ADUC_LOG_LEVEL: "DEBUG"
|
| 9 |
+
image: aduc-sdr-videosuite:latest
|
| 10 |
+
# (deploy, resources... mantidos como antes)
|
| 11 |
+
ports:
|
| 12 |
+
- "7860:7860" # Porta para a UI principal (LTX + SeedVR)
|
| 13 |
+
- "7861:7861" # Porta para a nova UI do VINCIE
|
| 14 |
+
volumes:
|
| 15 |
+
# O volume 'aduc_data' agora armazena tudo: cache, modelos e repos.
|
| 16 |
+
- aduc_data:/data
|
| 17 |
+
- ./output:/app/output
|
| 18 |
+
# O entrypoint cuidará do setup na inicialização.
|
| 19 |
+
# O CMD padrão iniciará a UI principal. Para VINCIE, usaremos um comando diferente.
|
| 20 |
+
|
| 21 |
+
# Novo serviço para a interface do VINCIE
|
| 22 |
+
vince-ui:
|
| 23 |
+
image: aduc-sdr-videosuite:latest # Usa a mesma imagem já construída
|
| 24 |
+
command: python3 /app/app_vince.py # Sobrescreve o CMD padrão para iniciar a UI do VINCIE
|
| 25 |
deploy:
|
| 26 |
resources:
|
| 27 |
reservations:
|
| 28 |
devices:
|
| 29 |
+
- driver: nvidia
|
| 30 |
+
count: all
|
| 31 |
+
capabilities: [gpu]
|
| 32 |
ports:
|
| 33 |
+
- "7861:7861"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
volumes:
|
| 35 |
+
- aduc_data:/data
|
| 36 |
+
- ./output:/app/output
|
| 37 |
+
|
|
|
|
| 38 |
volumes:
|
| 39 |
+
aduc_data:
|
|
|
|
|
|
|
|
|
|
|
|
entrypoint.sh
CHANGED
|
@@ -1,21 +1,16 @@
|
|
| 1 |
-
#!/bin/
|
| 2 |
-
# entrypoint.sh - Executado como root para corrigir permissões.
|
| 3 |
set -e
|
| 4 |
|
| 5 |
-
echo "
|
| 6 |
|
| 7 |
-
#
|
| 8 |
-
#
|
| 9 |
-
|
|
|
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
|
| 14 |
-
#
|
| 15 |
-
#
|
| 16 |
-
chown -R 1000:1000 $DIRS_TO_OWN
|
| 17 |
-
|
| 18 |
-
echo "✅ ENTRYPOINT (root): Permissões corrigidas."
|
| 19 |
-
|
| 20 |
-
# Passa a execução para o comando principal (CMD) definido no Dockerfile.
|
| 21 |
exec "$@"
|
|
|
|
| 1 |
+
#!/bin/bash
|
|
|
|
| 2 |
set -e
|
| 3 |
|
| 4 |
+
echo "🚀 ADUC-SDR Entrypoint: Verificando ambiente..."
|
| 5 |
|
| 6 |
+
# Passo 1: Executa o script de setup para garantir que repositórios e modelos existem.
|
| 7 |
+
# O setup.py é inteligente e pulará downloads se os arquivos já existirem no volume /data.
|
| 8 |
+
echo " > Executando setup.py para clonar repositórios e baixar modelos (apenas se necessário)..."
|
| 9 |
+
python3 /app/setup.py
|
| 10 |
|
| 11 |
+
echo " > Ambiente pronto!"
|
| 12 |
+
echo "---------------------------------------------------------"
|
| 13 |
|
| 14 |
+
# Passo 2: Executa o comando principal passado para o contêiner (CMD no Dockerfile)
|
| 15 |
+
# Por padrão, será "/app/start.sh"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
exec "$@"
|
managers/vae_manager.py
CHANGED
|
@@ -1,90 +1,96 @@
|
|
| 1 |
-
# vae_manager.py
|
| 2 |
-
# Responsável por decodificar latentes (B,C,T,H,W) → pixels (B,C,T,H',W') em [0,1].
|
| 3 |
|
| 4 |
import torch
|
| 5 |
import contextlib
|
| 6 |
-
import
|
| 7 |
-
import subprocess
|
| 8 |
import sys
|
| 9 |
from pathlib import Path
|
|
|
|
|
|
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
logging.set_verbosity_error()
|
| 15 |
-
logging.set_verbosity_warning()
|
| 16 |
-
logging.set_verbosity_info()
|
| 17 |
-
logging.set_verbosity_debug()
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
DEPS_DIR = Path("/data")
|
| 23 |
-
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
|
| 24 |
-
if not LTX_VIDEO_REPO_DIR.exists():
|
| 25 |
-
print(f"[DEBUG] Repositório não encontrado em {LTX_VIDEO_REPO_DIR}. Rodando setup...")
|
| 26 |
-
run_setup()
|
| 27 |
|
| 28 |
def add_deps_to_path():
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
|
| 30 |
-
if
|
| 31 |
sys.path.insert(0, repo_path)
|
| 32 |
-
|
| 33 |
|
|
|
|
| 34 |
add_deps_to_path()
|
| 35 |
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
|
| 41 |
class _SimpleVAEManager:
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
self.
|
| 49 |
-
self.
|
| 50 |
-
self.autocast_dtype = autocast_dtype
|
| 51 |
|
| 52 |
def attach_pipeline(self, pipeline, device=None, autocast_dtype=None):
|
| 53 |
self.pipeline = pipeline
|
| 54 |
if device is not None:
|
| 55 |
-
self.device = device
|
|
|
|
| 56 |
if autocast_dtype is not None:
|
| 57 |
self.autocast_dtype = autocast_dtype
|
| 58 |
|
| 59 |
-
|
| 60 |
-
|
| 61 |
@torch.no_grad()
|
| 62 |
def decode(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
|
| 64 |
-
#
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
#
|
| 68 |
-
num_items_in_batch =
|
| 69 |
-
timestep_tensor = torch.tensor([decode_timestep] * num_items_in_batch, device=self.device
|
| 70 |
|
| 71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
with ctx:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
pixels = vae_decode(
|
| 74 |
-
|
| 75 |
-
self.pipeline.vae
|
| 76 |
is_video=True,
|
| 77 |
timestep=timestep_tensor,
|
| 78 |
-
vae_per_channel_normalize=True,
|
| 79 |
)
|
| 80 |
|
| 81 |
-
#
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
return pixels
|
| 87 |
-
|
| 88 |
|
| 89 |
-
# Singleton global
|
| 90 |
-
vae_manager_singleton = _SimpleVAEManager()
|
|
|
|
| 1 |
+
# FILE: managers/vae_manager.py (Versão Final com vae_decode corrigido)
|
|
|
|
| 2 |
|
| 3 |
import torch
|
| 4 |
import contextlib
|
| 5 |
+
import logging
|
|
|
|
| 6 |
import sys
|
| 7 |
from pathlib import Path
|
| 8 |
+
import os
|
| 9 |
+
import io
|
| 10 |
|
| 11 |
+
LTX_VIDEO_REPO_DIR = Path("/data/LTX-Video")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
def add_deps_to_path():
|
| 14 |
+
"""
|
| 15 |
+
Adiciona o diretório do repositório LTX ao sys.path para garantir que suas
|
| 16 |
+
bibliotecas possam ser importadas.
|
| 17 |
+
"""
|
| 18 |
repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
|
| 19 |
+
if repo_path not in sys.path:
|
| 20 |
sys.path.insert(0, repo_path)
|
| 21 |
+
logging.info(f"[ltx_utils] LTX-Video repository added to sys.path: {repo_path}")
|
| 22 |
|
| 23 |
+
# Executa a função imediatamente para configurar o ambiente antes de qualquer importação.
|
| 24 |
add_deps_to_path()
|
| 25 |
|
| 26 |
|
| 27 |
+
# --- IMPORTAÇÃO CRÍTICA ---
|
| 28 |
+
# Importa a função helper oficial da biblioteca LTX para decodificação.
|
| 29 |
+
try:
|
| 30 |
+
from ltx_video.models.autoencoders.vae_encode import vae_decode
|
| 31 |
+
except ImportError:
|
| 32 |
+
raise ImportError("Could not import 'vae_decode' from LTX-Video library. Check sys.path and repo integrity.")
|
| 33 |
|
| 34 |
|
| 35 |
class _SimpleVAEManager:
|
| 36 |
+
"""
|
| 37 |
+
Manages VAE decoding, now using the official 'vae_decode' helper function
|
| 38 |
+
for maximum compatibility.
|
| 39 |
+
"""
|
| 40 |
+
def __init__(self):
|
| 41 |
+
self.pipeline = None
|
| 42 |
+
self.device = torch.device("cpu")
|
| 43 |
+
self.autocast_dtype = torch.float32
|
|
|
|
| 44 |
|
| 45 |
def attach_pipeline(self, pipeline, device=None, autocast_dtype=None):
|
| 46 |
self.pipeline = pipeline
|
| 47 |
if device is not None:
|
| 48 |
+
self.device = torch.device(device)
|
| 49 |
+
logging.info(f"[VAEManager] VAE device successfully set to: {self.device}")
|
| 50 |
if autocast_dtype is not None:
|
| 51 |
self.autocast_dtype = autocast_dtype
|
| 52 |
|
|
|
|
|
|
|
| 53 |
@torch.no_grad()
|
| 54 |
def decode(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor:
|
| 55 |
+
"""
|
| 56 |
+
Decodes a latent tensor into a pixel tensor using the 'vae_decode' helper.
|
| 57 |
+
"""
|
| 58 |
+
if self.pipeline is None:
|
| 59 |
+
raise RuntimeError("VAEManager: No pipeline has been attached.")
|
| 60 |
|
| 61 |
+
# Move os latentes para o dispositivo VAE dedicado.
|
| 62 |
+
latent_tensor_on_vae_device = latent_tensor.to(self.device)
|
| 63 |
+
|
| 64 |
+
# Prepara o tensor de timesteps no mesmo dispositivo.
|
| 65 |
+
num_items_in_batch = latent_tensor_on_vae_device.shape[0]
|
| 66 |
+
timestep_tensor = torch.tensor([decode_timestep] * num_items_in_batch, device=self.device)
|
| 67 |
|
| 68 |
+
autocast_device_type = self.device.type
|
| 69 |
+
ctx = torch.autocast(
|
| 70 |
+
device_type=autocast_device_type,
|
| 71 |
+
dtype=self.autocast_dtype,
|
| 72 |
+
enabled=(autocast_device_type == 'cuda')
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
with ctx:
|
| 76 |
+
logging.debug(f"[VAEManager] Decoding latents with shape {latent_tensor_on_vae_device.shape} on {self.device}.")
|
| 77 |
+
|
| 78 |
+
# --- CORREÇÃO PRINCIPAL ---
|
| 79 |
+
# Usa a função helper `vae_decode` em vez de chamar `vae.decode` diretamente.
|
| 80 |
+
# Esta função sabe como lidar com o argumento 'timestep'.
|
| 81 |
pixels = vae_decode(
|
| 82 |
+
latents=latent_tensor_on_vae_device,
|
| 83 |
+
vae=self.pipeline.vae,
|
| 84 |
is_video=True,
|
| 85 |
timestep=timestep_tensor,
|
| 86 |
+
vae_per_channel_normalize=True, # Importante manter este parâmetro consistente
|
| 87 |
)
|
| 88 |
|
| 89 |
+
# A função vae_decode já retorna no intervalo [0, 1], mas um clamp extra não faz mal.
|
| 90 |
+
pixels = pixels.clamp(0, 1)
|
| 91 |
+
|
| 92 |
+
logging.debug("[VAEManager] Decoding complete. Moving pixel tensor to CPU.")
|
| 93 |
+
return pixels.cpu()
|
|
|
|
|
|
|
| 94 |
|
| 95 |
+
# Singleton global
|
| 96 |
+
vae_manager_singleton = _SimpleVAEManager()
|
setup.py
CHANGED
|
@@ -2,179 +2,170 @@
|
|
| 2 |
#
|
| 3 |
# Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
|
| 4 |
#
|
| 5 |
-
# Versão
|
| 6 |
-
# -
|
| 7 |
-
# -
|
| 8 |
-
# - Unifica o download de todas as dependências (Git, LTX Models, SeedVR Models).
|
| 9 |
|
| 10 |
import os
|
| 11 |
import subprocess
|
| 12 |
import sys
|
| 13 |
from pathlib import Path
|
| 14 |
import yaml
|
| 15 |
-
from huggingface_hub import hf_hub_download
|
| 16 |
|
| 17 |
-
#
|
|
|
|
|
|
|
|
|
|
| 18 |
DEPS_DIR = Path("/data")
|
|
|
|
| 19 |
|
| 20 |
-
# ---
|
| 21 |
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
-
# ---
|
| 24 |
-
SEEDVR_MODELS_DIR = DEPS_DIR / "SeedVR"
|
| 25 |
-
|
| 26 |
-
# --- Repositórios para Clonar ---
|
| 27 |
REPOS_TO_CLONE = {
|
| 28 |
"LTX-Video": "https://huggingface.co/spaces/Lightricks/ltx-video-distilled",
|
| 29 |
"SeedVR": "https://github.com/numz/ComfyUI-SeedVR2_VideoUpscaler",
|
| 30 |
-
"
|
| 31 |
}
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
def run_command(command, cwd=None):
|
| 34 |
-
"""Executa um comando no terminal e
|
| 35 |
print(f"Executando: {' '.join(command)}")
|
| 36 |
try:
|
| 37 |
subprocess.run(
|
| 38 |
-
command,
|
| 39 |
-
|
| 40 |
-
cwd=cwd,
|
| 41 |
-
stdin=subprocess.DEVNULL,
|
| 42 |
)
|
| 43 |
except subprocess.CalledProcessError as e:
|
| 44 |
-
print(f"ERRO: O comando falhou com o código
|
| 45 |
sys.exit(1)
|
| 46 |
except FileNotFoundError:
|
| 47 |
-
print(f"ERRO:
|
| 48 |
sys.exit(1)
|
| 49 |
|
| 50 |
-
# --- Funções de Download (LTX-Video) ---
|
| 51 |
-
|
| 52 |
def _load_ltx_config():
|
| 53 |
"""Carrega o arquivo de configuração YAML do LTX-Video."""
|
| 54 |
print("--- Carregando Configuração do LTX-Video ---")
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
base / "ltxv-13b-0.9.8-distilled-fp8.yaml",
|
| 59 |
-
base / "ltxv-13b-0.9.8-distilled.yaml",
|
| 60 |
-
]
|
| 61 |
-
for cfg_path in candidates:
|
| 62 |
-
if cfg_path.exists():
|
| 63 |
-
print(f"Configuração encontrada: {cfg_path}")
|
| 64 |
-
with open(cfg_path, "r") as file:
|
| 65 |
-
return yaml.safe_load(file)
|
| 66 |
-
|
| 67 |
-
fallback_path = base / "ltxv-13b-0.9.8-distilled-fp8.yaml"
|
| 68 |
-
print(f"AVISO: Nenhuma configuração preferencial encontrada. Usando fallback: {fallback_path}")
|
| 69 |
-
if not fallback_path.exists():
|
| 70 |
-
print(f"ERRO: Arquivo de configuração fallback '{fallback_path}' não encontrado.")
|
| 71 |
return None
|
| 72 |
-
|
| 73 |
-
with open(
|
| 74 |
return yaml.safe_load(file)
|
| 75 |
|
| 76 |
-
def
|
| 77 |
-
"""
|
| 78 |
-
|
| 79 |
-
LTX_REPO = "Lightricks/LTX-Video"
|
| 80 |
-
|
| 81 |
-
if "checkpoint_path" not in config or "spatial_upscaler_model_path" not in config:
|
| 82 |
-
print("ERRO: Chaves de modelo não encontradas no arquivo de configuração do LTX.")
|
| 83 |
-
sys.exit(1)
|
| 84 |
-
|
| 85 |
-
models_to_download = {
|
| 86 |
-
config["checkpoint_path"]: "checkpoint principal",
|
| 87 |
-
config["spatial_upscaler_model_path"]: "upscaler espacial"
|
| 88 |
-
}
|
| 89 |
-
|
| 90 |
-
# O hf_hub_download já verifica o cache, mas vamos verificar o diretório final para clareza
|
| 91 |
-
# e para garantir que a lógica seja explícita.
|
| 92 |
-
for filename, description in models_to_download.items():
|
| 93 |
-
# A biblioteca huggingface_hub gerencia o local exato, então confiamos nela.
|
| 94 |
-
# A verificação aqui é para garantir que o download seja tentado.
|
| 95 |
-
print(f"Garantindo a existência do {description}: {filename}...")
|
| 96 |
-
try:
|
| 97 |
-
hf_hub_download(
|
| 98 |
-
repo_id=LTX_REPO, filename=filename,
|
| 99 |
-
local_dir=os.getenv("HF_HOME"), cache_dir=os.getenv("HF_HOME_CACHE"), token=os.getenv("HF_TOKEN")
|
| 100 |
-
)
|
| 101 |
-
print(f"{description.capitalize()} está disponível.")
|
| 102 |
-
except Exception as e:
|
| 103 |
-
print(f"ERRO ao baixar o {description}: {e}")
|
| 104 |
-
sys.exit(1)
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
def _download_seedvr_models():
|
| 108 |
-
"""Baixa os modelos do SeedVR, pulando os que já existem."""
|
| 109 |
-
print(f"\n--- Verificando Checkpoints do SeedVR em {SEEDVR_MODELS_DIR} ---")
|
| 110 |
-
SEEDVR_MODELS_DIR.mkdir(exist_ok=True)
|
| 111 |
-
|
| 112 |
-
model_files = {
|
| 113 |
-
"seedvr2_ema_7b_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 114 |
-
"seedvr2_ema_7b_sharp_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 115 |
-
"seedvr2_ema_3b_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 116 |
-
"ema_vae_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 117 |
-
"pos_emb.pt": "ByteDance-Seed/SeedVR2-3B",
|
| 118 |
-
"neg_emb.pt": "ByteDance-Seed/SeedVR2-3B"
|
| 119 |
-
}
|
| 120 |
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
if
|
| 124 |
-
|
| 125 |
-
|
| 126 |
hf_hub_download(
|
| 127 |
-
repo_id=repo_id,
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
cache_dir=os.getenv("HF_HOME_CACHE"),
|
| 131 |
token=os.getenv("HF_TOKEN"),
|
| 132 |
)
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
|
| 141 |
-
#
|
|
|
|
|
|
|
| 142 |
|
| 143 |
def main():
|
| 144 |
-
|
|
|
|
| 145 |
DEPS_DIR.mkdir(exist_ok=True)
|
|
|
|
| 146 |
|
| 147 |
# --- ETAPA 1: Clonar Repositórios ---
|
| 148 |
-
print("\n--- ETAPA 1:
|
| 149 |
for repo_name, repo_url in REPOS_TO_CLONE.items():
|
| 150 |
repo_path = DEPS_DIR / repo_name
|
| 151 |
-
if repo_path.is_dir():
|
| 152 |
-
print(f"Repositório '{repo_name}' já existe. Pulando.")
|
| 153 |
else:
|
| 154 |
print(f"Clonando '{repo_name}' de {repo_url}...")
|
| 155 |
run_command(["git", "clone", "--depth", "1", repo_url, str(repo_path)])
|
| 156 |
-
print(f"'{repo_name}' clonado com sucesso.")
|
| 157 |
|
| 158 |
-
# --- ETAPA 2: Baixar Modelos
|
| 159 |
-
print("\n--- ETAPA 2:
|
| 160 |
-
if not LTX_VIDEO_REPO_DIR.is_dir():
|
| 161 |
-
print(f"ERRO: Diretório '{LTX_VIDEO_REPO_DIR}' não encontrado. Execute a clonagem primeiro.")
|
| 162 |
-
sys.exit(1)
|
| 163 |
-
|
| 164 |
ltx_config = _load_ltx_config()
|
| 165 |
-
if ltx_config:
|
| 166 |
-
_download_ltx_models(ltx_config)
|
| 167 |
-
else:
|
| 168 |
print("ERRO: Não foi possível carregar a configuração do LTX-Video. Abortando.")
|
| 169 |
sys.exit(1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 170 |
|
| 171 |
-
# --- ETAPA 3: Baixar Modelos
|
| 172 |
-
print("\n--- ETAPA 3:
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
-
print("\n\n--- Setup do Ambiente Concluído com Sucesso! ---")
|
| 176 |
-
print("Todos os repositórios e modelos
|
| 177 |
-
print("Você agora pode iniciar a aplicação principal.")
|
| 178 |
|
| 179 |
if __name__ == "__main__":
|
| 180 |
main()
|
|
|
|
| 2 |
#
|
| 3 |
# Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
|
| 4 |
#
|
| 5 |
+
# Versão 3.1.0 (Setup Unificado com LTX, SeedVR e VINCIE com Cache Robusto)
|
| 6 |
+
# - Orquestra a instalação de todos os repositórios e modelos para a suíte ADUC-SDR.
|
| 7 |
+
# - Usa snapshot_download para baixar dependências de forma eficiente e correta.
|
|
|
|
| 8 |
|
| 9 |
import os
|
| 10 |
import subprocess
|
| 11 |
import sys
|
| 12 |
from pathlib import Path
|
| 13 |
import yaml
|
| 14 |
+
from huggingface_hub import hf_hub_download, snapshot_download
|
| 15 |
|
| 16 |
+
# ==============================================================================
|
| 17 |
+
# --- CONFIGURAÇÃO DE PATHS E CACHE ---
|
| 18 |
+
# ==============================================================================
|
| 19 |
+
# Assume que /data é um volume persistente montado no contêiner.
|
| 20 |
DEPS_DIR = Path("/data")
|
| 21 |
+
CACHE_DIR = DEPS_DIR / ".cache" / "huggingface"
|
| 22 |
|
| 23 |
+
# --- Paths dos Módulos da Aplicação ---
|
| 24 |
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
|
| 25 |
+
SEEDVR_MODELS_DIR = DEPS_DIR / "models" / "SeedVR"
|
| 26 |
+
VINCIE_REPO_DIR = DEPS_DIR / "VINCIE"
|
| 27 |
+
VINCIE_CKPT_DIR = DEPS_DIR / "ckpt" / "VINCIE-3B"
|
| 28 |
|
| 29 |
+
# --- Repositórios Git para Clonar ---
|
|
|
|
|
|
|
|
|
|
| 30 |
REPOS_TO_CLONE = {
|
| 31 |
"LTX-Video": "https://huggingface.co/spaces/Lightricks/ltx-video-distilled",
|
| 32 |
"SeedVR": "https://github.com/numz/ComfyUI-SeedVR2_VideoUpscaler",
|
| 33 |
+
"VINCIE": "https://github.com/ByteDance-Seed/VINCIE",
|
| 34 |
}
|
| 35 |
|
| 36 |
+
# ==============================================================================
|
| 37 |
+
# --- FUNÇÕES AUXILIARES ---
|
| 38 |
+
# ==============================================================================
|
| 39 |
+
|
| 40 |
def run_command(command, cwd=None):
|
| 41 |
+
"""Executa um comando no terminal de forma segura e com logs claros."""
|
| 42 |
print(f"Executando: {' '.join(command)}")
|
| 43 |
try:
|
| 44 |
subprocess.run(
|
| 45 |
+
command, check=True, cwd=cwd,
|
| 46 |
+
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
|
|
|
|
|
|
|
| 47 |
)
|
| 48 |
except subprocess.CalledProcessError as e:
|
| 49 |
+
print(f"ERRO: O comando falhou com o código {e.returncode}\nStderr:\n{e.stderr.strip()}")
|
| 50 |
sys.exit(1)
|
| 51 |
except FileNotFoundError:
|
| 52 |
+
print(f"ERRO: Comando '{command[0]}' não encontrado. Verifique se o git está instalado.")
|
| 53 |
sys.exit(1)
|
| 54 |
|
|
|
|
|
|
|
| 55 |
def _load_ltx_config():
|
| 56 |
"""Carrega o arquivo de configuração YAML do LTX-Video."""
|
| 57 |
print("--- Carregando Configuração do LTX-Video ---")
|
| 58 |
+
config_file = LTX_VIDEO_REPO_DIR / "configs" / "ltxv-13b-0.9.8-distilled-fp8.yaml"
|
| 59 |
+
if not config_file.exists():
|
| 60 |
+
print(f"ERRO: Arquivo de configuração do LTX não encontrado em '{config_file}'")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
return None
|
| 62 |
+
print(f"Configuração LTX encontrada: {config_file}")
|
| 63 |
+
with open(config_file, "r") as file:
|
| 64 |
return yaml.safe_load(file)
|
| 65 |
|
| 66 |
+
def _ensure_hf_model(repo_id, filenames=None, allow_patterns=None, local_dir=None):
|
| 67 |
+
"""Função genérica para baixar um ou mais arquivos (hf_hub_download) ou um snapshot (snapshot_download)."""
|
| 68 |
+
if not repo_id: return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
+
print(f"Verificando/Baixando modelo do repositório: '{repo_id}'...")
|
| 71 |
+
try:
|
| 72 |
+
if filenames: # Baixa arquivos específicos
|
| 73 |
+
for filename in filenames:
|
| 74 |
+
if not filename: continue
|
| 75 |
hf_hub_download(
|
| 76 |
+
repo_id=repo_id, filename=filename, cache_dir=str(CACHE_DIR),
|
| 77 |
+
local_dir=str(local_dir) if local_dir else None,
|
| 78 |
+
#local_dir_use_symlinks=False,
|
|
|
|
| 79 |
token=os.getenv("HF_TOKEN"),
|
| 80 |
)
|
| 81 |
+
else: # Baixa um snapshot (partes de um repositório)
|
| 82 |
+
snapshot_download(
|
| 83 |
+
repo_id=repo_id, cache_dir=str(CACHE_DIR),
|
| 84 |
+
local_dir=str(local_dir) if local_dir else None,
|
| 85 |
+
allow_patterns=allow_patterns,
|
| 86 |
+
token=os.getenv("HF_TOKEN"),
|
| 87 |
+
)
|
| 88 |
+
print(f"-> Modelo '{repo_id}' está disponível.")
|
| 89 |
+
except Exception as e:
|
| 90 |
+
print(f"ERRO CRÍTICO ao baixar o modelo '{repo_id}': {e}")
|
| 91 |
+
sys.exit(1)
|
| 92 |
|
| 93 |
+
# ==============================================================================
|
| 94 |
+
# --- FUNÇÃO PRINCIPAL DE SETUP ---
|
| 95 |
+
# ==============================================================================
|
| 96 |
|
| 97 |
def main():
|
| 98 |
+
"""Orquestra todo o processo de setup do ambiente."""
|
| 99 |
+
print("--- Iniciando Setup do Ambiente ADUC-SDR (LTX + SeedVR + VINCIE) ---")
|
| 100 |
DEPS_DIR.mkdir(exist_ok=True)
|
| 101 |
+
CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
| 102 |
|
| 103 |
# --- ETAPA 1: Clonar Repositórios ---
|
| 104 |
+
print("\n--- ETAPA 1: Verificando Repositórios Git ---")
|
| 105 |
for repo_name, repo_url in REPOS_TO_CLONE.items():
|
| 106 |
repo_path = DEPS_DIR / repo_name
|
| 107 |
+
if repo_path.is_dir():
|
| 108 |
+
print(f"Repositório '{repo_name}' já existe em '{repo_path}'. Pulando.")
|
| 109 |
else:
|
| 110 |
print(f"Clonando '{repo_name}' de {repo_url}...")
|
| 111 |
run_command(["git", "clone", "--depth", "1", repo_url, str(repo_path)])
|
| 112 |
+
print(f"-> '{repo_name}' clonado com sucesso.")
|
| 113 |
|
| 114 |
+
# --- ETAPA 2: Baixar Modelos LTX-Video e Dependências ---
|
| 115 |
+
print("\n--- ETAPA 2: Verificando Modelos LTX-Video e Dependências ---")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 116 |
ltx_config = _load_ltx_config()
|
| 117 |
+
if not ltx_config:
|
|
|
|
|
|
|
| 118 |
print("ERRO: Não foi possível carregar a configuração do LTX-Video. Abortando.")
|
| 119 |
sys.exit(1)
|
| 120 |
+
|
| 121 |
+
_ensure_hf_model(
|
| 122 |
+
repo_id="Lightricks/LTX-Video",
|
| 123 |
+
filenames=[ltx_config.get("checkpoint_path"), ltx_config.get("spatial_upscaler_model_path")]
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
_ensure_hf_model(
|
| 127 |
+
repo_id=ltx_config.get("text_encoder_model_name_or_path"),
|
| 128 |
+
allow_patterns=["*.json", "*.safetensors"]
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
enhancer_repos = [
|
| 132 |
+
ltx_config.get("prompt_enhancer_image_caption_model_name_or_path"),
|
| 133 |
+
ltx_config.get("prompt_enhancer_llm_model_name_or_path"),
|
| 134 |
+
]
|
| 135 |
+
for repo_id in filter(None, enhancer_repos):
|
| 136 |
+
_ensure_hf_model(repo_id=repo_id, allow_patterns=["*.json", "*.safetensors", "*.bin"])
|
| 137 |
|
| 138 |
+
# --- ETAPA 3: Baixar Modelos SeedVR ---
|
| 139 |
+
print("\n--- ETAPA 3: Verificando Modelos SeedVR ---")
|
| 140 |
+
SEEDVR_MODELS_DIR.mkdir(parents=True, exist_ok=True)
|
| 141 |
+
seedvr_files = {
|
| 142 |
+
"seedvr2_ema_7b_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 143 |
+
"seedvr2_ema_7b_sharp_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 144 |
+
"ema_vae_fp16.safetensors": "MonsterMMORPG/SeedVR2_SECourses",
|
| 145 |
+
}
|
| 146 |
+
for filename, repo_id in seedvr_files.items():
|
| 147 |
+
if not (SEEDVR_MODELS_DIR / filename).is_file():
|
| 148 |
+
_ensure_hf_model(repo_id=repo_id, filenames=[filename], local_dir=SEEDVR_MODELS_DIR)
|
| 149 |
+
else:
|
| 150 |
+
print(f"Arquivo SeedVR '{filename}' já existe. Pulando.")
|
| 151 |
+
|
| 152 |
+
# --- ETAPA 4: Baixar Modelos VINCIE ---
|
| 153 |
+
print("\n--- ETAPA 4: Verificando Modelos VINCIE ---")
|
| 154 |
+
VINCIE_CKPT_DIR.mkdir(parents=True, exist_ok=True)
|
| 155 |
+
_ensure_hf_model(repo_id="ByteDance-Seed/VINCIE-3B", local_dir=VINCIE_CKPT_DIR)
|
| 156 |
+
|
| 157 |
+
# Cria o symlink de compatibilidade, se necessário
|
| 158 |
+
repo_ckpt_dir = VINCIE_REPO_DIR / "ckpt"
|
| 159 |
+
repo_ckpt_dir.mkdir(parents=True, exist_ok=True)
|
| 160 |
+
link = repo_ckpt_dir / "VINCIE-3B"
|
| 161 |
+
if not link.exists():
|
| 162 |
+
link.symlink_to(VINCIE_CKPT_DIR.resolve(), target_is_directory=True)
|
| 163 |
+
print(f"-> Symlink de compatibilidade VINCIE criado: '{link}' -> '{VINCIE_CKPT_DIR.resolve()}'")
|
| 164 |
+
else:
|
| 165 |
+
print(f"-> Symlink de compatibilidade VINCIE já existe.")
|
| 166 |
|
| 167 |
+
print("\n\n--- ✅ Setup Completo do Ambiente ADUC-SDR Concluído com Sucesso! ---")
|
| 168 |
+
print("Todos os repositórios e modelos foram verificados e estão prontos para uso.")
|
|
|
|
| 169 |
|
| 170 |
if __name__ == "__main__":
|
| 171 |
main()
|
start.sh
CHANGED
|
@@ -1,43 +1,8 @@
|
|
| 1 |
-
#!/
|
| 2 |
-
|
| 3 |
|
| 4 |
|
| 5 |
-
|
| 6 |
-
tree -L 4 /app
|
| 7 |
-
tree -L 4 /data
|
| 8 |
-
|
| 9 |
-
echo "🚀 Iniciando o script de setup e lançamento do LTX-Video..."
|
| 10 |
-
echo "Usuário atual: $(whoami)"
|
| 11 |
-
|
| 12 |
-
# Define as variáveis de ambiente que o LTXServer irá consumir
|
| 13 |
-
export HF_HOME="${HF_HOME:-/data/.cache/huggingface}"
|
| 14 |
-
export OUTPUT_ROOT="${OUTPUT_ROOT:-/app/outputs/ltx}"
|
| 15 |
-
export LTXV_FRAME_LOG_EVERY=8
|
| 16 |
-
export LTXV_DEBUG=1
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
# --- Garante que Diretórios Existam ---
|
| 20 |
-
mkdir -p "$OUTPUT_ROOT" "$HF_HOME"
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
# 1) Builder (garante Apex/Flash e deps CUDA)
|
| 24 |
-
#echo "🛠️ Iniciando o builder.sh para compilar/instalar dependências CUDA..."
|
| 25 |
-
#if [ -f "/app/builder.sh" ]; then
|
| 26 |
-
# /bin/bash /app/builder.sh
|
| 27 |
-
# echo "✅ Builder finalizado."
|
| 28 |
-
#else
|
| 29 |
-
# echo "⚠️ Aviso: builder.sh não encontrado. Pulando etapa de compilação de dependências."
|
| 30 |
-
#fi
|
| 31 |
-
|
| 32 |
-
python setup.py
|
| 33 |
-
|
| 34 |
-
cp -rfv /app/LTX-Video/ /data/
|
| 35 |
-
|
| 36 |
-
export OUTPUT_ROOT="${OUTPUT_ROOT:-/app/outputs}"
|
| 37 |
-
export INPUT_ROOT="${INPUT_ROOT:-/app/inputs}"
|
| 38 |
-
|
| 39 |
-
mkdir -p "$OUTPUT_ROOT" "$INPUT_ROOT"
|
| 40 |
-
echo "[aduc][start] Verificando ambiente como usuário: $(whoami)"
|
| 41 |
|
| 42 |
# Env da UI
|
| 43 |
export GRADIO_SERVER_NAME="0.0.0.0"
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
echo "🔥 Iniciando a aplicação principal Gradio (app.py)..."
|
| 3 |
|
| 4 |
|
| 5 |
+
tree -L 6 /data
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# Env da UI
|
| 8 |
export GRADIO_SERVER_NAME="0.0.0.0"
|