caarleexx committed
Commit 3e91f0e · verified · 1 Parent(s): 7b2197f

Delete api/ltx_server.py

Files changed (1)
  1. api/ltx_server.py +0 -952
api/ltx_server.py DELETED
@@ -1,952 +0,0 @@
- # ltx_server.py — VideoService (beta 1.1)
- # Always output_type="latent"; at the end: VAE (whole block) → pixels → MP4.
- # Ignores UserWarning/FutureWarning and attaches the VAE to the manager with the correct dtype/device.
-
- # --- 0. WARNINGS AND ENVIRONMENT ---
- import os
- import warnings
- warnings.filterwarnings("ignore", category=UserWarning)
- warnings.filterwarnings("ignore", category=FutureWarning)
- warnings.filterwarnings("ignore", message=".*")
-
- from huggingface_hub import logging
-
- # Each set_verbosity_* call overrides the previous one, so only the last
- # takes effect; a single call to the intended level is enough.
- logging.set_verbosity_debug()
-
- # These flags are read back via os.getenv() in VideoService.__init__, so they
- # must be set in the process environment, not as plain module globals.
- os.environ.setdefault("LTXV_DEBUG", "1")
- os.environ.setdefault("LTXV_FRAME_LOG_EVERY", "8")
-
- # --- 1. IMPORTS ---
- import subprocess
- import shlex
- import tempfile
- import torch
- import json
- import numpy as np
- import random
- import yaml
- from typing import List, Dict
- from pathlib import Path
- import imageio
- from huggingface_hub import hf_hub_download
- import sys
- import gc
- import shutil
- import contextlib
- import time
- import traceback
- from einops import rearrange
- import torch.nn.functional as F
- import gradio as gr    # required by handle_media_upload_for_dims (gr.update)
- from PIL import Image  # required by handle_media_upload_for_dims
-
- # Singletons (simple versions)
- from managers.vae_manager import vae_manager_singleton
- from tools.video_encode_tool import video_encode_tool_singleton
-
- # --- 2. DEPENDENCY MANAGEMENT AND SETUP ---
- def _query_gpu_processes_via_nvml(device_index: int) -> List[Dict]:
-     try:
-         import psutil
-         import pynvml as nvml
-         nvml.nvmlInit()
-         handle = nvml.nvmlDeviceGetHandleByIndex(device_index)
-         try:
-             procs = nvml.nvmlDeviceGetComputeRunningProcesses_v3(handle)
-         except Exception:
-             procs = nvml.nvmlDeviceGetComputeRunningProcesses(handle)
-         results = []
-         for p in procs:
-             pid = int(p.pid)
-             used_mb = None
-             try:
-                 if getattr(p, "usedGpuMemory", None) is not None and p.usedGpuMemory not in (0,):
-                     used_mb = max(0, int(p.usedGpuMemory) // (1024 * 1024))
-             except Exception:
-                 used_mb = None
-             name = "unknown"
-             user = "unknown"
-             try:
-                 pr = psutil.Process(pid)
-                 name = pr.name()
-                 user = pr.username()
-             except Exception:
-                 pass
-             results.append({"pid": pid, "name": name, "user": user, "used_mb": used_mb})
-         nvml.nvmlShutdown()
-         return results
-     except Exception:
-         return []
-
- def _query_gpu_processes_via_nvidiasmi(device_index: int) -> List[Dict]:
-     cmd = f"nvidia-smi -i {device_index} --query-compute-apps=pid,process_name,used_memory --format=csv,noheader,nounits"
-     try:
-         out = subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT, text=True, timeout=2.0)
-     except Exception:
-         return []
-     results = []
-     for line in out.strip().splitlines():
-         parts = [p.strip() for p in line.split(",")]
-         if len(parts) >= 3:
-             try:
-                 pid = int(parts[0]); name = parts[1]; used_mb = int(parts[2])
-                 user = "unknown"
-                 try:
-                     import psutil
-                     pr = psutil.Process(pid)
-                     user = pr.username()
-                 except Exception:
-                     pass
-                 results.append({"pid": pid, "name": name, "user": user, "used_mb": used_mb})
-             except Exception:
-                 continue
-     return results
-
-
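Editor's note: a minimal usage sketch of the two helpers above (not part of the original file). _log_gpu_memory() below combines them in exactly this NVML-first, nvidia-smi-fallback order; the device index 0 is an illustrative assumption.

processes = _query_gpu_processes_via_nvml(0) or _query_gpu_processes_via_nvidiasmi(0)
for p in processes:
    print(p["pid"], p["user"], p["name"], p["used_mb"])  # one row per compute process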
- def calculate_new_dimensions(orig_w, orig_h, divisor=8):
-     """
-     Computes new dimensions that preserve the aspect ratio while ensuring both
-     sides are divisible by the given divisor (default 8).
-     """
-     if orig_w == 0 or orig_h == 0:
-         # Return a safe default
-         return 512, 512
-
-     # Preserve the orientation (landscape vs. portrait)
-     if orig_w >= orig_h:
-         # Landscape or square
-         aspect_ratio = orig_w / orig_h
-         # Start from a base height and derive the width
-         new_h = 512  # Base height for landscape
-         new_w = new_h * aspect_ratio
-     else:
-         # Portrait
-         aspect_ratio = orig_h / orig_w
-         # Start from a base width and derive the height
-         new_w = 512  # Base width for portrait
-         new_h = new_w * aspect_ratio
-
-     # Round BOTH values to the nearest multiple of the divisor
-     final_w = int(round(new_w / divisor)) * divisor
-     final_h = int(round(new_h / divisor)) * divisor
-
-     # Make sure neither dimension collapses to zero after rounding
-     final_w = max(divisor, final_w)
-     final_h = max(divisor, final_h)
-
-     print(f"[Dimension Calc] Original: {orig_w}x{orig_h} -> Computed: {new_w:.0f}x{new_h:.0f} -> Final (divisible by {divisor}): {final_w}x{final_h}")
-     return final_h, final_w  # Returns (height, width)
-
-
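Editor's note: a worked example of the rounding above (not part of the original file). For a 1920x1080 landscape input, the base height is 512, the derived width is 512 * (1920/1080) ≈ 910.2, and rounding both to the nearest multiple of 8 gives 912x512.

h, w = calculate_new_dimensions(1920, 1080)
assert (h, w) == (512, 912)  # returned as (height, width)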
- def handle_media_upload_for_dims(filepath, current_h, current_w):
-     """
-     This function now relies on the robust calculation above.
-     (Its body needs no changes, since it already delegates to that helper.)
-     """
-     if not filepath or not os.path.exists(str(filepath)):
-         return gr.update(value=current_h), gr.update(value=current_w)
-     try:
-         if str(filepath).lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
-             with Image.open(filepath) as img:
-                 orig_w, orig_h = img.size
-         else:  # Assume it is a video
-             with imageio.get_reader(filepath) as reader:
-                 meta = reader.get_meta_data()
-                 orig_w, orig_h = meta.get('size', (current_w, current_h))
-
-         # Call the corrected helper
-         new_h, new_w = calculate_new_dimensions(orig_w, orig_h)
-
-         return gr.update(value=new_h), gr.update(value=new_w)
-     except Exception as e:
-         print(f"Error while probing media for dimensions: {e}")
-         return gr.update(value=current_h), gr.update(value=current_w)
-
-
- def _gpu_process_table(processes: List[Dict], current_pid: int) -> str:
-     if not processes:
-         return " - Active processes: (none)\n"
-     processes = sorted(processes, key=lambda x: (x.get("used_mb") or 0), reverse=True)
-     lines = [" - Active processes (PID | USER | NAME | VRAM MB):"]
-     for p in processes:
-         star = "*" if p["pid"] == current_pid else " "
-         used_str = str(p["used_mb"]) if p.get("used_mb") is not None else "N/A"
-         lines.append(f"   {star} {p['pid']} | {p['user']} | {p['name']} | {used_str}")
-     return "\n".join(lines) + "\n"
-
- def run_setup():
-     setup_script_path = "setup.py"
-     if not os.path.exists(setup_script_path):
-         print("[DEBUG] 'setup.py' not found. Skipping dependency cloning.")
-         return
-     try:
-         print("[DEBUG] Running setup.py for dependencies...")
-         subprocess.run([sys.executable, setup_script_path], check=True)
-         print("[DEBUG] Setup finished successfully.")
-     except subprocess.CalledProcessError as e:
-         print(f"[DEBUG] ERROR in setup.py (code {e.returncode}). Aborting.")
-         sys.exit(1)
-
- from api.ltx.inference import (
-     create_ltx_video_pipeline,
-     create_latent_upsampler,
-     load_image_to_tensor_with_resize_and_crop,
-     seed_everething,
-     calculate_padding,
-     load_media_file,
- )
-
- DEPS_DIR = Path("/data")
- LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
- if not LTX_VIDEO_REPO_DIR.exists():
-     print(f"[DEBUG] Repository not found at {LTX_VIDEO_REPO_DIR}. Running setup...")
-     run_setup()
-
- def add_deps_to_path():
-     repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
-     if repo_path not in sys.path:
-         sys.path.insert(0, repo_path)
-     print(f"[DEBUG] Repo added to sys.path: {repo_path}")
-
- add_deps_to_path()
-
- # --- 3. MODEL-SPECIFIC IMPORTS ---
-
- from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline
- from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
- from ltx_video.models.autoencoders.vae_encode import un_normalize_latents, normalize_latents
- from ltx_video.pipelines.pipeline_ltx_video import adain_filter_latent
-
-
- # --- 4. LOGGING HELPERS ---
- def log_tensor_info(tensor, name="Tensor"):
-     if not isinstance(tensor, torch.Tensor):
-         print(f"\n[INFO] '{name}' is not a tensor.")
-         return
-     print(f"\n--- Tensor: {name} ---")
-     print(f" - Shape: {tuple(tensor.shape)}")
-     print(f" - Dtype: {tensor.dtype}")
-     print(f" - Device: {tensor.device}")
-     if tensor.numel() > 0:
-         try:
-             print(f" - Min: {tensor.min().item():.4f} Max: {tensor.max().item():.4f} Mean: {tensor.mean().item():.4f}")
-         except Exception:
-             pass
-     print("------------------------------------------\n")
-
-
- # --- 5. MAIN SERVICE CLASS ---
- class VideoService:
-     def __init__(self):
-         t0 = time.perf_counter()
-         print("[DEBUG] Initializing VideoService...")
-         self.debug = os.getenv("LTXV_DEBUG", "1") == "1"
-         self.frame_log_every = int(os.getenv("LTXV_FRAME_LOG_EVERY", "8"))
-         self.config = self._load_config()
-         print(f"[DEBUG] Config loaded (precision={self.config.get('precision')}, sampler={self.config.get('sampler')})")
-         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-         print(f"[DEBUG] Selected device: {self.device}")
-         self.last_memory_reserved_mb = 0.0
-         self._tmp_dirs = set(); self._tmp_files = set(); self._last_outputs = []
-
-         self.pipeline, self.latent_upsampler = self._load_models()
-         print(f"[DEBUG] Pipeline and upsampler loaded. Upsampler active? {bool(self.latent_upsampler)}")
-
-         print(f"[DEBUG] Moving models to {self.device}...")
-         self.pipeline.to(self.device)
-         if self.latent_upsampler:
-             self.latent_upsampler.to(self.device)
-
-         self._apply_precision_policy()
-         print(f"[DEBUG] runtime_autocast_dtype = {getattr(self, 'runtime_autocast_dtype', None)}")
-
-         # Attach the pipeline/VAE to the manager (prevents vae=None)
-         vae_manager_singleton.attach_pipeline(
-             self.pipeline,
-             device=self.device,
-             autocast_dtype=self.runtime_autocast_dtype
-         )
-         print(f"[DEBUG] VAE manager attached: has_vae={hasattr(self.pipeline, 'vae')} device={self.device}")
-
-         if self.device == "cuda":
-             torch.cuda.empty_cache()
-             self._log_gpu_memory("After loading models")
-
-         print(f"[DEBUG] VideoService ready. boot_time={time.perf_counter()-t0:.3f}s")
-
-     def _log_gpu_memory(self, stage_name: str):
-         if self.device != "cuda":
-             return
-         device_index = torch.cuda.current_device() if torch.cuda.is_available() else 0
-         current_reserved_b = torch.cuda.memory_reserved(device_index)
-         current_reserved_mb = current_reserved_b / (1024 ** 2)
-         total_memory_b = torch.cuda.get_device_properties(device_index).total_memory
-         total_memory_mb = total_memory_b / (1024 ** 2)
-         peak_reserved_mb = torch.cuda.max_memory_reserved(device_index) / (1024 ** 2)
-         delta_mb = current_reserved_mb - getattr(self, "last_memory_reserved_mb", 0.0)
-         processes = _query_gpu_processes_via_nvml(device_index) or _query_gpu_processes_via_nvidiasmi(device_index)
-         print(f"\n--- [GPU LOG] {stage_name} (cuda:{device_index}) ---")
-         print(f" - Reserved: {current_reserved_mb:.2f} MB / {total_memory_mb:.2f} MB (Δ={delta_mb:+.2f} MB)")
-         if peak_reserved_mb > getattr(self, "last_memory_reserved_mb", 0.0):
-             print(f" - Peak reserved (this stage): {peak_reserved_mb:.2f} MB")
-         print(_gpu_process_table(processes, os.getpid()), end="")
-         print("--------------------------------------------------\n")
-         self.last_memory_reserved_mb = current_reserved_mb
-
-     def _register_tmp_dir(self, d: str):
-         if d and os.path.isdir(d):
-             self._tmp_dirs.add(d); print(f"[DEBUG] Registered tmp dir: {d}")
-
-     def _register_tmp_file(self, f: str):
-         if f and os.path.exists(f):
-             self._tmp_files.add(f); print(f"[DEBUG] Registered tmp file: {f}")
-
-     def finalize(self, keep_paths=None, extra_paths=None, clear_gpu=True):
-         print("[DEBUG] Finalize: starting cleanup...")
-         keep = set(keep_paths or []); extras = set(extra_paths or [])
-         removed_files = 0
-         for f in list(self._tmp_files | extras):
-             try:
-                 if f not in keep and os.path.isfile(f):
-                     os.remove(f); removed_files += 1; print(f"[DEBUG] Removed tmp file: {f}")
-             except Exception as e:
-                 print(f"[DEBUG] Failed to remove file {f}: {e}")
-             finally:
-                 self._tmp_files.discard(f)
-         removed_dirs = 0
-         for d in list(self._tmp_dirs):
-             try:
-                 if d not in keep and os.path.isdir(d):
-                     shutil.rmtree(d, ignore_errors=True); removed_dirs += 1; print(f"[DEBUG] Removed tmp dir: {d}")
-             except Exception as e:
-                 print(f"[DEBUG] Failed to remove dir {d}: {e}")
-             finally:
-                 self._tmp_dirs.discard(d)
-         print(f"[DEBUG] Finalize: files removed={removed_files}, dirs removed={removed_dirs}")
-         gc.collect()
-         try:
-             if clear_gpu and torch.cuda.is_available():
-                 torch.cuda.empty_cache()
-                 try:
-                     torch.cuda.ipc_collect()
-                 except Exception:
-                     pass
-         except Exception as e:
-             print(f"[DEBUG] Finalize: GPU cleanup failed: {e}")
-         try:
-             self._log_gpu_memory("After finalize")
-         except Exception as e:
-             print(f"[DEBUG] Post-finalize GPU log failed: {e}")
-
-     def _load_config(self):
-         base = LTX_VIDEO_REPO_DIR / "configs"
-         cfg = base / "ltxv-13b-0.9.8-distilled-fp8.yaml"
-         print(f"[DEBUG] Config: {cfg}")
-         with open(cfg, "r") as file:
-             return yaml.safe_load(file)
-
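Editor's note: a sketch of the keys this class reads from that YAML; the values below are illustrative assumptions, not the contents of the shipped ltxv-13b-0.9.8-distilled-fp8.yaml.

# Hypothetical shape of self.config (keys taken from the accesses in this file):
example_config = {
    "checkpoint_path": "<file in Lightricks/LTX-Video>",
    "spatial_upscaler_model_path": "<file in Lightricks/LTX-Video>",
    "precision": "float8_e4m3fn",        # drives _apply_precision_policy()
    "sampler": "<sampler name>",
    "text_encoder_model_name_or_path": "<HF repo id>",
    "prompt_enhancer_image_caption_model_name_or_path": "<HF repo id>",
    "prompt_enhancer_llm_model_name_or_path": "<HF repo id>",
    "decode_timestep": 0.05,             # same default reused in generate()
    "decode_noise_scale": 0.025,         # illustrative
    "stochastic_sampling": False,        # illustrative
    "downscale_factor": 0.6666666,       # default used in generate()
    "first_pass": {},                    # per-pass overrides merged into call kwargs
    "second_pass": {},
}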
-     def _load_models(self):
-         t0 = time.perf_counter()
-         LTX_REPO = "Lightricks/LTX-Video"
-         print("[DEBUG] Downloading main checkpoint...")
-         distilled_model_path = hf_hub_download(
-             repo_id=LTX_REPO,
-             filename=self.config["checkpoint_path"],
-             local_dir=os.getenv("HF_HOME"),
-             cache_dir=os.getenv("HF_HOME_CACHE"),
-             token=os.getenv("HF_TOKEN"),
-         )
-         self.config["checkpoint_path"] = distilled_model_path
-         print(f"[DEBUG] Checkpoint at: {distilled_model_path}")
-
-         print("[DEBUG] Downloading spatial upscaler...")
-         spatial_upscaler_path = hf_hub_download(
-             repo_id=LTX_REPO,
-             filename=self.config["spatial_upscaler_model_path"],
-             local_dir=os.getenv("HF_HOME"),
-             cache_dir=os.getenv("HF_HOME_CACHE"),
-             token=os.getenv("HF_TOKEN")
-         )
-         self.config["spatial_upscaler_model_path"] = spatial_upscaler_path
-         print(f"[DEBUG] Upscaler at: {spatial_upscaler_path}")
-
-         print("[DEBUG] Building pipeline...")
-         pipeline = create_ltx_video_pipeline(
-             ckpt_path=self.config["checkpoint_path"],
-             precision=self.config["precision"],
-             text_encoder_model_name_or_path=self.config["text_encoder_model_name_or_path"],
-             sampler=self.config["sampler"],
-             device="cpu",
-             enhance_prompt=False,
-             prompt_enhancer_image_caption_model_name_or_path=self.config["prompt_enhancer_image_caption_model_name_or_path"],
-             prompt_enhancer_llm_model_name_or_path=self.config["prompt_enhancer_llm_model_name_or_path"],
-         )
-         print("[DEBUG] Pipeline ready.")
-
-         latent_upsampler = None
-         if self.config.get("spatial_upscaler_model_path"):
-             print("[DEBUG] Building latent_upsampler...")
-             latent_upsampler = create_latent_upsampler(self.config["spatial_upscaler_model_path"], device="cpu")
-             print("[DEBUG] Upsampler ready.")
-         print(f"[DEBUG] _load_models() total time={time.perf_counter()-t0:.3f}s")
-         return pipeline, latent_upsampler
-
-     def _promote_fp8_weights_to_bf16(self, module):
-         if not isinstance(module, torch.nn.Module):
-             print("[DEBUG] FP8→BF16 promotion skipped: target is not an nn.Module.")
-             return
-         f8 = getattr(torch, "float8_e4m3fn", None)
-         if f8 is None:
-             print("[DEBUG] torch.float8_e4m3fn unavailable.")
-             return
-         p_cnt = b_cnt = 0
-         for _, p in module.named_parameters(recurse=True):
-             try:
-                 if p.dtype == f8:
-                     with torch.no_grad():
-                         p.data = p.data.to(torch.bfloat16); p_cnt += 1
-             except Exception:
-                 pass
-         for _, b in module.named_buffers(recurse=True):
-             try:
-                 if hasattr(b, "dtype") and b.dtype == f8:
-                     b.data = b.data.to(torch.bfloat16); b_cnt += 1
-             except Exception:
-                 pass
-         print(f"[DEBUG] FP8→BF16: params_promoted={p_cnt}, buffers_promoted={b_cnt}")
-
-
-     @torch.no_grad()
-     def _upsample_latents_internal(self, latents: torch.Tensor) -> torch.Tensor:
-         """
-         Latent-upscale logic lifted directly from LTXMultiScalePipeline.
-         """
-         if not self.latent_upsampler:
-             raise ValueError("Latent upsampler is not loaded.")
-
-         # Make sure the models are on the right device
-         self.latent_upsampler.to(self.device)
-         self.pipeline.vae.to(self.device)
-         print(f"[DEBUG-UPSAMPLE] Input shape: {tuple(latents.shape)}")
-         latents = un_normalize_latents(latents, self.pipeline.vae, vae_per_channel_normalize=True)
-         upsampled_latents = self.latent_upsampler(latents)
-         upsampled_latents = normalize_latents(upsampled_latents, self.pipeline.vae, vae_per_channel_normalize=True)
-         print(f"[DEBUG-UPSAMPLE] Output shape: {tuple(upsampled_latents.shape)}")
-
-         return upsampled_latents
-
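Editor's note: a shape sketch under the assumption that the spatial upsampler doubles H and W, which is what the second pass in generate() relies on via second_pass_width = downscaled_width * 2; the batch, channel and temporal axes are unchanged. Values are illustrative.

# latents in:   [B, C, T, H,  W ]   e.g. [1, 128, 8, 22, 30]
# latents out:  [B, C, T, 2H, 2W]   e.g. [1, 128, 8, 44, 60]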
-     def _apply_precision_policy(self):
-         prec = str(self.config.get("precision", "")).lower()
-         self.runtime_autocast_dtype = torch.float32
-         print(f"[DEBUG] Applying precision policy: {prec}")
-         if prec == "float8_e4m3fn":
-             self.runtime_autocast_dtype = torch.bfloat16
-             force_promote = os.getenv("LTXV_FORCE_BF16_ON_FP8", "0") == "1"
-             print(f"[DEBUG] FP8 detected. force_promote={force_promote}")
-             if force_promote and hasattr(torch, "float8_e4m3fn"):
-                 try:
-                     self._promote_fp8_weights_to_bf16(self.pipeline)
-                 except Exception as e:
-                     print(f"[DEBUG] FP8→BF16 promotion on the pipeline failed: {e}")
-                 try:
-                     if self.latent_upsampler:
-                         self._promote_fp8_weights_to_bf16(self.latent_upsampler)
-                 except Exception as e:
-                     print(f"[DEBUG] FP8→BF16 promotion on the upsampler failed: {e}")
-         elif prec == "bfloat16":
-             self.runtime_autocast_dtype = torch.bfloat16
-         elif prec == "mixed_precision":
-             self.runtime_autocast_dtype = torch.float16
-         else:
-             self.runtime_autocast_dtype = torch.float32
-
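Editor's note: the net effect of the policy above is bf16 autocast for float8_e4m3fn and bfloat16 checkpoints, fp16 for mixed_precision, and fp32 otherwise. generate() consumes the attribute with the same construction as this sketch:

import contextlib
svc = video_generation_service  # instantiated at the bottom of this file
ctx = (torch.autocast(device_type="cuda", dtype=svc.runtime_autocast_dtype)
       if svc.device == "cuda" else contextlib.nullcontext())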
-     def _prepare_conditioning_tensor(self, filepath, height, width, padding_values):
-         print(f"[DEBUG] Loading conditioning media: {filepath}")
-         tensor = load_image_to_tensor_with_resize_and_crop(filepath, height, width)
-         tensor = torch.nn.functional.pad(tensor, padding_values)
-         out = tensor.to(self.device, dtype=self.runtime_autocast_dtype) if self.device == "cuda" else tensor.to(self.device)
-         print(f"[DEBUG] Cond shape={tuple(out.shape)} dtype={out.dtype} device={out.device}")
-         return out
-
-
-     def _dividir_latentes_por_tamanho(self, latents_brutos, num_latente_por_chunk: int, overlap: int = 1):
-         """
-         Splits the latent tensor into chunks of a given size (in number of latents).
-
-         Args:
-             latents_brutos: tensor [B, C, T, H, W]
-             num_latente_por_chunk: number of latents per chunk
-             overlap: number of latent frames shared between consecutive chunks
-
-         Returns:
-             List[tensor]: list of cloned chunks
-         """
-         sum_latent = latents_brutos.shape[2]
-         chunks = []
-
-         if num_latente_por_chunk >= sum_latent:
-             return [latents_brutos]
-
-         n_chunks = sum_latent // num_latente_por_chunk
-         print("================CAUSAL PRUNING=================")
-         print(f"[DEBUG] TOTAL LATENTS = {sum_latent}")
-         print(f"[DEBUG] Min latents per chunk = {num_latente_por_chunk}")
-         print(f"[DEBUG] Number of chunks = {n_chunks}")
-         if n_chunks > 1:
-             i = 0
-             while i < n_chunks:
-                 start = num_latente_por_chunk * i
-                 end = start + num_latente_por_chunk + overlap
-                 if i + 1 < n_chunks:
-                     chunk = latents_brutos[:, :, start:end, :, :].clone().detach()
-                     print(f"[DEBUG] chunk{i+1}[:, :, {start}:{end}, :, :] = {chunk.shape[2]}")
-                 else:
-                     # The last chunk takes everything that remains
-                     chunk = latents_brutos[:, :, start:, :, :].clone().detach()
-                     print(f"[DEBUG] chunk{i+1}[:, :, {start}:, :, :] = {chunk.shape[2]}")
-                 chunks.append(chunk)
-                 i += 1
-         else:
-             print("[DEBUG] Minimum number of chunks")
-             print(f"[DEBUG] latents_brutos[:, :, :, :, :] = {latents_brutos.shape[2]}")
-             chunks.append(latents_brutos)
-         print("================CAUSAL PRUNING=================")
-         return chunks
-
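Editor's note: a worked example of the chunking above (not part of the original file). Twelve latent frames with num_latente_por_chunk=4 and overlap=1 give three chunks covering [0:5], [4:9] and [8:12], so consecutive chunks share one latent frame:

lat = torch.zeros(1, 128, 12, 16, 16)            # [B, C, T, H, W], T = 12
parts = video_generation_service._dividir_latentes_por_tamanho(lat, 4, 1)
print([p.shape[2] for p in parts])               # [5, 5, 4]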
-     def _get_total_frames(self, video_path: str) -> int:
-         cmd = [
-             "ffprobe",
-             "-v", "error",
-             "-select_streams", "v:0",
-             "-count_frames",
-             "-show_entries", "stream=nb_read_frames",
-             "-of", "default=nokey=1:noprint_wrappers=1",
-             video_path
-         ]
-         result = subprocess.run(cmd, capture_output=True, text=True, check=True)
-         return int(result.stdout.strip())
-
-     def _gerar_lista_com_transicoes(self, pasta: str, video_paths: list[str], crossfade_frames: int = 8) -> list[str]:
-         """
-         Builds a new list of videos with smooth transitions (frame-by-frame blends),
-         following Carlos's linear logic exactly.
-         """
-         poda = crossfade_frames
-         total_partes = len(video_paths)
-         video_fade_fim = None
-         video_fade_ini = None
-         nova_lista = []
-
-         print("===========CAUSAL CONCATENATION=============")
-         print(f"[DEBUG] Starting pipeline with {total_partes} videos and {poda} crossfade frames")
-
-         for i in range(total_partes):
-             base = video_paths[i]
-
-             # --- TRIM ---
-             video_podado = os.path.join(pasta, f"{base}_podado_{i}.mp4")
-
-             if i < total_partes - 1:
-                 end_frame = self._get_total_frames(base) - poda
-             else:
-                 end_frame = self._get_total_frames(base)
-
-             if i > 0:
-                 start_frame = poda
-             else:
-                 start_frame = 0
-
-             cmd_fim = (
-                 f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
-                 f'-vf "trim=start_frame={start_frame}:end_frame={end_frame},setpts=PTS-STARTPTS" '
-                 f'-an "{video_podado}"'
-             )
-             subprocess.run(cmd_fim, shell=True, check=True)
-
-             # --- FADE_INI ---
-             if i > 0:
-                 video_fade_ini = os.path.join(pasta, f"{base}_fade_ini_{i}.mp4")
-                 cmd_ini = (
-                     f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
-                     f'-vf "trim=end_frame={poda},setpts=PTS-STARTPTS" -an "{video_fade_ini}"'
-                 )
-                 subprocess.run(cmd_ini, shell=True, check=True)
-
-             # --- TRANSITION ---
-             if video_fade_fim and video_fade_ini:
-                 video_fade = os.path.join(pasta, f"transicao_{i}_{i+1}.mp4")
-                 # N is the frame index, so the blend completes over exactly `poda`
-                 # frames (the original used T, which is time in seconds and would
-                 # barely advance the fade within an 8-frame clip).
-                 cmd_blend = (
-                     f'ffmpeg -y -hide_banner -loglevel error '
-                     f'-i "{video_fade_fim}" -i "{video_fade_ini}" '
-                     f'-filter_complex "[0:v][1:v]blend=all_expr=\'A*(1-N/{poda})+B*(N/{poda})\',format=yuv420p" '
-                     f'-frames:v {poda} "{video_fade}"'
-                 )
-                 subprocess.run(cmd_blend, shell=True, check=True)
-                 print(f"[DEBUG] Transition added {i}/{i+1} {self._get_total_frames(video_fade)} frames ✅")
-                 nova_lista.append(video_fade)
-
-             # --- FADE_FIM ---
-             if i < total_partes - 1:  # the last part's tail is never blended
-                 video_fade_fim = os.path.join(pasta, f"{base}_fade_fim_{i}.mp4")
-                 # The tail is exactly the `poda` pruned frames [end_frame, end_frame + poda),
-                 # mirroring FADE_INI, which takes the pruned head of the next video.
-                 cmd_fim = (
-                     f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
-                     f'-vf "trim=start_frame={end_frame},setpts=PTS-STARTPTS" -an "{video_fade_fim}"'
-                 )
-                 subprocess.run(cmd_fim, shell=True, check=True)
-
-             nova_lista.append(video_podado)
-             print(f"[DEBUG] Trimmed video {i+1} added {self._get_total_frames(video_podado)} frames ✅")
-
-         print("===========CAUSAL CONCATENATION=============")
-         print(f"[DEBUG] {nova_lista}")
-         return nova_lista
-
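Editor's note: for three inputs the method returns the playlist [part1 trimmed, transition 1→2, part2 trimmed, transition 2→3, part3 trimmed], where each transition is a poda-frame linear blend between the pruned tail of one part and the pruned head of the next. The paths below mirror the names generate() produces and are illustrative:

playlist = video_generation_service._gerar_lista_com_transicoes(
    pasta="/app/output",
    video_paths=["/app/output/output_par_1.mp4", "/app/output/output_par_2.mp4"],
    crossfade_frames=8,
)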
-     def _concat_mp4s_no_reencode(self, mp4_list: List[str], out_path: str):
-         """
-         Concatenates multiple MP4s without re-encoding, via ffmpeg's concat demuxer.
-         WARNING: all inputs must share the same codec, fps, resolution, etc.
-         """
-         if not mp4_list or len(mp4_list) < 2:
-             raise ValueError("Provide at least two MP4 files to concatenate.")
-
-         # Write the temporary list file for ffmpeg
-         with tempfile.NamedTemporaryFile("w", delete=False, suffix=".txt") as f:
-             for mp4 in mp4_list:
-                 f.write(f"file '{os.path.abspath(mp4)}'\n")
-             list_path = f.name
-
-         cmd = f"ffmpeg -y -f concat -safe 0 -i {list_path} -c copy {out_path}"
-         print(f"[DEBUG] Concat: {cmd}")
-
-         try:
-             subprocess.check_call(shlex.split(cmd))
-         finally:
-             try:
-                 os.remove(list_path)
-             except Exception:
-                 pass
-
-
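Editor's note: a minimal usage sketch (file names are illustrative). Because the inputs are stream-copied rather than re-encoded, they must already share codec, resolution, fps and pixel format:

video_generation_service._concat_mp4s_no_reencode(
    ["/app/output/part_a.mp4", "/app/output/part_b.mp4"],
    "/app/output/joined.mp4",
)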
-     # ==============================================================================
-     # --- THE COMPLETE, UPDATED GENERATE FUNCTION ---
-     # ==============================================================================
-     def generate(
-         self,
-         prompt,
-         negative_prompt,
-         mode="text-to-video",
-         start_image_filepath=None,
-         middle_image_filepath=None,
-         middle_frame_number=None,
-         middle_image_weight=1.0,
-         end_image_filepath=None,
-         end_image_weight=1.0,
-         input_video_filepath=None,
-         height=512,
-         width=704,
-         duration=2.0,
-         frames_to_use=9,
-         seed=42,
-         randomize_seed=True,
-         guidance_scale=3.0,
-         improve_texture=True,
-         progress_callback=None,
-         external_decode=True,
-     ):
-         t_all = time.perf_counter()
-         print(f"[DEBUG] generate() begin mode={mode} external_decode={external_decode} improve_texture={improve_texture}")
-         if self.device == "cuda":
-             torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
-         self._log_gpu_memory("Start of generation")
-
-         # --- Initial setup (as before) ---
-         if mode == "image-to-video" and not start_image_filepath:
-             raise ValueError("A start image is required for image-to-video mode")
-         used_seed = random.randint(0, 2**32 - 1) if randomize_seed else int(seed)
-         seed_everething(used_seed); print(f"[DEBUG] Seed used: {used_seed}")
-         FPS = 24.0; MAX_NUM_FRAMES = 2570
-         target_frames_rounded = round(duration * FPS)
-         # Snap the frame count onto the 8*n + 1 grid the model expects, clamped
-         # to [9, MAX_NUM_FRAMES]; e.g. 2.0 s * 24 fps = 48 -> 49 frames.
-         n_val = round((float(target_frames_rounded) - 1.0) / 8.0)
-         actual_num_frames = max(9, min(MAX_NUM_FRAMES, int(n_val * 8 + 1)))
-         height_padded = ((height - 1) // 8 + 1) * 8
-         width_padded = ((width - 1) // 8 + 1) * 8
-         padding_values = calculate_padding(height, width, height_padded, width_padded)
-         generator = torch.Generator(device=self.device).manual_seed(used_seed)
-
-         conditioning_items = []
-         if mode == "image-to-video":
-             start_tensor = self._prepare_conditioning_tensor(start_image_filepath, height, width, padding_values)
-             conditioning_items.append(ConditioningItem(start_tensor, 0, 1.0))
-             if middle_image_filepath and middle_frame_number is not None:
-                 middle_tensor = self._prepare_conditioning_tensor(middle_image_filepath, height, width, padding_values)
-                 safe_middle_frame = max(0, min(int(middle_frame_number), actual_num_frames - 1))
-                 conditioning_items.append(ConditioningItem(middle_tensor, safe_middle_frame, float(middle_image_weight)))
-             if end_image_filepath:
-                 end_tensor = self._prepare_conditioning_tensor(end_image_filepath, height, width, padding_values)
-                 last_frame_index = actual_num_frames - 1
-                 conditioning_items.append(ConditioningItem(end_tensor, last_frame_index, float(end_image_weight)))
-         print(f"[DEBUG] Conditioning items: {len(conditioning_items)}")
-
-         call_kwargs = {
-             "prompt": prompt,
-             "negative_prompt": negative_prompt,
-             "height": height_padded,
-             "width": width_padded,
-             "num_frames": actual_num_frames,
-             "frame_rate": int(FPS),
-             "generator": generator,
-             "output_type": "latent",
-             "conditioning_items": conditioning_items if conditioning_items else None,
-             "media_items": None,
-             "decode_timestep": self.config["decode_timestep"],
-             "decode_noise_scale": self.config["decode_noise_scale"],
-             "stochastic_sampling": self.config["stochastic_sampling"],
-             "image_cond_noise_scale": 0.01,
-             "is_video": True,
-             "vae_per_channel_normalize": True,
-             "mixed_precision": (self.config["precision"] == "mixed_precision"),
-             "offload_to_cpu": False,
-             "enhance_prompt": False,
-             "skip_layer_strategy": SkipLayerStrategy.AttentionValues,
-         }
-         print(f"[DEBUG] output_type={call_kwargs['output_type']} skip_layer_strategy={call_kwargs['skip_layer_strategy']}")
-
-         latents = None
-         latents_list = []
-         results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
-
- ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
751
- with ctx:
752
- if improve_texture:
753
- if not self.latent_upsampler:
754
- raise ValueError("Upscaler espacial não carregado, mas 'improve_texture' está ativo.")
755
-
756
- # --- ETAPA 1: GERAÇÃO BASE (FIRST PASS) ---
757
- print("\n--- INICIANDO ETAPA 1: GERAÇÃO BASE (FIRST PASS) ---")
758
- t_pass1 = time.perf_counter()
759
-
760
- first_pass_config = self.config.get("first_pass", {}).copy()
761
- downscale_factor = self.config.get("downscale_factor", 0.6666666)
762
- vae_scale_factor = self.pipeline.vae_scale_factor # Geralmente 8
763
-
764
- # --- <INÍCIO DA LÓGICA DE CÁLCULO EXATA> ---
765
- # Replica a fórmula da LTXMultiScalePipeline
766
- x_width = int(width_padded * downscale_factor)
767
- downscaled_width = x_width - (x_width % vae_scale_factor)
768
- x_height = int(height_padded * downscale_factor)
769
- downscaled_height = x_height - (x_height % vae_scale_factor)
770
- print(f"[DEBUG] First Pass Dims: Original Pad ({width_padded}x{height_padded}) -> Downscaled ({downscaled_width}x{downscaled_height})")
771
- # --- <FIM DA LÓGICA DE CÁLCULO EXATA> ---
772
-
773
- first_pass_kwargs = call_kwargs.copy()
774
-
775
- first_pass_kwargs.update({
776
- "output_type": "latent",
777
- "width": downscaled_width,
778
- "height": downscaled_height,
779
- "guidance_scale": float(guidance_scale),
780
- **first_pass_config
781
- })
782
-
783
- print(f"[DEBUG] First Pass: Gerando em {downscaled_width}x{downscaled_height}...")
784
- base_latents = self.pipeline(**first_pass_kwargs).images
785
- log_tensor_info(base_latents, "Latentes Base (First Pass)")
786
- print(f"[DEBUG] First Pass concluída em {time.perf_counter() - t_pass1:.2f}s")
787
-
788
- # --- ETAPA 2: UPSCALE DOS LATENTES ---
789
- print("\n--- INICIANDO ETAPA 2: UPSCALE DOS LATENTES ---")
790
- t_upscale = time.perf_counter()
791
-
792
- upsampled_latents = self._upsample_latents_internal(base_latents)
793
- upsampled_latents = adain_filter_latent(latents=upsampled_latents, reference_latents=base_latents)
794
- log_tensor_info(upsampled_latents, "Latentes Pós-Upscale")
795
- print(f"[DEBUG] Upscale de Latentes concluído em {time.perf_counter() - t_upscale:.2f}s")
796
- del base_latents; gc.collect(); torch.cuda.empty_cache()
797
-
798
- par = 0
799
- latents_cpu_up = upsampled_latents.detach().to("cpu", non_blocking=True)
800
- torch.cuda.empty_cache()
801
- try:
802
- torch.cuda.ipc_collect()
803
- except Exception:
804
- pass
805
-
806
- latents_parts_up = self._dividir_latentes_por_tamanho(latents_cpu_up,4,1)
807
-
808
- for latents in latents_parts_up:
809
-
810
- # # --- ETAPA 3: REFINAMENTO DE TEXTURA (SECOND PASS) ---
811
- print("\n--- INICIANDO ETAPA 3: REFINAMENTO DE TEXTURA (SECOND PASS) ---")
812
-
813
- second_pass_config = self.config.get("second_pass", {}).copy()
814
- # --- <INÍCIO DA LÓGICA DE CÁLCULO EXATA PARA SECOND PASS> ---
815
- # Usa as dimensões da primeira passagem dobradas, como na pipeline original
816
- second_pass_width = downscaled_width * 2
817
- second_pass_height = downscaled_height * 2
818
- print(f"[DEBUG] Second Pass Dims: Target ({second_pass_width}x{second_pass_height})")
819
- # --- <FIM DA LÓGICA DE CÁLCULO EXATA> ---
820
- t_pass2 = time.perf_counter()
821
-
822
- vae_temporal_scale = self.pipeline.video_scale_factor # Geralmente 4 ou 8
823
- num_pixel_frames_part = ((latents.shape[2] - 1) * vae_temporal_scale) + 1
824
- print(f"[DEBUG] Parte {i+1}: {latents.shape[2] - 1} latentes -> {num_pixel_frames_part} frames de pixel (alvo)")
825
-
826
- second_pass_kwargs = call_kwargs.copy()
827
- second_pass_kwargs.update({
828
- "output_type": "latent",
829
- "width": second_pass_width,
830
- "height": second_pass_height,
831
- "num_frames": num_pixel_frames_part,
832
- "latents": upsampled_latents, # O tensor upscaled
833
- "guidance_scale": float(guidance_scale),
834
- **second_pass_config
835
- })
836
-
837
- print(f"[DEBUG] Second Pass: Refinando em {width_padded}x{height_padded}...")
838
- final_latents = self.pipeline(**second_pass_kwargs).images
839
- log_tensor_info(final_latents, "Latentes Finais (Pós-Second Pass)")
840
- print(f"[DEBUG] Second part Pass concluída em {time.perf_counter() - t_pass2:.2f}s")
841
-
842
- latents_list.append(final_latents)
843
-
844
-                 else:  # Single-pass generation
-                     print("\n--- STARTING SINGLE-PASS GENERATION ---")
-                     t_single = time.perf_counter()
-                     single_pass_kwargs = call_kwargs.copy()
-                     single_pass_kwargs.update(self.config.get("first_pass", {}))
-                     single_pass_kwargs["guidance_scale"] = float(guidance_scale)
-                     single_pass_kwargs["output_type"] = "latent"
-
-                     latents = self.pipeline(**single_pass_kwargs).images
-                     log_tensor_info(latents, "Final latents (single pass)")
-                     print(f"[DEBUG] Single pass finished in {time.perf_counter() - t_single:.2f}s")
-
-                     latents_list.append(latents)
-
-             # --- FINAL STAGE: DECODING AND MP4 ENCODING ---
-             print("\n--- STARTING FINAL STAGE: DECODING AND ASSEMBLY ---")
-
-             latents_parts = []
-             for latents in latents_list:
-                 # extend (not append) keeps this a flat list of tensor chunks
-                 latents_parts.extend(self._dividir_latentes_por_tamanho(latents, 4, 1))
-
- par = 0
875
- for latents in latents_parts:
876
-
877
- par = par + 1
878
- output_video_path = os.path.join(results_dir, f"output_{used_seed}_{par}.mp4")
879
- final_output_path = None
880
-
881
- print("[DEBUG] Decodificando bloco de latentes com VAE {par} → tensor de pixels...")
882
- # Usar manager com timestep por item; previne target_shape e rota NoneType.decode
883
- pixel_tensor = vae_manager_singleton.decode(
884
- latents.to(self.device, non_blocking=True),
885
- decode_timestep=float(self.config.get("decode_timestep", 0.05))
886
- )
887
- log_tensor_info(pixel_tensor, "Pixel tensor (VAE saída)")
888
-
889
- print("[DEBUG] Codificando MP4 a partir do tensor de pixels (bloco inteiro)...")
890
- video_encode_tool_singleton.save_video_from_tensor(
891
- pixel_tensor,
892
- output_video_path,
893
- fps=call_kwargs["frame_rate"],
894
- progress_callback=progress_callback
895
- )
896
-
897
- candidate = os.path.join(results_dir, f"output_par_{par}.mp4")
898
- try:
899
- shutil.move(output_video_path, candidate)
900
- final_output_path = candidate
901
- print(f"[DEBUG] MP4 parte {par} movido para {final_output_path}")
902
- partes_mp4.append(final_output_path)
903
-
904
- except Exception as e:
905
- final_output_path = output_video_path
906
- print(f"[DEBUG] Falha no move; usando tmp como final: {e}")
907
-
908
- total_partes = len(partes_mp4)
909
- if (total_partes>1):
910
- final_vid = os.path.join(results_dir, f"concat_fim_{used_seed}.mp4")
911
- partes_mp4_fade = self._gerar_lista_com_transicoes(pasta=results_dir, video_paths=partes_mp4, crossfade_frames=8)
912
- self._concat_mp4s_no_reencode(partes_mp4_fade, final_vid)
913
- else:
914
- final_vid = partes_mp4[0]
915
-
916
-
917
- self._log_gpu_memory("Fim da Geração")
918
- return final_vid, used_seed
919
-         except Exception as e:
-             print("[DEBUG] EXCEPTION DURING GENERATION:")
-             print("".join(traceback.format_exception(type(e), e, e.__traceback__)))
-             raise
-         finally:
-             try:
-                 del latents
-             except Exception:
-                 pass
-
-             gc.collect()
-             try:
-                 if self.device == "cuda":
-                     torch.cuda.empty_cache()
-                     try:
-                         torch.cuda.ipc_collect()
-                     except Exception:
-                         pass
-             except Exception as e:
-                 print(f"[DEBUG] GPU cleanup in finally failed: {e}")
-
-             try:
-                 self.finalize(keep_paths=[])
-             except Exception as e:
-                 print(f"[DEBUG] finalize() in finally failed: {e}")
-
- print("Creating the VideoService instance. Model loading will start now...")
- video_generation_service = VideoService()
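Editor's note: a minimal end-to-end sketch of the public entry point (the prompt and values are illustrative, not from the original file):

video_path, seed = video_generation_service.generate(
    prompt="a red fox running through snow",
    negative_prompt="blurry, low quality",
    mode="text-to-video",
    duration=2.0,            # snapped to 49 frames at 24 fps (8*6 + 1)
    height=512, width=704,
    improve_texture=True,    # first pass + latent upscale + chunked second pass
)
print(video_path, seed)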