eeuuia committed
Commit 975f84e (verified)
1 Parent(s): 6d12705

Update api/ltx/ltx_utils.py

Files changed (1):
  1. api/ltx/ltx_utils.py +333 -169
api/ltx/ltx_utils.py CHANGED
@@ -1,203 +1,367 @@
-# FILE: api/ltx/ltx_utils.py
-# DESCRIPTION: Comprehensive, self-contained utility module for the LTX pipeline.
-#              Handles dependency path injection, model loading, data structures, and helper functions.
 
-import os
-import random
 import json
 import logging
-import time
 import sys
 from pathlib import Path
-from typing import Dict, Optional, Tuple, Union
-from dataclasses import dataclass
-from enum import Enum, auto
 
-import numpy as np
 import torch
-import torchvision.transforms.functional as TVF
-from PIL import Image
-from safetensors import safe_open
-from transformers import T5EncoderModel, T5Tokenizer
 
 # ==============================================================================
-# --- CRITICAL: DEPENDENCY PATH INJECTION ---
 # ==============================================================================
 
-# Defines the path to the cloned repository
-LTX_VIDEO_REPO_DIR = Path("/data/LTX-Video")
-
 def add_deps_to_path():
-    """
-    Adds the LTX repository directory to sys.path to guarantee that its
-    libraries can be imported.
-    """
     repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
     if repo_path not in sys.path:
         sys.path.insert(0, repo_path)
-        logging.info(f"[ltx_utils] LTX-Video repository added to sys.path: {repo_path}")
 
-# Runs the function immediately to set up the environment before any imports.
 add_deps_to_path()
 
-
-# --- LTX-Video library imports (these should work now) ---
 try:
-    from ltx_video.pipelines.pipeline_ltx_video import LTXVideoPipeline
-    from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler
-    from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
-    from ltx_video.models.transformers.transformer3d import Transformer3DModel
-    from ltx_video.models.transformers.symmetric_patchifier import SymmetricPatchifier
-    from ltx_video.schedulers.rf import RectifiedFlowScheduler
-    from ltx_video.models.autoencoders.vae_encode import un_normalize_latents, normalize_latents
-    import ltx_video.pipelines.crf_compressor as crf_compressor
 except ImportError as e:
-    raise ImportError(f"Could not import from LTX-Video library even after setting sys.path. Check repo integrity at '{LTX_VIDEO_REPO_DIR}'. Error: {e}")
-
 
 # ==============================================================================
-# --- DATA STRUCTURES AND ENUMS (centralized here) ---
 # ==============================================================================
 
-@dataclass
-class ConditioningItem:
-    """Defines a single frame-conditioning item, used to guide the generation pipeline."""
-    media_item: torch.Tensor
-    media_frame_number: int
-    conditioning_strength: float
-    media_x: Optional[int] = None
-    media_y: Optional[int] = None
-
-
-class SkipLayerStrategy(Enum):
-    """Defines the strategy for how spatio-temporal guidance is applied."""
-    AttentionSkip = auto()
-    AttentionValues = auto()
-    Residual = auto()
-    TransformerBlock = auto()
-
 
 # ==============================================================================
-# --- MODEL AND PIPELINE CONSTRUCTION FUNCTIONS ---
 # ==============================================================================
 
-def create_latent_upsampler(latent_upsampler_model_path: str, device: str) -> LatentUpsampler:
-    """Loads the Latent Upsampler model from a checkpoint path."""
-    logging.info(f"Loading Latent Upsampler from: {latent_upsampler_model_path} to device: {device}")
-    latent_upsampler = LatentUpsampler.from_pretrained(latent_upsampler_model_path)
-    latent_upsampler.to(device)
-    latent_upsampler.eval()
-    return latent_upsampler
-
-def build_ltx_pipeline_on_cpu(config: Dict) -> Tuple[LTXVideoPipeline, Optional[torch.nn.Module]]:
-    """Builds the complete LTX pipeline and upsampler on the CPU."""
-    t0 = time.perf_counter()
-    logging.info("Building LTX pipeline on CPU...")
-
-    ckpt_path = Path(config["checkpoint_path"])
-    if not ckpt_path.is_file():
-        raise FileNotFoundError(f"Main checkpoint file not found: {ckpt_path}")
-
-    with safe_open(ckpt_path, framework="pt") as f:
-        metadata = f.metadata() or {}
-        config_str = metadata.get("config", "{}")
-        configs = json.loads(config_str)
-        allowed_inference_steps = configs.get("allowed_inference_steps")
-
-    vae = CausalVideoAutoencoder.from_pretrained(ckpt_path).to("cpu")
-    transformer = Transformer3DModel.from_pretrained(ckpt_path).to("cpu")
-    scheduler = RectifiedFlowScheduler.from_pretrained(ckpt_path)
 
-    text_encoder_path = config["text_encoder_model_name_or_path"]
-    text_encoder = T5EncoderModel.from_pretrained(text_encoder_path, subfolder="text_encoder").to("cpu")
-    tokenizer = T5Tokenizer.from_pretrained(text_encoder_path, subfolder="tokenizer")
-    patchifier = SymmetricPatchifier(patch_size=1)
-
-    precision = config.get("precision", "bfloat16")
-    if precision == "bfloat16":
-        vae.to(torch.bfloat16)
-        transformer.to(torch.bfloat16)
-        text_encoder.to(torch.bfloat16)
 
-    pipeline = LTXVideoPipeline(
-        transformer=transformer, patchifier=patchifier, text_encoder=text_encoder,
-        tokenizer=tokenizer, scheduler=scheduler, vae=vae,
-        allowed_inference_steps=allowed_inference_steps,
-        prompt_enhancer_image_caption_model=None, prompt_enhancer_image_caption_processor=None,
-        prompt_enhancer_llm_model=None, prompt_enhancer_llm_tokenizer=None,
-    )
-
-    latent_upsampler = None
-    if config.get("spatial_upscaler_model_path"):
-        spatial_path = config["spatial_upscaler_model_path"]
-        latent_upsampler = create_latent_upsampler(spatial_path, device="cpu")
-        if precision == "bfloat16":
-            latent_upsampler.to(torch.bfloat16)
-
-    logging.info(f"LTX pipeline built on CPU in {time.perf_counter() - t0:.2f}s")
-    return pipeline, latent_upsampler
 
 # ==============================================================================
-# --- HELPER FUNCTIONS (Latent Processing, Seed, Image Prep) ---
 # ==============================================================================
-
-def adain_filter_latent(
-    latents: torch.Tensor, reference_latents: torch.Tensor, factor=1.0
-) -> torch.Tensor:
-    """Applies AdaIN to transfer the style from a reference latent to another."""
-    result = latents.clone()
-    for i in range(latents.size(0)):
-        for c in range(latents.size(1)):
-            r_sd, r_mean = torch.std_mean(reference_latents[i, c], dim=None)
-            i_sd, i_mean = torch.std_mean(result[i, c], dim=None)
-            if i_sd > 1e-6:
-                result[i, c] = ((result[i, c] - i_mean) / i_sd) * r_sd + r_mean
-    return torch.lerp(latents, result, factor)
-
-def seed_everything(seed: int):
-    """Sets the seed for reproducibility."""
-    random.seed(seed)
-    os.environ['PYTHONHASHSEED'] = str(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-    torch.cuda.manual_seed_all(seed)
-    torch.backends.cudnn.deterministic = True
-    torch.backends.cudnn.benchmark = False
-
-def load_image_to_tensor_with_resize_and_crop(
-    image_input: Union[str, Image.Image],
-    target_height: int,
-    target_width: int,
-) -> torch.Tensor:
-    """Loads and processes an image into a 5D tensor compatible with the LTX pipeline."""
-    if isinstance(image_input, str):
-        image = Image.open(image_input).convert("RGB")
-    elif isinstance(image_input, Image.Image):
-        image = image_input
-    else:
-        raise ValueError("image_input must be a file path or a PIL Image object")
-
-    input_width, input_height = image.size
-    aspect_ratio_target = target_width / target_height
-    aspect_ratio_frame = input_width / input_height
-
-    if aspect_ratio_frame > aspect_ratio_target:
-        new_width, new_height = int(input_height * aspect_ratio_target), input_height
-        x_start, y_start = (input_width - new_width) // 2, 0
-    else:
-        new_width, new_height = input_width, int(input_width / aspect_ratio_target)
-        x_start, y_start = 0, (input_height - new_height) // 2
-
-    image = image.crop((x_start, y_start, x_start + new_width, y_start + new_height))
-    image = image.resize((target_width, target_height), Image.Resampling.LANCZOS)
-
-    frame_tensor = TVF.to_tensor(image)
-    frame_tensor = TVF.gaussian_blur(frame_tensor, kernel_size=(3, 3))
-
-    frame_tensor_hwc = frame_tensor.permute(1, 2, 0)
-    frame_tensor_hwc = crf_compressor.compress(frame_tensor_hwc)
-    frame_tensor = frame_tensor_hwc.permute(2, 0, 1)
-    frame_tensor = (frame_tensor * 2.0) - 1.0
-
-    return frame_tensor.unsqueeze(0).unsqueeze(2)
 
+# FILE: api/ltx_server_refactored_complete.py
+# DESCRIPTION: Final high-level orchestrator for LTX-Video generation.
+#              This version features a unified generation workflow, random seed generation,
+#              delegation to specialized modules, and advanced debugging capabilities.
 
+import gc
 import json
 import logging
+import os
+import random  # used by _get_random_seed below
+import shutil
 import sys
+import tempfile
+import time
 from pathlib import Path
+from typing import Dict, List, Optional, Tuple
 
 import torch
+import yaml
+import numpy as np
+from huggingface_hub import hf_hub_download
 
 # ==============================================================================
+# --- PROJECT SETUP AND IMPORTS ---
 # ==============================================================================
 
+# Logging configuration and warning suppression
+import warnings
+warnings.filterwarnings("ignore")
+logging.getLogger("huggingface_hub").setLevel(logging.ERROR)
+log_level = os.environ.get("ADUC_LOG_LEVEL", "INFO").upper()
+logging.basicConfig(level=log_level, format='[%(levelname)s] [%(name)s] %(message)s')
+
+# --- Configuration constants ---
+DEPS_DIR = Path("/data")
+LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
+RESULTS_DIR = Path("/app/output")
+DEFAULT_FPS = 24.0
+FRAMES_ALIGNMENT = 8
+LTX_REPO_ID = "Lightricks/LTX-Video"
+
+# Ensures the LTX-Video library is importable
 def add_deps_to_path():
     repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
     if repo_path not in sys.path:
         sys.path.insert(0, repo_path)
+        logging.info(f"[ltx_server] LTX-Video repository added to sys.path: {repo_path}")
 
 add_deps_to_path()
 
+# --- Modules from our architecture ---
 try:
+    from api.gpu_manager import gpu_manager
+    from managers.vae_manager import vae_manager_singleton
+    from tools.video_encode_tool import video_encode_tool_singleton
+    from api.ltx.ltx_utils import (
+        build_ltx_pipeline_on_cpu,
+        seed_everything,
+        load_image_to_tensor_with_resize_and_crop,
+        ConditioningItem,
+    )
+    from api.utils.debug_utils import log_function_io
 except ImportError as e:
+    logging.critical(f"A crucial import from the local API/architecture failed. Error: {e}", exc_info=True)
+    sys.exit(1)
 
 # ==============================================================================
+# --- ORCHESTRATOR HELPER FUNCTIONS ---
 # ==============================================================================
 
+@log_function_io
+def calculate_padding(orig_h: int, orig_w: int, target_h: int, target_w: int) -> Tuple[int, int, int, int]:
+    """Calculates symmetric padding required to meet target dimensions."""
+    pad_h = target_h - orig_h
+    pad_w = target_w - orig_w
+    pad_top = pad_h // 2
+    pad_bottom = pad_h - pad_top
+    pad_left = pad_w // 2
+    pad_right = pad_w - pad_left
+    return (pad_left, pad_right, pad_top, pad_bottom)
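+
+# Worked example (illustrative): growing a 480x704 frame to 512x704 gives
+# pad_h = 32 and pad_w = 0, so calculate_padding(480, 704, 512, 704) returns
+# (0, 0, 16, 16), the (left, right, top, bottom) order that
+# torch.nn.functional.pad expects for the last two dimensions.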
 
 # ==============================================================================
+# --- SERVICE CLASS (THE ORCHESTRATOR) ---
 # ==============================================================================
 
+class VideoService:
+    """
+    Orchestrates the high-level logic of video generation, delegating low-level
+    tasks to specialized managers and utility modules.
+    """
+
+    @log_function_io
+    def __init__(self):
+        t0 = time.perf_counter()
+        logging.info("Initializing VideoService Orchestrator...")
+        RESULTS_DIR.mkdir(parents=True, exist_ok=True)
+
+        target_main_device_str = str(gpu_manager.get_ltx_device())
+        target_vae_device_str = str(gpu_manager.get_ltx_vae_device())
+        logging.info(f"LTX allocated to devices: Main='{target_main_device_str}', VAE='{target_vae_device_str}'")
+
+        self.config = self._load_config()
+        self._resolve_model_paths_from_cache()
+
+        self.pipeline, self.latent_upsampler = build_ltx_pipeline_on_cpu(self.config)
+
+        self.main_device = torch.device("cpu")
+        self.vae_device = torch.device("cpu")
+        self.move_to_device(main_device_str=target_main_device_str, vae_device_str=target_vae_device_str)
+
+        self._apply_precision_policy()
+        vae_manager_singleton.attach_pipeline(self.pipeline, device=self.vae_device, autocast_dtype=self.runtime_autocast_dtype)
+        logging.info(f"VideoService ready. Startup time: {time.perf_counter()-t0:.2f}s")
+
+    def _load_config(self) -> Dict:
+        """Loads the YAML configuration file."""
+        config_path = LTX_VIDEO_REPO_DIR / "configs" / "ltxv-13b-0.9.8-distilled-fp8.yaml"
+        logging.info(f"Loading config from: {config_path}")
+        with open(config_path, "r") as file:
+            return yaml.safe_load(file)
+
+    def _resolve_model_paths_from_cache(self):
+        """Finds the absolute paths to model files in the cache and updates the in-memory config."""
+        logging.info("Resolving model paths from Hugging Face cache...")
+        cache_dir = os.environ.get("HF_HOME")
+        try:
+            main_ckpt_path = hf_hub_download(repo_id=LTX_REPO_ID, filename=self.config["checkpoint_path"], cache_dir=cache_dir)
+            self.config["checkpoint_path"] = main_ckpt_path
+            logging.info(f" -> Main checkpoint resolved to: {main_ckpt_path}")
+
+            if self.config.get("spatial_upscaler_model_path"):
+                upscaler_path = hf_hub_download(repo_id=LTX_REPO_ID, filename=self.config["spatial_upscaler_model_path"], cache_dir=cache_dir)
+                self.config["spatial_upscaler_model_path"] = upscaler_path
+                logging.info(f" -> Spatial upscaler resolved to: {upscaler_path}")
+        except Exception as e:
+            logging.critical(f"Failed to resolve model paths. Ensure setup.py ran correctly. Error: {e}", exc_info=True)
+            sys.exit(1)
+
+    @log_function_io
+    def move_to_device(self, main_device_str: str, vae_device_str: str):
+        """Moves pipeline components to their designated target devices."""
+        target_main_device = torch.device(main_device_str)
+        target_vae_device = torch.device(vae_device_str)
+        logging.info(f"Moving LTX models -> Main Pipeline: {target_main_device}, VAE: {target_vae_device}")
+
+        self.main_device = target_main_device
+        self.pipeline.to(self.main_device)
+        self.vae_device = target_vae_device
+        self.pipeline.vae.to(self.vae_device)
+        if self.latent_upsampler: self.latent_upsampler.to(self.main_device)
+        logging.info("LTX models successfully moved to target devices.")
+
+    def move_to_cpu(self):
+        """Moves all LTX components to CPU to free VRAM for other services."""
+        self.move_to_device(main_device_str="cpu", vae_device_str="cpu")
+        if torch.cuda.is_available(): torch.cuda.empty_cache()
+
+    def finalize(self):
+        """Cleans up GPU memory after a generation task."""
+        gc.collect()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            try: torch.cuda.ipc_collect()
+            except Exception: pass
+
+    # ==========================================================================
+    # --- BUSINESS LOGIC: UNIFIED PUBLIC ORCHESTRATOR ---
+    # ==========================================================================
+
+    @log_function_io
+    def generate_low_resolution(self, prompt: str, **kwargs) -> Tuple[Optional[str], Optional[str], Optional[int]]:
+        """
+        [UNIFIED ORCHESTRATOR] Generates a low-resolution video from a prompt.
+        Handles both single-line and multi-line prompts transparently; the
+        chunking arithmetic is illustrated in the comment below.
+        """
+        logging.info("Starting unified low-resolution generation (random seed)...")
+        used_seed = self._get_random_seed()
+        seed_everything(used_seed)
+        logging.info(f"Using randomly generated seed: {used_seed}")
+
+        prompt_list = [p.strip() for p in prompt.splitlines() if p.strip()]
+        if not prompt_list: raise ValueError("Prompt is empty or contains no valid lines.")
+
+        is_narrative = len(prompt_list) > 1
+        logging.info(f"Generation mode detected: {'Narrative' if is_narrative else 'Simple'} ({len(prompt_list)} scene(s)).")
+
+        num_chunks = len(prompt_list)
+        total_frames = self._calculate_aligned_frames(kwargs.get("duration", 4.0))
+        frames_per_chunk = max(FRAMES_ALIGNMENT, (total_frames // num_chunks // FRAMES_ALIGNMENT) * FRAMES_ALIGNMENT)
+        overlap_frames = self.config.get("overlap_frames", 8) if is_narrative else 0
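+        # Worked example (illustrative, assuming the defaults above): a 3-line
+        # prompt with duration=8.0 yields total_frames = 192, so
+        # frames_per_chunk = (192 // 3 // 8) * 8 = 64 and overlap_frames = 8.
+        # The loop below then generates chunks of 64, 72 and 72 frames and
+        # trims the 8 overlapping frames from every chunk after the first.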
+
+        temp_latent_paths = []
+        overlap_condition_item = None
+
+        try:
+            for i, chunk_prompt in enumerate(prompt_list):
+                logging.info(f"Processing scene {i+1}/{num_chunks}: '{chunk_prompt[:50]}...'")
+
+                if i == num_chunks - 1:
+                    processed_frames = (num_chunks - 1) * frames_per_chunk
+                    current_frames = total_frames - processed_frames
+                else:
+                    current_frames = frames_per_chunk
+
+                if i > 0: current_frames += overlap_frames
+
+                current_conditions = kwargs.get("initial_conditions", []) if i == 0 else []
+                if overlap_condition_item: current_conditions.append(overlap_condition_item)
+
+                chunk_latents = self._generate_single_chunk_low(
+                    prompt=chunk_prompt, num_frames=current_frames, seed=used_seed + i,
+                    conditioning_items=current_conditions, **kwargs
+                )
+                if chunk_latents is None: raise RuntimeError(f"Failed to generate latents for scene {i+1}.")
+
+                if is_narrative and i < num_chunks - 1:
+                    overlap_latents = chunk_latents[:, :, -overlap_frames:, :, :].clone()
+                    overlap_condition_item = ConditioningItem(media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0)
+
+                if i > 0: chunk_latents = chunk_latents[:, :, overlap_frames:, :, :]
+
+                chunk_path = RESULTS_DIR / f"temp_chunk_{i}_{used_seed}.pt"
+                torch.save(chunk_latents.cpu(), chunk_path)
+                temp_latent_paths.append(chunk_path)
+
+            base_filename = "narrative_video" if is_narrative else "single_video"
+            return self._finalize_generation(temp_latent_paths, base_filename, used_seed)
+        except Exception as e:
+            logging.error(f"Error during unified generation: {e}", exc_info=True)
+            return None, None, None
+        finally:
+            for path in temp_latent_paths:
+                if path.exists(): path.unlink()
+            self.finalize()
+
+    # ==========================================================================
+    # --- WORK UNITS AND INTERNAL HELPERS ---
+    # ==========================================================================
+
+    @log_function_io
+    def _generate_single_chunk_low(self, **kwargs) -> Optional[torch.Tensor]:
+        """[WORKER] Calls the pipeline to generate a single chunk of latents."""
+        height_padded, width_padded = (self._align(d) for d in (kwargs['height'], kwargs['width']))
+        downscale_factor = self.config.get("downscale_factor", 0.6666666)
+        vae_scale_factor = self.pipeline.vae_scale_factor
+        downscaled_height = self._align(int(height_padded * downscale_factor), vae_scale_factor)
+        downscaled_width = self._align(int(width_padded * downscale_factor), vae_scale_factor)
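+        # Illustrative numbers (assuming downscale_factor = 0.6666666 and a
+        # VAE scale factor of 32): a 480x704 request is already 8-aligned, and
+        # the first pass then runs at 320x480 (int(480 * 0.6666666) = 319 and
+        # int(704 * 0.6666666) = 469, each aligned up to the next multiple of 32).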
+
+        first_pass_config = self.config.get("first_pass", {}).copy()
+        if kwargs.get("ltx_configs_override"):
+            self._apply_ui_overrides(first_pass_config, kwargs["ltx_configs_override"])
+
+        pipeline_kwargs = {
+            "prompt": kwargs['prompt'], "negative_prompt": kwargs['negative_prompt'],
+            "height": downscaled_height, "width": downscaled_width, "num_frames": kwargs['num_frames'],
+            "frame_rate": DEFAULT_FPS, "generator": torch.Generator(device=self.main_device).manual_seed(kwargs['seed']),
+            "output_type": "latent", "conditioning_items": kwargs['conditioning_items'], **first_pass_config
+        }
+
+        with torch.autocast(device_type=self.main_device.type, dtype=self.runtime_autocast_dtype, enabled="cuda" in self.main_device.type):
+            latents_raw = self.pipeline(**pipeline_kwargs).images
+
+        return latents_raw.to(self.main_device)
+
+    @log_function_io
+    def _finalize_generation(self, temp_latent_paths: List[Path], base_filename: str, seed: int) -> Tuple[str, str, int]:
+        """Consolidates latents, decodes them to video, and saves final artifacts."""
+        logging.info("Finalizing generation: decoding latents to video.")
+        all_tensors_cpu = [torch.load(p) for p in temp_latent_paths]
+        final_latents = torch.cat(all_tensors_cpu, dim=2)
+
+        final_latents_path = RESULTS_DIR / f"latents_{base_filename}_{seed}.pt"
+        torch.save(final_latents, final_latents_path)
+        logging.info(f"Final latents saved to: {final_latents_path}")
+
+        pixel_tensor = vae_manager_singleton.decode(
+            final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05))
+        )
+        video_path = self._save_and_log_video(pixel_tensor, f"{base_filename}_{seed}")
+        return str(video_path), str(final_latents_path), seed
+
+    @log_function_io
+    def prepare_condition_items(self, items_list: List, height: int, width: int, num_frames: int) -> List[ConditioningItem]:
+        """[UNIFIED] Prepares ConditioningItems from a mixed list of file paths and tensors."""
+        if not items_list: return []
+        height_padded, width_padded = self._align(height), self._align(width)
+        padding_values = calculate_padding(height, width, height_padded, width_padded)
+
+        conditioning_items = []
+        for media_item, frame, weight in items_list:
+            if isinstance(media_item, str):
+                tensor = load_image_to_tensor_with_resize_and_crop(media_item, height, width)
+                tensor = torch.nn.functional.pad(tensor, padding_values)
+                tensor = tensor.to(self.main_device, dtype=self.runtime_autocast_dtype)
+            elif isinstance(media_item, torch.Tensor):
+                tensor = media_item.to(self.main_device, dtype=self.runtime_autocast_dtype)
+            else:
+                logging.warning(f"Unknown conditioning media type: {type(media_item)}. Skipping.")
+                continue
+
+            safe_frame = max(0, min(int(frame), num_frames - 1))
+            conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
+        return conditioning_items
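+
+    # Illustrative usage sketch (the file path is an assumption):
+    #   items = video_generation_service.prepare_condition_items(
+    #       [("/app/input/first_frame.png", 0, 1.0)], height=480, width=704, num_frames=96)
+    # loads the image, pads it to the aligned size, and pins it to frame 0 at
+    # full conditioning strength.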
+
+    def _apply_ui_overrides(self, config_dict: Dict, overrides: Dict):
+        """Applies advanced settings from the UI to a config dictionary."""
+        # Override step counts
+        for key in ["num_inference_steps", "skip_initial_inference_steps", "skip_final_inference_steps"]:
+            ui_value = overrides.get(key)
+            if ui_value and ui_value > 0:
+                config_dict[key] = ui_value
+                logging.info(f"Override: '{key}' set to {ui_value} by UI.")
+
+        # Override guidance settings
+        preset = overrides.get("guidance_preset", "Padrão (Recomendado)")
+        guidance_overrides = {}
+        if preset == "Agressivo":
+            guidance_overrides = {"guidance_scale": [1, 2, 8, 12, 8, 2, 1], "stg_scale": [0, 0, 5, 6, 5, 3, 2]}
+        elif preset == "Suave":
+            guidance_overrides = {"guidance_scale": [1, 1, 4, 5, 4, 1, 1], "stg_scale": [0, 0, 2, 2, 2, 1, 0]}
+        elif preset == "Customizado":
+            try:
+                guidance_overrides["guidance_scale"] = json.loads(overrides["guidance_scale_list"])
+                guidance_overrides["stg_scale"] = json.loads(overrides["stg_scale_list"])
+            except Exception as e:
+                logging.warning(f"Failed to parse custom guidance values: {e}. Using defaults.")
+
+        if guidance_overrides:
+            config_dict.update(guidance_overrides)
+            logging.info(f"Applying '{preset}' guidance preset overrides.")
+
+    def _save_and_log_video(self, pixel_tensor: torch.Tensor, base_filename: str) -> Path:
+        """Encodes the pixel tensor to MP4 in a temporary directory, then moves it into RESULTS_DIR."""
+        with tempfile.TemporaryDirectory() as temp_dir:
+            temp_path = os.path.join(temp_dir, f"{base_filename}.mp4")
+            video_encode_tool_singleton.save_video_from_tensor(pixel_tensor, temp_path, fps=DEFAULT_FPS)
+            final_path = RESULTS_DIR / f"{base_filename}.mp4"
+            shutil.move(temp_path, final_path)
+            logging.info(f"Video saved successfully to: {final_path}")
+            return final_path
+
+    def _apply_precision_policy(self):
+        """Maps the configured precision string to the dtype used for autocast at runtime."""
+        precision = str(self.config.get("precision", "bfloat16")).lower()
+        if precision in ["float8_e4m3fn", "bfloat16"]: self.runtime_autocast_dtype = torch.bfloat16
+        elif precision == "mixed_precision": self.runtime_autocast_dtype = torch.float16
+        else: self.runtime_autocast_dtype = torch.float32
+        logging.info(f"Runtime precision policy set for autocast: {self.runtime_autocast_dtype}")
+
+    def _align(self, dim: int, alignment: int = FRAMES_ALIGNMENT) -> int:
+        """Rounds dim up to the nearest multiple of alignment."""
+        return ((dim - 1) // alignment + 1) * alignment
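+    # Illustrative: _align(30) == 32 and _align(32) == 32 with the default
+    # alignment of 8; _align(100, 32) == 128 when a VAE scale factor of 32
+    # is passed explicitly.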
 
+    def _calculate_aligned_frames(self, duration_s: float, min_frames: int = 1) -> int:
+        """Converts a duration in seconds into a frame count aligned to FRAMES_ALIGNMENT."""
+        num_frames = int(round(duration_s * DEFAULT_FPS))
+        aligned_frames = self._align(num_frames)
+        return max(aligned_frames, min_frames)  # never return fewer than min_frames
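+    # Illustrative: duration_s=4.0 at DEFAULT_FPS=24 gives 96 frames,
+    # already a multiple of 8, so the result is 96; duration_s=4.2 gives
+    # round(100.8) = 101, which aligns up to 104.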
+
+    def _get_random_seed(self) -> int:
+        """Always generates and returns a new random seed."""
+        return random.randint(0, 2**32 - 1)
 
 # ==============================================================================
+# --- SINGLETON INSTANTIATION ---
 # ==============================================================================
+try:
+    video_generation_service = VideoService()
+    logging.info("Global VideoService orchestrator instance created successfully.")
+except Exception as e:
+    logging.critical(f"Failed to initialize VideoService: {e}", exc_info=True)
+ sys.exit(1)