# FILE: api/ltx_server_refactored_complete.py
# DESCRIPTION: Final high-level orchestrator for LTX-Video generation.
# This version delegates all low-level tasks to dedicated utility modules and managers,
# focusing solely on the business logic of video generation workflows.

import gc
import logging
import os
import random
import shutil
import sys
import tempfile
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import torch
import yaml

# ==============================================================================
# --- PROJECT SETUP AND IMPORTS ---
# ==============================================================================

# Environment configuration constants
DEPS_DIR = Path("/data")
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
RESULTS_DIR = Path("/app/output")
DEFAULT_FPS = 24.0
FRAMES_ALIGNMENT = 8


def add_deps_to_path():
    """Ensures the LTX-Video library is importable."""
    repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
    if repo_path not in sys.path:
        sys.path.insert(0, repo_path)
        logging.info(f"[ltx_server] LTX-Video repository added to sys.path: {repo_path}")


add_deps_to_path()

# --- Modules from our architecture ---
try:
    from api.gpu_manager import gpu_manager
    from managers.vae_manager import vae_manager_singleton
    from tools.video_encode_tool import video_encode_tool_singleton

    # Our LTX utilities module, which encapsulates the low-level complexity.
    from api.ltx.ltx_utils import (
        ConditioningItem,
        build_ltx_pipeline_on_cpu,
        load_image_to_tensor_with_resize_and_crop,
        seed_everything,
    )
except ImportError as e:
    logging.critical(f"A crucial import from the local API/architecture failed. Error: {e}", exc_info=True)
    sys.exit(1)

# ==============================================================================
# --- ORCHESTRATOR HELPER FUNCTIONS ---
# ==============================================================================


def calculate_padding(orig_h: int, orig_w: int, target_h: int, target_w: int) -> Tuple[int, int, int, int]:
    """Calculates the symmetric padding required to meet the target dimensions."""
    pad_h = target_h - orig_h
    pad_w = target_w - orig_w
    pad_top = pad_h // 2
    pad_bottom = pad_h - pad_top
    pad_left = pad_w // 2
    pad_right = pad_w - pad_left
    return (pad_left, pad_right, pad_top, pad_bottom)


# ==============================================================================
# --- SERVICE CLASS (THE ORCHESTRATOR) ---
# ==============================================================================


class VideoService:
    """
    Orchestrates the high-level logic of video generation, delegating
    low-level tasks to specialized managers and utility modules.
    """

    def __init__(self):
        t0 = time.perf_counter()
        logging.info("Initializing VideoService Orchestrator...")
        RESULTS_DIR.mkdir(parents=True, exist_ok=True)

        target_main_device_str = str(gpu_manager.get_ltx_device())
        target_vae_device_str = str(gpu_manager.get_ltx_vae_device())
        logging.info(f"LTX allocated to devices: Main='{target_main_device_str}', VAE='{target_vae_device_str}'")

        self.config = self._load_config()
        # Build everything on CPU first, then move, to keep startup VRAM low.
        self.pipeline, self.latent_upsampler = build_ltx_pipeline_on_cpu(self.config)
        self.main_device = torch.device("cpu")
        self.vae_device = torch.device("cpu")

        self.move_to_device(main_device_str=target_main_device_str, vae_device_str=target_vae_device_str)
        self._apply_precision_policy()
        vae_manager_singleton.attach_pipeline(
            self.pipeline, device=self.vae_device, autocast_dtype=self.runtime_autocast_dtype
        )
        logging.info(f"VideoService ready. Startup time: {time.perf_counter() - t0:.2f}s")
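    # Device-placement note: because the pipeline is built entirely on CPU and
    # only then moved, startup VRAM peaks near the steady-state footprint. A
    # hypothetical dual-GPU split (device strings are assumptions, not values
    # read from gpu_manager) would be:
    #   self.move_to_device(main_device_str="cuda:0", vae_device_str="cuda:1")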
    def _load_config(self) -> Dict:
        """Loads the YAML configuration file."""
        config_path = LTX_VIDEO_REPO_DIR / "configs" / "ltxv-13b-0.9.8-distilled-fp8.yaml"
        logging.info(f"Loading config from: {config_path}")
        with open(config_path, "r") as file:
            return yaml.safe_load(file)

    def move_to_device(self, main_device_str: str, vae_device_str: str):
        """Moves pipeline components to their designated target devices."""
        target_main_device = torch.device(main_device_str)
        target_vae_device = torch.device(vae_device_str)
        logging.info(f"Moving LTX models -> Main Pipeline: {target_main_device}, VAE: {target_vae_device}")

        self.main_device = target_main_device
        self.pipeline.to(self.main_device)
        self.vae_device = target_vae_device
        self.pipeline.vae.to(self.vae_device)
        if self.latent_upsampler:
            self.latent_upsampler.to(self.main_device)
        logging.info("LTX models successfully moved to target devices.")

    def move_to_cpu(self):
        """Moves all LTX components to CPU to free VRAM for other services."""
        self.move_to_device(main_device_str="cpu", vae_device_str="cpu")
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def finalize(self):
        """Cleans up GPU memory after a generation task."""
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            try:
                torch.cuda.ipc_collect()
            except Exception:
                pass

    # ==========================================================================
    # --- BUSINESS LOGIC: PUBLIC ORCHESTRATORS ---
    # ==========================================================================

    def generate_narrative_low(self, prompt: str, **kwargs) -> Tuple[Optional[str], Optional[str], Optional[int]]:
        """Orchestrates the generation of a video from a multi-line prompt (a sequence of scenes)."""
        logging.info("Starting narrative low-res generation...")
        # Pop the seed so it cannot collide with the explicit seed= kwarg below.
        used_seed = self._resolve_seed(kwargs.pop("seed", None))
        seed_everything(used_seed)

        prompt_list = [p.strip() for p in prompt.splitlines() if p.strip()]
        if not prompt_list:
            raise ValueError("Prompt is empty or contains no valid lines.")

        num_chunks = len(prompt_list)
        total_frames = self._calculate_aligned_frames(kwargs.get("duration", 4.0))
        frames_per_chunk = (total_frames // num_chunks // FRAMES_ALIGNMENT) * FRAMES_ALIGNMENT
        overlap_frames = self.config.get("overlap_frames", 8)

        temp_latent_paths = []
        overlap_condition_item = None
        try:
            for i, chunk_prompt in enumerate(prompt_list):
                logging.info(f"Generating narrative chunk {i + 1}/{num_chunks}: '{chunk_prompt[:50]}...'")
                current_frames = frames_per_chunk + (overlap_frames if i > 0 else 0)
                current_conditions = kwargs.get("initial_conditions", []) if i == 0 else []
                if overlap_condition_item:
                    current_conditions.append(overlap_condition_item)

                chunk_latents = self._generate_single_chunk_low(
                    prompt=chunk_prompt,
                    num_frames=current_frames,
                    seed=used_seed + i,
                    conditioning_items=current_conditions,
                    **kwargs,
                )
                if chunk_latents is None:
                    raise RuntimeError(f"Failed to generate latents for chunk {i + 1}.")

                if i < num_chunks - 1:
                    # Keep the tail of this chunk to condition the start of the next one.
                    overlap_latents = chunk_latents[:, :, -overlap_frames:, :, :].clone()
                    overlap_condition_item = ConditioningItem(
                        media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0
                    )
                if i > 0:
                    # Drop the overlapping frames that were only rendered for continuity.
                    chunk_latents = chunk_latents[:, :, overlap_frames:, :, :]

                chunk_path = RESULTS_DIR / f"temp_chunk_{i}_{used_seed}.pt"
                torch.save(chunk_latents.cpu(), chunk_path)
                temp_latent_paths.append(chunk_path)

            return self._finalize_generation(temp_latent_paths, "narrative_video", used_seed)
        except Exception as e:
            logging.error(f"Error during narrative generation: {e}", exc_info=True)
            return None, None, None
        finally:
            for path in temp_latent_paths:
                if path.exists():
                    path.unlink()
            self.finalize()
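    # Worked example of the chunking arithmetic in generate_narrative_low above
    # (illustrative numbers, not defaults): duration=8.0 at 24 fps gives
    # _calculate_aligned_frames -> 193 total frames. With a 2-line prompt,
    # frames_per_chunk = (193 // 2 // 8) * 8 = 96. Chunk 0 renders 96 frames;
    # chunk 1 renders 96 + 8 overlap frames conditioned on chunk 0's tail, and
    # its leading 8 latent frames are trimmed before saving, so the
    # concatenated sequence has no duplicated frames at the seam.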
    def generate_single_low(self, **kwargs) -> Tuple[Optional[str], Optional[str], Optional[int]]:
        """Orchestrates the generation of a video from a single prompt in one pass."""
        logging.info("Starting single-prompt low-res generation...")
        # Pop the seed so it cannot collide with the explicit seed= kwarg below.
        used_seed = self._resolve_seed(kwargs.pop("seed", None))
        seed_everything(used_seed)
        try:
            total_frames = self._calculate_aligned_frames(kwargs.get("duration", 4.0), min_frames=9)
            final_latents = self._generate_single_chunk_low(
                num_frames=total_frames,
                seed=used_seed,
                conditioning_items=kwargs.get("initial_conditions", []),
                **kwargs,
            )
            if final_latents is None:
                raise RuntimeError("Failed to generate latents.")

            temp_latent_path = RESULTS_DIR / f"temp_single_{used_seed}.pt"
            torch.save(final_latents.cpu(), temp_latent_path)
            return self._finalize_generation([temp_latent_path], "single_video", used_seed)
        except Exception as e:
            logging.error(f"Error during single generation: {e}", exc_info=True)
            return None, None, None
        finally:
            self.finalize()

    # ==========================================================================
    # --- WORK UNITS AND INTERNAL HELPERS ---
    # ==========================================================================

    def _generate_single_chunk_low(self, **kwargs) -> Optional[torch.Tensor]:
        """Calls the pipeline to generate a single chunk of latents."""
        height_padded, width_padded = (self._align(d) for d in (kwargs["height"], kwargs["width"]))
        downscale_factor = self.config.get("downscale_factor", 0.6666666)
        vae_scale_factor = self.pipeline.vae_scale_factor
        downscaled_height = self._align(int(height_padded * downscale_factor), vae_scale_factor)
        downscaled_width = self._align(int(width_padded * downscale_factor), vae_scale_factor)

        first_pass_config = self.config.get("first_pass", {}).copy()
        if kwargs.get("ltx_configs_override"):
            first_pass_config.update(self._prepare_guidance_overrides(kwargs["ltx_configs_override"]))

        pipeline_kwargs = {
            "prompt": kwargs["prompt"],
            "negative_prompt": kwargs["negative_prompt"],
            "height": downscaled_height,
            "width": downscaled_width,
            "num_frames": kwargs["num_frames"],
            "frame_rate": DEFAULT_FPS,
            "generator": torch.Generator(device=self.main_device).manual_seed(kwargs["seed"]),
            "output_type": "latent",
            "conditioning_items": kwargs["conditioning_items"],
            **first_pass_config,
        }

        with torch.autocast(
            device_type=self.main_device.type,
            dtype=self.runtime_autocast_dtype,
            enabled=self.main_device.type == "cuda",
        ):
            latents_raw = self.pipeline(**pipeline_kwargs).images
        return latents_raw.to(self.main_device)

    def _finalize_generation(self, temp_latent_paths: List[Path], base_filename: str, seed: int) -> Tuple[str, str, int]:
        """Consolidates latents, decodes them to video, and saves the final artifacts."""
        logging.info("Finalizing generation: decoding latents to video.")
        all_tensors_cpu = [torch.load(p) for p in temp_latent_paths]
        final_latents = torch.cat(all_tensors_cpu, dim=2)

        final_latents_path = RESULTS_DIR / f"latents_{base_filename}_{seed}.pt"
        torch.save(final_latents, final_latents_path)
        logging.info(f"Final latents saved to: {final_latents_path}")

        pixel_tensor = vae_manager_singleton.decode(
            final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05))
        )
        video_path = self._save_and_log_video(pixel_tensor, f"{base_filename}_{seed}")
        return str(video_path), str(final_latents_path), seed
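    # Example of the `items_list` shape consumed by prepare_condition_items
    # below (hypothetical paths and values): each entry is a
    # (media_path, target_frame, strength) triple, e.g.
    #   [("/inputs/start.png", 0, 1.0), ("/inputs/mid.png", 48, 0.8)]
    # Frame indices are clamped into [0, num_frames - 1] before each
    # ConditioningItem is built.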
    def prepare_condition_items(self, items_list: List, height: int, width: int, num_frames: int) -> List[ConditioningItem]:
        """Builds ConditioningItem objects from (media, frame, weight) triples."""
        if not items_list:
            return []

        height_padded, width_padded = self._align(height), self._align(width)
        padding_values = calculate_padding(height, width, height_padded, width_padded)

        conditioning_items = []
        for media, frame, weight in items_list:
            tensor = self._prepare_conditioning_tensor(media, height, width, padding_values)
            safe_frame = max(0, min(int(frame), num_frames - 1))  # clamp to the valid frame range
            conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
        return conditioning_items

    def _prepare_conditioning_tensor(self, media_path: str, height: int, width: int, padding: Tuple) -> torch.Tensor:
        """Loads an image, pads it to the aligned dimensions, and moves it to the main device."""
        tensor = load_image_to_tensor_with_resize_and_crop(media_path, height, width)
        tensor = torch.nn.functional.pad(tensor, padding)
        return tensor.to(self.main_device, dtype=self.runtime_autocast_dtype)

    def _prepare_guidance_overrides(self, ltx_configs: Dict) -> Dict:
        """Translates UI-level guidance presets into first-pass pipeline overrides."""
        overrides = {}
        preset = ltx_configs.get("guidance_preset", "Padrão (Recomendado)")
        # ... (logic for presets remains the same)
        return overrides

    def _save_and_log_video(self, pixel_tensor: torch.Tensor, base_filename: str) -> Path:
        """Encodes the pixel tensor to MP4 in a temp dir, then moves it into RESULTS_DIR."""
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = os.path.join(temp_dir, f"{base_filename}.mp4")
            video_encode_tool_singleton.save_video_from_tensor(pixel_tensor, temp_path, fps=DEFAULT_FPS)
            final_path = RESULTS_DIR / f"{base_filename}.mp4"
            shutil.move(temp_path, final_path)
            logging.info(f"Video saved successfully to: {final_path}")
            return final_path

    def _apply_precision_policy(self):
        """Maps the config's precision string to an autocast dtype."""
        precision = str(self.config.get("precision", "bfloat16")).lower()
        if precision in ["float8_e4m3fn", "bfloat16"]:
            self.runtime_autocast_dtype = torch.bfloat16
        elif precision == "mixed_precision":
            self.runtime_autocast_dtype = torch.float16
        else:
            self.runtime_autocast_dtype = torch.float32
        logging.info(f"Runtime precision policy set for autocast: {self.runtime_autocast_dtype}")

    def _align(self, dim: int, alignment: int = FRAMES_ALIGNMENT) -> int:
        """Rounds `dim` up to the nearest multiple of `alignment`."""
        return ((dim - 1) // alignment + 1) * alignment

    def _calculate_aligned_frames(self, duration_s: float, min_frames: int = 1) -> int:
        """Converts a duration in seconds to an aligned frame count (a multiple of 8, plus 1)."""
        num_frames = int(round(duration_s * DEFAULT_FPS))
        aligned_frames = self._align(num_frames)
        return max(aligned_frames + 1, min_frames)

    def _resolve_seed(self, seed: Optional[int]) -> int:
        """Returns the given seed, or a random 32-bit seed when none is provided."""
        return random.randint(0, 2**32 - 1) if seed is None else int(seed)


# ==============================================================================
# --- SINGLETON INSTANTIATION ---
# ==============================================================================
try:
    video_generation_service = VideoService()
    logging.info("Global VideoService orchestrator instance created successfully.")
except Exception as e:
    logging.critical(f"Failed to initialize VideoService: {e}", exc_info=True)
    sys.exit(1)
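# A minimal usage sketch (assumption: this module is normally driven by an API
# layer and a GPU is available). The kwargs mirror what
# _generate_single_chunk_low reads: prompt, negative_prompt, height, width,
# plus the optional duration and seed. The prompt text and sizes are
# illustrative only.
if __name__ == "__main__":
    video_path, latents_path, used_seed = video_generation_service.generate_single_low(
        prompt="A slow pan across a foggy harbor at dawn",
        negative_prompt="blurry, distorted, low quality",
        height=512,
        width=768,
        duration=4.0,
        seed=42,
    )
    print(f"video={video_path} latents={latents_path} seed={used_seed}")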